commit_id
string
repo
string
commit_message
string
diff
string
label
int64
bc9d890f48c4515162e6fbdd4af485b8649b0035
389ds/389-ds-base
Bug 750625 - Fix Coverity (11095) Explicit null dereferenced https://bugzilla.redhat.com/show_bug.cgi?id=750625 lib/libaccess/lasdns.cpp (LASDnsEval) Bug Description: LASDnsBuild could fail and context->Table could be left as NULL. Error checking for LASDnsBuild should be added. Fix Description: If LASDnsBuild returns LAS_EVAL_INVALID, return LAS_EVAL_FAIL there.
commit bc9d890f48c4515162e6fbdd4af485b8649b0035 Author: Noriko Hosoi <[email protected]> Date: Wed Nov 2 09:37:33 2011 -0700 Bug 750625 - Fix Coverity (11095) Explicit null dereferenced https://bugzilla.redhat.com/show_bug.cgi?id=750625 lib/libaccess/lasdns.cpp (LASDnsEval) Bug Description: LASDnsBuild could fail and context->Table could be left as NULL. Error checking for LASDnsBuild should be added. Fix Description: If LASDnsBuild returns LAS_EVAL_INVALID, return LAS_EVAL_FAIL there. diff --git a/lib/libaccess/lasdns.cpp b/lib/libaccess/lasdns.cpp index 6956878a7..e4032f1be 100644 --- a/lib/libaccess/lasdns.cpp +++ b/lib/libaccess/lasdns.cpp @@ -389,7 +389,13 @@ int LASDnsEval(NSErr_t *errp, char *attr_name, CmpOp_t comparator, return LAS_EVAL_FAIL; } context->Table = NULL; - LASDnsBuild(errp, attr_pattern, context, aliasflg); + if (LASDnsBuild(errp, attr_pattern, context, aliasflg) == + LAS_EVAL_INVALID) { + /* Error is already printed in LASDnsBuild */ + ACL_CritExit(); + return LAS_EVAL_FAIL; + } + /* After this line, it is assured context->Table is not NULL. */ } else { context = (LASDnsContext *) *LAS_cookie; }
0
d5e1164a67783e3d9a008420386f98cdfa3a8a1f
389ds/389-ds-base
Ticket 48377 - Bundle jemalloc Descriptrion: gperftools is going away in RHEL, that includes tcmalloc, so we now need to bundle jemalloc again. https://pagure.io/389-ds-base/issue/48377 Reviewed by: vashirov(Thanks!)
commit d5e1164a67783e3d9a008420386f98cdfa3a8a1f Author: Mark Reynolds <[email protected]> Date: Mon Jun 4 09:13:53 2018 -0400 Ticket 48377 - Bundle jemalloc Descriptrion: gperftools is going away in RHEL, that includes tcmalloc, so we now need to bundle jemalloc again. https://pagure.io/389-ds-base/issue/48377 Reviewed by: vashirov(Thanks!) diff --git a/ldap/admin/src/base-initconfig.in b/ldap/admin/src/base-initconfig.in index 1403adad9..f9e9bdc26 100644 --- a/ldap/admin/src/base-initconfig.in +++ b/ldap/admin/src/base-initconfig.in @@ -40,3 +40,9 @@ # is a problem and fail to start. # If using systemd, omit the "; export PID_TIME" at the end. #PID_TIME=600 ; export PID_TIME + +# jemalloc is a general purpose malloc implementation that emphasizes +# fragmentation avoidance and scalable concurrency support. jemalloc +# has been shown to have a significant positive impact on the Directory +# Server's process size/growth. +LD_PRELOAD=@libdir@/@package_name@/lib/libjemalloc.so diff --git a/rpm.mk b/rpm.mk index b72fa5fab..a62a15d7d 100644 --- a/rpm.mk +++ b/rpm.mk @@ -9,6 +9,9 @@ RPM_NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)$(RPM_VERSION_PREREL) NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)$(VERSION_PREREL) TARBALL = $(NAME_VERSION).tar.bz2 NUNC_STANS_ON = 1 +JEMALLOC_URL ?= $(shell rpmspec -P $(RPMBUILD)/SPECS/389-ds-base.spec | awk '/^Source3:/ {print $$2}') +JEMALLOC_TARBALL ?= $(shell basename "$(JEMALLOC_URL)") +BUNDLE_JEMALLOC = 1 # Some sanitizers are supported only by clang CLANG_ON = 0 @@ -37,6 +40,9 @@ tarballs: local-archive cd dist; tar cfj sources/$(TARBALL) $(NAME_VERSION) rm -rf dist/$(NAME_VERSION) cd dist/sources ; \ + if [ $(BUNDLE_JEMALLOC) -eq 1 ]; then \ + wget $(JEMALLOC_URL) ; \ + fi rpmroot: rm -rf $(RPMBUILD) @@ -55,6 +61,7 @@ rpmroot: -e s/__UBSAN_ON__/$(UBSAN_ON)/ \ -e s/__PERL_ON__/$(PERL_ON)/ \ -e s/__CLANG_ON__/$(CLANG_ON)/ \ + -e s/__BUNDLE_JEMALLOC__/$(BUNDLE_JEMALLOC)/ \ rpm/$(PACKAGE).spec.in > $(RPMBUILD)/SPECS/$(PACKAGE).spec rpmdistdir: 
@@ -66,6 +73,9 @@ srpmdistdir: rpmbuildprep: cp dist/sources/$(TARBALL) $(RPMBUILD)/SOURCES/ cp rpm/$(PACKAGE)-* $(RPMBUILD)/SOURCES/ + if [ $(BUNDLE_JEMALLOC) -eq 1 ]; then \ + cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \ + fi srpms: rpmroot srpmdistdir tarballs rpmbuildprep rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index a910ad91c..b773136c8 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -2,6 +2,12 @@ %global pkgname dirsrv %global srcname 389-ds-base +%global bundle_jemalloc __BUNDLE_JEMALLOC__ +%if %{bundle_jemalloc} +%global jemalloc_name jemalloc +%global jemalloc_ver 5.0.1 +%endif + # This is used in certain builds to help us know if it has extra features. %global variant base # for a pre-release, define the prerel field e.g. .a1 .rc2 - comment out for official release @@ -29,14 +35,7 @@ %define nss_version 3.11 %if %{use_asan} || %{use_msan} || %{use_tsan} || %{use_ubsan} -%global use_tcmalloc 0 %global variant base-xsan -%else -%if %{_arch} != "s390x" && %{_arch} != "s390" && !%{use_rust} -%global use_tcmalloc 1 -%else -%global use_tcmalloc 0 -%endif %endif # Use Clang instead of GCC @@ -81,6 +80,7 @@ BuildRequires: libatomic BuildRequires: clang BuildRequires: compiler-rt %else +BuildRequires: gcc BuildRequires: gcc-c++ %if %{use_asan} BuildRequires: libasan @@ -123,10 +123,6 @@ BuildRequires: python%{python3_pkgversion} BuildRequires: python%{python3_pkgversion}-devel BuildRequires: python%{python3_pkgversion}-setuptools -%if %{use_tcmalloc} -BuildRequires: gperftools-devel -%endif - # For cockpit BuildRequires: rsync # END BUILD REQUIRES @@ -137,10 +133,7 @@ Requires: %{name}-libs = %{version}-%{release} %if 0%{?rhel} > 7 || 0%{?fedora} Requires: python%{python3_pkgversion}-lib389 = %{version}-%{release} %endif -# Attach to -base our script deps -%if %{use_tcmalloc} -Requires: gperftools-libs -%endif + # this is 
needed for using semanage from our setup scripts Requires: policycoreutils-python # This is needed for our future move to python selinux interaction. @@ -184,6 +177,9 @@ Source0: http://www.port389.org/sources/%{name}-%{version}%{?prerel}.ta # 389-ds-git.sh should be used to generate the source tarball from git Source1: %{name}-git.sh Source2: %{name}-devel.README +%if %{bundle_jemalloc} +Source3: https://github.com/jemalloc/%{jemalloc_name}/releases/download/%{jemalloc_ver}/%{jemalloc_name}-%{jemalloc_ver}.tar.bz2 +%endif %description 389 Directory Server is an LDAPv3 compliant server. The base package includes @@ -309,6 +305,10 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server %prep %setup -q -n %{name}-%{version}%{?prerel} +%if %{bundle_jemalloc} +%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3 +%endif + cp %{SOURCE2} README.devel %build @@ -339,10 +339,6 @@ TSAN_FLAGS="--enable-tsan --enable-debug" UBSAN_FLAGS="--enable-ubsan --enable-debug" %endif -%if %{use_tcmalloc} -TCMALLOC_FLAGS="--enable-tcmalloc" -%endif - %if %{use_rust} RUST_FLAGS="--enable-rust" %endif @@ -351,6 +347,16 @@ RUST_FLAGS="--enable-rust" PERL_FLAGS="--disable-perl" %endif +%if %{bundle_jemalloc} +# Build jemalloc +pushd ../%{jemalloc_name}-%{jemalloc_ver} +%configure \ + --libdir=%{_libdir}/%{pkgname}/lib \ + --bindir=%{_libdir}/%{pkgname}/bin +make +popd +%endif + # Rebuild the autotool artifacts now. 
autoreconf -fiv @@ -360,7 +366,7 @@ autoreconf -fiv --with-systemdsystemconfdir=%{_sysconfdir}/systemd/system \ --with-systemdgroupname=%{groupname} \ --libexecdir=%{_libexecdir}/%{pkgname} \ - $NSSARGS $TCMALLOC_FLAGS $ASAN_FLAGS $MSAN_FLAGS $TSAN_FLAGS $UBSAN_FLAGS $RUST_FLAGS $PERL_FLAGS $CLANG_FLAGS \ + $NSSARGS $ASAN_FLAGS $MSAN_FLAGS $TSAN_FLAGS $UBSAN_FLAGS $RUST_FLAGS $PERL_FLAGS $CLANG_FLAGS \ --enable-cmocka %if 0%{?rhel} > 7 || 0%{?fedora} @@ -425,6 +431,14 @@ rm -f $RPM_BUILD_ROOT%{_libdir}/libsvrcore.la sed -i -e 's|#{{PERL-EXEC}}|#!/usr/bin/perl|' $RPM_BUILD_ROOT%{_datadir}/%{pkgname}/script-templates/template-*.pl %endif +%if %{bundle_jemalloc} +pushd ../%{jemalloc_name}-%{jemalloc_ver} +make DESTDIR="$RPM_BUILD_ROOT" install_lib install_bin +cp -pa COPYING ../%{name}-%{version}%{?prerel}/COPYING.jemalloc +cp -pa README ../%{name}-%{version}%{?prerel}/README.jemalloc +popd +%endif + %check # This checks the code, if it fails it prints why, then re-raises the fail to shortcircuit the rpm build. 
%if %{use_tsan} @@ -543,7 +557,11 @@ fi %files %defattr(-,root,root,-) +%if %{bundle_jemalloc} +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.jemalloc COPYING.jemalloc +%else %doc LICENSE LICENSE.GPLv3+ LICENSE.openssl +%endif %dir %{_sysconfdir}/%{pkgname} %dir %{_sysconfdir}/%{pkgname}/schema %config(noreplace)%{_sysconfdir}/%{pkgname}/schema/*.ldif @@ -625,6 +643,10 @@ fi %exclude %{_sbindir}/ldap-agent* %exclude %{_mandir}/man1/ldap-agent.1.gz %exclude %{_unitdir}/%{pkgname}-snmp.service +%if %{bundle_jemalloc} +%{_libdir}/%{pkgname}/lib/ +%{_libdir}/%{pkgname}/bin/ +%endif %files devel %defattr(-,root,root,-) @@ -638,6 +660,9 @@ fi %{_libdir}/%{pkgname}/libnunc-stans.so %{_libdir}/%{pkgname}/libsds.so %{_libdir}/%{pkgname}/libldaputil.so +%if %{bundle_jemalloc} +%{_libdir}/%{pkgname}/lib/libjemalloc.so +%endif %{_libdir}/pkgconfig/svrcore.pc %{_libdir}/pkgconfig/dirsrv.pc %{_libdir}/pkgconfig/libsds.pc @@ -653,6 +678,9 @@ fi %{_libdir}/%{pkgname}/libnunc-stans.so.* %{_libdir}/%{pkgname}/libsds.so.* %{_libdir}/%{pkgname}/libldaputil.so.* +%if %{bundle_jemalloc} +%{_libdir}/%{pkgname}/lib/libjemalloc.so.* +%endif %if %{use_rust} %{_libdir}/%{pkgname}/librsds.so %endif
0
1dbb69ba9d7c65de580e7c92105b36284833dc0a
389ds/389-ds-base
Issue 4209 - RFE - add bootstrap credentials to repl agreement (upgrade update) Description: Add an upgrade function to add the new bootstrap password attribute to the AES reversible password plugin. relates: https://github.com/389ds/389-ds-base/issues/4209 Reviewed by: tbordaz & firstyear (Thanks!!)
commit 1dbb69ba9d7c65de580e7c92105b36284833dc0a Author: Mark Reynolds <[email protected]> Date: Thu Aug 27 14:39:38 2020 -0400 Issue 4209 - RFE - add bootstrap credentials to repl agreement (upgrade update) Description: Add an upgrade function to add the new bootstrap password attribute to the AES reversible password plugin. relates: https://github.com/389ds/389-ds-base/issues/4209 Reviewed by: tbordaz & firstyear (Thanks!!) diff --git a/ldap/servers/slapd/upgrade.c b/ldap/servers/slapd/upgrade.c index 6ce5a2e2b..7b530e6cb 100644 --- a/ldap/servers/slapd/upgrade.c +++ b/ldap/servers/slapd/upgrade.c @@ -42,6 +42,60 @@ upgrade_entry_exists_or_create(char *upgrade_id, char *filter, char *dn, char *e return uresult; } +/* + * Add the new replication bootstrap bind DN password attribute to the AES + * reversible password plugin + */ +static int32_t +upgrade_AES_reverpwd_plugin(void) +{ + Slapi_PBlock *search_pb = slapi_pblock_new(); + Slapi_Entry *plugin_entry = NULL; + Slapi_DN *sdn = NULL; + const char *plugin_dn = "cn=AES,cn=Password Storage Schemes,cn=plugins,cn=config"; + char *plugin_attr = "nsslapd-pluginarg2"; + char *repl_bootstrap_val = "nsds5replicabootstrapcredentials"; + upgrade_status uresult = UPGRADE_SUCCESS; + + sdn = slapi_sdn_new_dn_byref(plugin_dn); + slapi_search_get_entry(&search_pb, sdn, NULL, &plugin_entry, NULL); + if (plugin_entry) { + if (slapi_entry_attr_get_ref(plugin_entry, plugin_attr) == NULL) { + /* The attribute is not set, add it */ + Slapi_PBlock *mod_pb = slapi_pblock_new(); + LDAPMod mod_add; + LDAPMod *mods[2]; + char *add_val[2]; + int32_t result; + + add_val[0] = repl_bootstrap_val; + add_val[1] = 0; + mod_add.mod_op = LDAP_MOD_ADD; + mod_add.mod_type = plugin_attr; + mod_add.mod_values = add_val; + mods[0] = &mod_add; + mods[1] = 0; + + slapi_modify_internal_set_pb(mod_pb, plugin_dn, + mods, 0, 0, (void *)plugin_get_default_component_id(), 0); + slapi_modify_internal_pb(mod_pb); + slapi_pblock_get(mod_pb, 
SLAPI_PLUGIN_INTOP_RESULT, &result); + if (result != LDAP_SUCCESS) { + slapi_log_err(SLAPI_LOG_ERR, "upgrade_AES_reverpwd_plugin", + "Failed to upgrade (%s) with new replication " + "bootstrap password attribute (%s), error %d\n", + plugin_dn, plugin_attr, result); + uresult = UPGRADE_FAILURE; + } + slapi_pblock_destroy(mod_pb); + } + } + slapi_search_get_entry_done(&search_pb); + slapi_sdn_free(&sdn); + + return uresult; +} + #ifdef RUST_ENABLE static upgrade_status upgrade_143_entryuuid_exists(void) { @@ -75,6 +129,10 @@ upgrade_server(void) { } #endif + if (upgrade_AES_reverpwd_plugin() != UPGRADE_SUCCESS) { + return UPGRADE_FAILURE; + } + return UPGRADE_SUCCESS; }
0
25687041f157da911bee039dbf01f76bdfcc518d
389ds/389-ds-base
Bug 711679 - unresponsive LDAP service when deleting vlv on replica https://bugzilla.redhat.com/show_bug.cgi?id=711679 Bug Description: When a vlv index is created, a newly generated vlv index object is linked in the vlv search list. If the server is under stress, there could be some contention among threads and each thread could add an identical vlv index object to the list, which leads to open the vlv index db more than once - count of the objects. Opening a db file locks it with a READ lock. Deleting the vlv index first closes the db which is supposed to release the READ lock. But since the db is opened more than once, still some READ lock remains. And the deleter's attempt to lock the db with WRITE lock is kept waiting for the READ lock released, that never happens. Fix Description: Check the vlv index object before adding it to the vlv search list. If the identical object is found in the list, adding is skipped.
commit 25687041f157da911bee039dbf01f76bdfcc518d Author: Noriko Hosoi <[email protected]> Date: Fri Jun 17 11:16:03 2011 -0700 Bug 711679 - unresponsive LDAP service when deleting vlv on replica https://bugzilla.redhat.com/show_bug.cgi?id=711679 Bug Description: When a vlv index is created, a newly generated vlv index object is linked in the vlv search list. If the server is under stress, there could be some contention among threads and each thread could add an identical vlv index object to the list, which leads to open the vlv index db more than once - count of the objects. Opening a db file locks it with a READ lock. Deleting the vlv index first closes the db which is supposed to release the READ lock. But since the db is opened more than once, still some READ lock remains. And the deleter's attempt to lock the db with WRITE lock is kept waiting for the READ lock released, that never happens. Fix Description: Check the vlv index object before adding it to the vlv search list. If the identical object is found in the list, adding is skipped. 
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c index ea08fd605..063b3a1a2 100644 --- a/ldap/servers/slapd/back-ldbm/vlv.c +++ b/ldap/servers/slapd/back-ldbm/vlv.c @@ -92,25 +92,32 @@ int vlv_AddSearchEntry(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* int vlv_AddIndexEntry(Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* entryAfter, int *returncode, char *returntext, void *arg) { - struct vlvSearch *parent; - backend *be= ((ldbm_instance*)arg)->inst_be; - Slapi_DN parentdn; - - slapi_sdn_init(&parentdn); - slapi_sdn_get_parent(slapi_entry_get_sdn(entryBefore),&parentdn); + struct vlvSearch *parent; + backend *be= ((ldbm_instance*)arg)->inst_be; + Slapi_DN parentdn; + + slapi_sdn_init(&parentdn); + slapi_sdn_get_parent(slapi_entry_get_sdn(entryBefore),&parentdn); + + /* vlvIndex list is modified; need Wlock */ + PR_RWLock_Wlock(be->vlvSearchList_lock); + parent= vlvSearch_finddn((struct vlvSearch *)be->vlvSearchList, &parentdn); + if(parent!=NULL) { - /* vlvIndex list is modified; need Wlock */ - PR_RWLock_Wlock(be->vlvSearchList_lock); - parent= vlvSearch_finddn((struct vlvSearch *)be->vlvSearchList, &parentdn); - if(parent!=NULL) - { + char *name = slapi_entry_attr_get_charptr(entryBefore, type_vlvName); + if (vlvSearch_findname(parent, name)) { + /* The vlvindex is already in the vlvSearchList. Skip adding it. 
*/ + LDAPDebug1Arg(LDAP_DEBUG_BACKLDBM, + "vlv_AddIndexEntry: %s is already in vlvSearchList\n", + slapi_entry_get_dn_const(entryBefore)); + } else { struct vlvIndex* newVlvIndex= vlvIndex_new(); newVlvIndex->vlv_be=be; vlvIndex_init(newVlvIndex, be, parent, entryBefore); vlvSearch_addIndex(parent, newVlvIndex); } - PR_RWLock_Unlock(be->vlvSearchList_lock); } + PR_RWLock_Unlock(be->vlvSearchList_lock); slapi_sdn_done(&parentdn); return SLAPI_DSE_CALLBACK_OK; } diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c index a9dada60b..9195b286b 100644 --- a/ldap/servers/slapd/back-ldbm/vlv_srch.c +++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c @@ -373,6 +373,9 @@ struct vlvIndex* vlvSearch_findindexname(const struct vlvSearch* plist, const char *name) { const struct vlvSearch* t= plist; + if (NULL == name) { + return NULL; + } for(; t!=NULL ; t= t->vlv_next) { struct vlvIndex *pi= t->vlv_index; @@ -781,7 +784,7 @@ void vlvIndex_go_offline(struct vlvIndex *p, backend *be) return; p->vlv_online = 0; p->vlv_enabled = 0; - p->vlv_indexlength = 0; + p->vlv_indexlength = 0; p->vlv_attrinfo->ai_indexmask |= INDEX_OFFLINE; dblayer_erase_index_file_nolock(be, p->vlv_attrinfo, 1 /* chkpt if not busy */); }
0
83668192bddcb6daa272eb418a96c10931d3386b
389ds/389-ds-base
Issue 6347 - better fix for desyncronized vlv cache (#6358) A better fix than PR 6349 about corrupted vlv cache Problem is a race condition because txn was released while building the cache. Solution keep the write txn open until the cache is fully rebuilt. Also fixed some debug logs And also added the source of a tool useful to check the vlv cache consistency Note: this remove PR #6349 and integrate PR #6356 Issue: #6347 Reviewed by @tbodaz (Thanks!)
commit 83668192bddcb6daa272eb418a96c10931d3386b Author: progier389 <[email protected]> Date: Thu Oct 10 11:29:00 2024 +0200 Issue 6347 - better fix for desyncronized vlv cache (#6358) A better fix than PR 6349 about corrupted vlv cache Problem is a race condition because txn was released while building the cache. Solution keep the write txn open until the cache is fully rebuilt. Also fixed some debug logs And also added the source of a tool useful to check the vlv cache consistency Note: this remove PR #6349 and integrate PR #6356 Issue: #6347 Reviewed by @tbodaz (Thanks!) diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.c index 3ccff4c2c..33e71f1a8 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.c +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.c @@ -149,7 +149,7 @@ static inline void log_stack(int loglvl) } } -void dbi_str(MDB_cursor *cursor, int dbi, char *dbistr) +void dbi_str(MDB_cursor *cursor, int dbi, char dbistr[DBISTRMAXSIZE]) { const char *str = "?"; dbmdb_dbi_t * dbi1; @@ -161,7 +161,7 @@ void dbi_str(MDB_cursor *cursor, int dbi, char *dbistr) if (dbi1 && dbi1->dbname) { str = dbi1->dbname; } - PR_snprintf(dbistr, DBGVAL2STRMAXSIZE, "dbi: %d <%s>", dbi, str); + PR_snprintf(dbistr, DBISTRMAXSIZE, "dbi: %d <%s>", dbi, str); } #ifdef DBMDB_DEBUG @@ -297,7 +297,7 @@ dbg_mdb_cursor_open(const char *file, int lineno, const char *funcname, MDB_txn { int rc = mdb_cursor_open(txn, dbi, cursor); if (dbg_should_log(DBGMDB_LEVEL_MDBAPI, dbi, NULL)) { - char dbistr[DBGVAL2STRMAXSIZE]; + char dbistr[DBISTRMAXSIZE]; dbi_str(NULL, dbi, dbistr); dbg_log(file, lineno, funcname, DBGMDB_LEVEL_MDBAPI+DBGMDB_LEVEL_FORCE, "mdb_cursor_open(txn: %p, %s, cursor: %p)=%d", txn, dbistr, *cursor, rc); } @@ -318,7 +318,7 @@ void dbg_mdb_cursor_close(const char *file, int lineno, const char *funcname, MDB_cursor *cursor) { if (dbg_should_log(DBGMDB_LEVEL_MDBAPI, 0, cursor)) { - char dbistr[DBGVAL2STRMAXSIZE]; + 
char dbistr[DBISTRMAXSIZE]; dbi_str(cursor, 0, dbistr); mdb_cursor_close(cursor); dbg_log(file, lineno, funcname, DBGMDB_LEVEL_MDBAPI+DBGMDB_LEVEL_FORCE, "mdb_cursor_close(cursor: %p) %s", cursor, dbistr); @@ -336,7 +336,7 @@ __dbg_mdb_cursor_get(const char *file, int lineno, const char *funcname, int log char datastr[DBGVAL2STRMAXSIZE]; char flagsstr[DBGVAL2STRMAXSIZE]; char cursorstr[DBGVAL2STRMAXSIZE]; - char dbistr[DBGVAL2STRMAXSIZE]; + char dbistr[DBISTRMAXSIZE]; int sl = 0; if (dbg_should_log(loglvl, 0, cursor)) { dbi_str(cursor, 0, dbistr); @@ -376,7 +376,7 @@ if (data->mv_size <= 511) return 0; char keystr[DBGVAL2STRMAXSIZE]; char datastr[DBGVAL2STRMAXSIZE]; char flagsstr[DBGVAL2STRMAXSIZE]; - char dbistr[DBGVAL2STRMAXSIZE]; + char dbistr[DBISTRMAXSIZE]; dbi_str(NULL, dbi, dbistr); dbgval2str(keystr, sizeof keystr, key); @@ -394,7 +394,7 @@ dbg_mdb_get(const char *file, int lineno, const char *funcname, MDB_txn *txn, MD { char keystr[DBGVAL2STRMAXSIZE]; char datastr[DBGVAL2STRMAXSIZE]; - char dbistr[DBGVAL2STRMAXSIZE]; + char dbistr[DBISTRMAXSIZE]; if (dbg_should_log(DBGMDB_LEVEL_MDBAPI, dbi, NULL)) { dbi_str(NULL, dbi, dbistr); @@ -419,7 +419,7 @@ dbg_mdb_del(const char *file, int lineno, const char *funcname, MDB_txn *txn, MD if (dbg_should_log(DBGMDB_LEVEL_MDBAPI, dbi, NULL)) { char keystr[DBGVAL2STRMAXSIZE]; char datastr[DBGVAL2STRMAXSIZE]; - char dbistr[DBGVAL2STRMAXSIZE]; + char dbistr[DBISTRMAXSIZE]; dbi_str(NULL, dbi, dbistr); dbgval2str(keystr, sizeof keystr, key); @@ -438,7 +438,7 @@ dbg_mdb_cursor_put(const char *file, int lineno, const char *funcname, MDB_curso char datastr[DBGVAL2STRMAXSIZE]; char flagsstr[DBGVAL2STRMAXSIZE]; char cursorstr[DBGVAL2STRMAXSIZE]; - char dbistr[DBGVAL2STRMAXSIZE]; + char dbistr[DBISTRMAXSIZE]; dbi_str(cursor, 0, dbistr); dbgval2str(keystr, sizeof keystr, key); @@ -467,32 +467,46 @@ dbg_mdb_drop(const char *file, int lineno, const char *funcname, MDB_txn *txn, M { int rc = mdb_drop(txn, dbi, del); if 
(dbg_should_log(DBGMDB_LEVEL_MDBAPI, dbi, NULL)) { - char dbistr[DBGVAL2STRMAXSIZE]; + char dbistr[DBISTRMAXSIZE]; dbi_str(NULL, dbi, dbistr); dbg_log(file, lineno, funcname, DBGMDB_LEVEL_MDBAPI+DBGMDB_LEVEL_FORCE, "mdb_drop(txn: %p, %s, del: %d)=%d", txn, dbistr, del, rc); } return rc; } +int txn_loglvl() +{ + if (dbgmdb_level & DBGMDB_LEVEL_MDBAPI) { + return DBGMDB_LEVEL_MDBAPI; + } + if (dbgmdb_level & DBGMDB_LEVEL_TXN) { + return DBGMDB_LEVEL_TXN; + } + return 0; +} + int dbg_txn_begin(const char *file, int lineno, const char *funcname, MDB_env *env, MDB_txn *parent_txn, int flags, MDB_txn **txn) { - if (!(dbgmdb_level & DBGMDB_LEVEL_TXN)) { + int lvl = txn_loglvl(); + if (lvl == 0) { return mdb_txn_begin(env, parent_txn, flags, txn); } char strflags[100]; dbmdb_envflags2str(flags, strflags, sizeof strflags); - dbg_log(file, lineno, funcname, DBGMDB_LEVEL_TXN, "TXN_BEGIN[%d]. txn_parent=%p, %s, stack is:", pthread_gettid(), parent_txn, strflags); + dbg_log(file, lineno, funcname, lvl, "TXN_BEGIN[%d]. txn_parent=%p, %s, stack is:", pthread_gettid(), parent_txn, strflags); log_stack(DBGMDB_LEVEL_TXN); - dbg_log(file, lineno, funcname, DBGMDB_LEVEL_TXN, "Waiting ...\n"); + dbg_log(file, lineno, funcname, lvl, "Waiting ...\n"); int rc = mdb_txn_begin(env, parent_txn, flags, txn); - dbg_log(file, lineno, funcname, DBGMDB_LEVEL_TXN, "Done. txn_begin(env=%p, txn_parent=%p, flags=0x%x, txn=0x%p) returned %d.", + dbg_log(file, lineno, funcname, lvl, "Done. txn_begin(env=%p, txn_parent=%p, flags=0x%x, txn=0x%p) returned %d.", env, parent_txn, flags, *txn, rc); return rc; } int dbg_txn_end(const char *file, int lineno, const char *funcname, MDB_txn *txn, int iscommit) { - if (!(dbgmdb_level & DBGMDB_LEVEL_TXN)) { + int lvl = txn_loglvl(); + char *stackis = (lvl == DBGMDB_LEVEL_TXN) ? 
" stack is:" : ""; + if (lvl == 0) { if (iscommit) { return mdb_txn_commit(txn); } else { @@ -503,12 +517,14 @@ int dbg_txn_end(const char *file, int lineno, const char *funcname, MDB_txn *txn int rc = 0; if (iscommit) { rc = mdb_txn_commit(txn); - dbg_log(file, lineno, funcname, DBGMDB_LEVEL_TXN, "TXN_COMMIT[%d] (txn=0x%p) returned %d. stack is:", pthread_gettid(), txn, rc); + dbg_log(file, lineno, funcname, lvl, "TXN_COMMIT[%d] (txn=0x%p) returned %d.%s", pthread_gettid(), txn, rc, stackis); } else { mdb_txn_abort(txn); - dbg_log(file, lineno, funcname, DBGMDB_LEVEL_TXN, "TXN_ABORT[%d] (txn=0x%p). stack is:", pthread_gettid(), txn); + dbg_log(file, lineno, funcname, lvl, "TXN_ABORT[%d] (txn=0x%p).%s", pthread_gettid(), txn, stackis); } + if (lvl == DBGMDB_LEVEL_TXN) { log_stack(DBGMDB_LEVEL_TXN); + } return rc; } @@ -516,14 +532,14 @@ int dbg_txn_end(const char *file, int lineno, const char *funcname, MDB_txn *txn void dbg_txn_reset(const char *file, int lineno, const char *funcname, MDB_txn *txn) { mdb_txn_reset(txn); - dbg_log(file, lineno, funcname, DBGMDB_LEVEL_TXN, "TXN_RESET[%d] (txn=0x%p). stack is:", pthread_gettid(), txn); + dbg_log(file, lineno, funcname, txn_loglvl(), "TXN_RESET[%d] (txn=0x%p). stack is:", pthread_gettid(), txn); log_stack(DBGMDB_LEVEL_TXN); } int dbg_txn_renew(const char *file, int lineno, const char *funcname, MDB_txn *txn) { int rc = mdb_txn_renew(txn); - dbg_log(file, lineno, funcname, DBGMDB_LEVEL_TXN, "TXN_RENEW[%d] (txn=0x%p) returned %d. stack is:", pthread_gettid(), txn, rc); + dbg_log(file, lineno, funcname, txn_loglvl(), "TXN_RENEW[%d] (txn=0x%p) returned %d. 
stack is:", pthread_gettid(), txn, rc); log_stack(DBGMDB_LEVEL_TXN); return rc; } @@ -533,7 +549,7 @@ void dbmdb_log_dbi_set_fn(const char *file, int lineno, const char *funcname, co Dl_info info = {0}; dladdr(fn, &info); /* Cannot use dbi_str here because slot is not yet up2date (so dbname is an argument) */ - dbg_log(file, lineno, funcname, DBGMDB_LEVEL_MDBAPI, "%s(txn=0x%p, dbi=%d <%s>, fn=0x%p <%s>)\n", action, txn, dbi, dbname, fn, info.dli_sname); + dbg_log(file, lineno, funcname, DBGMDB_LEVEL_MDBAPI, "%s(txn=0x%p, dbi=%d <%s>, fn=0x%p <%s>)\n", action, txn, dbi, dbname, fn, info.dli_sname); } int dbg_mdb_bulkop_cursor_get(const char *file, int lineno, const char *funcname, MDB_cursor *cursor, MDB_val *key, MDB_val *data, MDB_cursor_op op) diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.h b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.h index e25d77509..3bd709a4c 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.h +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.h @@ -20,11 +20,13 @@ void dbmdb_format_dbslist_info(char *info, dbmdb_dbi_t *dbi); #define DBGMDB_LEVEL_PRINTABLE 0xfff +#define DBISTRMAXSIZE 80 extern int dbgmdb_level; /* defined in mdb_debug.c */ void dbg_log(const char *file, int lineno, const char *funcname, int loglevel, char *fmt, ...); void dbgval2str(char *buff, size_t bufsiz, MDB_val *val); void dbmdb_dbg_set_dbi_slots(dbmdb_dbi_t *slots); +void dbi_str(MDB_cursor *cursor, int dbi, char dbistr[DBISTRMAXSIZE]); /* #define DBMDB_DEBUG 1 */ #define DBGMDB_LEVEL_DEFAULT DBGMDB_LEVEL_MDBAPI+DBGMDB_LEVEL_TXN+DBGMDB_LEVEL_IMPORT+ \ diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c index f03e6741b..9504c2434 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c @@ -3413,6 +3413,7 @@ dbmdb_add_import_index(ImportCtx_t *ctx, const char *name, IndexInfo *ii) } } + 
DBG_LOG(DBGMDB_LEVEL_OTHER,"Calling dbmdb_open_dbi_from_filename for %s flags = 0x%x", mii->name, dbi_flags); dbmdb_open_dbi_from_filename(&mii->dbi, job->inst->inst_be, mii->name, mii->ai, dbi_flags); avl_insert(&ctx->indexes, mii, cmp_mii, NULL); } diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c index 66e6e0e8d..50db548d9 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c @@ -1245,6 +1245,8 @@ int dbmdb_open_dbi_from_filename(dbmdb_dbi_t **dbi, backend *be, const char *fil dbi_open_ctx_t octx = {0}; dbi_txn_t *txn = NULL; int rc = 0; + DBG_LOG(DBGMDB_LEVEL_OTHER, "dbmdb_open_dbi_from_filename: filename=%s flags=0x%x", filename, flags); + if (ctx->readonly || (flags&MDB_RDONLY)) { flags &= ~MDB_CREATE; @@ -1292,19 +1294,24 @@ int dbmdb_open_dbi_from_filename(dbmdb_dbi_t **dbi, backend *be, const char *fil } } if (rc) { + DBG_LOG(DBGMDB_LEVEL_OTHER, "returning %d", rc); return rc; } if (!*dbi) { + DBG_LOG(DBGMDB_LEVEL_OTHER, "returning MDB_NOTFOUND"); return MDB_NOTFOUND; } + DBG_LOG(DBGMDB_LEVEL_OTHER, "So far rc = %d", rc); if (ai && ai->ai_key_cmp_fn != (*dbi)->cmp_fn) { if (! 
(*dbi)->cmp_fn) { rc = dbmdb_update_dbi_cmp_fn(ctx, *dbi, ai->ai_key_cmp_fn, NULL); } (*dbi)->cmp_fn = ai->ai_key_cmp_fn; } + DBG_LOG(DBGMDB_LEVEL_OTHER, "So far rc = %d", rc); if (((*dbi)->state.state & DBIST_DIRTY) && !(flags & MDB_OPEN_DIRTY_DBI)) { + DBG_LOG(DBGMDB_LEVEL_OTHER, "returning MDB_NOTFOUND"); return MDB_NOTFOUND; } if (!rc && !((*dbi)->state.state & DBIST_DIRTY) && (flags & MDB_MARK_DIRTY_DBI)) { @@ -1312,12 +1319,15 @@ int dbmdb_open_dbi_from_filename(dbmdb_dbi_t **dbi, backend *be, const char *fil st.state |= DBIST_DIRTY; rc = dbmdb_update_dbi_state(ctx, *dbi, &st, NULL, PR_FALSE); } + DBG_LOG(DBGMDB_LEVEL_OTHER, "So far rc = %d", rc); if (!rc && (flags & MDB_TRUNCATE_DBI)) { octx.ctx = ctx; octx.dbi = *dbi; octx.deletion_flags = 0; + DBG_LOG(DBGMDB_LEVEL_OTHER, "truncating db"); rc = dbi_remove(&octx); } + DBG_LOG(DBGMDB_LEVEL_OTHER, "returning rc=%d", rc); return rc; } @@ -1401,11 +1411,14 @@ int dbmdb_recno_cache_get_mode(dbmdb_recno_cache_ctx_t *rcctx) rc = MDB_GET(txn, rcctx->rcdbi->dbi, &rcctx->key, &rcctx->data); if (rc == MDB_SUCCESS) { rcctx->mode = RCMODE_USE_CURSOR_TXN; + DBG_LOG(DBGMDB_LEVEL_VLV, "dbmdb_recno_cache_get_mode(%s) mode=RCMODE_USE_CURSOR_TXN rc=0", rcdbname); + return rc; } if (rc != MDB_NOTFOUND) { /* There was an error or cache is valid. * Im both cases there is no need to rebuilt the cache. 
*/ + DBG_LOG(DBGMDB_LEVEL_VLV, "dbmdb_recno_cache_get_mode(%s) mode=RCMODE_UNKNOWN rc=%d", rcdbname, rc); return rc; } } @@ -1415,7 +1428,9 @@ int dbmdb_recno_cache_get_mode(dbmdb_recno_cache_ctx_t *rcctx) TXN_ABORT(txn); txn = NULL; rcctx->mode = RCMODE_USE_SUBTXN; + DBG_LOG(DBGMDB_LEVEL_VLV, "dbmdb_recno_cache_get_mode(%s) mode=RCMODE_USE_SUBTXN rc=0", rcdbname); } else if (rc == EINVAL) { + DBG_LOG(DBGMDB_LEVEL_VLV, "dbmdb_recno_cache_get_mode(%s) mode=RCMODE_USE_NEW_THREAD rc=0", rcdbname); rcctx->mode = RCMODE_USE_NEW_THREAD; rc = 0; } diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c index 313eb35ae..e4bf27b96 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c @@ -2108,6 +2108,7 @@ void *dbmdb_recno_cache_build(void *arg) int len = 0; int rc = 0; + DBG_LOG(DBGMDB_LEVEL_VLV, "dbmdb_recno_cache_build(%s)", rcctx->rcdbname); /* Open/creat cache dbi */ rc = dbmdb_open_dbi_from_filename(&rcctx->rcdbi, rcctx->cursor->be, rcctx->rcdbname, NULL, MDB_CREATE); slapi_ch_free_string(&rcctx->rcdbname); @@ -2131,63 +2132,46 @@ void *dbmdb_recno_cache_build(void *arg) txn_ctx.flags |= DBMDB_TXNCTX_NEED_COMMIT; } } + if (rc == 0) { + rc = MDB_CURSOR_GET(txn_ctx.cursor, &key, &data, MDB_FIRST); + recno = 1; + } while (rc == 0) { slapi_log_err(SLAPI_LOG_DEBUG, "dbmdb_recno_cache_build", "recno=%d\n", recno); - if (recno % RECNO_CACHE_INTERVAL != 1) { - recno++; - rc = MDB_CURSOR_GET(txn_ctx.cursor, &key, &data, MDB_NEXT); - continue; - } - /* close the txn from time to time to avoid locking all dbi page */ - rc = dbmdb_end_recno_cache_txn(&txn_ctx, 0); - rc |= dbmdb_begin_recno_cache_txn(rcctx, &txn_ctx, rcctx->dbi->dbi); - if (rc) { - break; - } - /* Reset to new cursor to the old position */ - if (recno == 1) { - rc = MDB_CURSOR_GET(txn_ctx.cursor, &key, &data, MDB_FIRST); - } else { - rc = MDB_CURSOR_GET(txn_ctx.cursor, &key, &data, MDB_SET); - if (rc 
== MDB_NOTFOUND) { - rc = MDB_CURSOR_GET(txn_ctx.cursor, &key, &data, MDB_SET_RANGE); - } - } - if (rc) { - break; - } - /* Prepare the cache data */ - len = sizeof(*rce) + data.mv_size + key.mv_size; - rce = (dbmdb_recno_cache_elmt_t*)slapi_ch_malloc(len); - rce->len = len; - rce->recno = recno; - rce->key.mv_size = key.mv_size; - rce->key.mv_data = &rce[1]; - rce->data.mv_size = data.mv_size; - rce->data.mv_data = ((char*)&rce[1])+rce->key.mv_size; - memcpy(rce->key.mv_data, key.mv_data, key.mv_size); - memcpy(rce->data.mv_data, data.mv_data, data.mv_size); - rcdata.mv_data = rce; - rcdata.mv_size = len; - dbmdb_generate_recno_cache_key_by_recno(&rckey, recno); - rc = MDB_PUT(txn_ctx.txn, rcctx->rcdbi->dbi, &rckey, &rcdata, 0); - slapi_ch_free(&rckey.mv_data); - if (rc) { - slapi_log_err(SLAPI_LOG_ERR, "dbmdb_recno_cache_build", - "Failed to write record in db %s, key=%s error: %s\n", - rcctx->rcdbi->dbname, (char*)(key.mv_data), mdb_strerror(rc)); - } else { - dbmdb_generate_recno_cache_key_by_data(&rckey, &key, &data); + if (recno % RECNO_CACHE_INTERVAL == 1) { + /* Prepare the cache data */ + len = sizeof(*rce) + data.mv_size + key.mv_size; + rce = (dbmdb_recno_cache_elmt_t*)slapi_ch_malloc(len); + rce->len = len; + rce->recno = recno; + rce->key.mv_size = key.mv_size; + rce->key.mv_data = &rce[1]; + rce->data.mv_size = data.mv_size; + rce->data.mv_data = ((char*)&rce[1])+rce->key.mv_size; + memcpy(rce->key.mv_data, key.mv_data, key.mv_size); + memcpy(rce->data.mv_data, data.mv_data, data.mv_size); + rcdata.mv_data = rce; + rcdata.mv_size = len; + dbmdb_generate_recno_cache_key_by_recno(&rckey, recno); rc = MDB_PUT(txn_ctx.txn, rcctx->rcdbi->dbi, &rckey, &rcdata, 0); slapi_ch_free(&rckey.mv_data); - txn_ctx.flags |= DBMDB_TXNCTX_NEED_COMMIT; if (rc) { slapi_log_err(SLAPI_LOG_ERR, "dbmdb_recno_cache_build", "Failed to write record in db %s, key=%s error: %s\n", rcctx->rcdbi->dbname, (char*)(key.mv_data), mdb_strerror(rc)); + } else { + 
dbmdb_generate_recno_cache_key_by_data(&rckey, &key, &data); + rc = MDB_PUT(txn_ctx.txn, rcctx->rcdbi->dbi, &rckey, &rcdata, 0); + slapi_ch_free(&rckey.mv_data); + txn_ctx.flags |= DBMDB_TXNCTX_NEED_COMMIT; + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, "dbmdb_recno_cache_build", + "Failed to write record in db %s, key=%s error: %s\n", + rcctx->rcdbi->dbname, (char*)(key.mv_data), mdb_strerror(rc)); + } } + slapi_ch_free(&rcdata.mv_data); } - slapi_ch_free(&rcdata.mv_data); rc = MDB_CURSOR_GET(txn_ctx.cursor, &key, &data, MDB_NEXT); recno++; } @@ -2378,26 +2362,27 @@ int dbmdb_cursor_set_recno(dbi_cursor_t *cursor, MDB_val *dbmdb_key, MDB_val *db } memcpy(&recno, dbmdb_key->mv_data, sizeof (dbi_recno_t)); +#ifdef DBMDB_DEBUG + char dbistr[DBISTRMAXSIZE]; + dbi_str(cursor->cur, 0, dbistr); + DBG_LOG(DBGMDB_LEVEL_VLV, "dbmdb_cursor_set_recno: recno=%d dbi=%s", recno, dbistr); +#endif dbmdb_generate_recno_cache_key_by_recno(&cache_key, recno); rc = dbmdb_recno_cache_lookup(cursor, &cache_key, &rce); if (rc ==0) { rc = MDB_CURSOR_GET(cursor->cur, &rce->key, &rce->data, MDB_SET_RANGE); } while (rc == 0 && recno > rce->recno) { - DBG_LOG(DBGMDB_LEVEL_VLV, "Current record index is %d Target is %d\n", rce->recno, recno); + DBG_LOG(DBGMDB_LEVEL_VLV, "Current record index is %d Target is %d", rce->recno, recno); rce->recno++; rc = MDB_CURSOR_GET(cursor->cur, &rce->key, &rce->data, MDB_NEXT); } - if (rc == MDB_NOTFOUND) { - /* Stay on last record if there are no more records */ - rc = 0; - } if (rc == 0 && dbmdb_data->mv_size == rce->data.mv_size) { /* Should always be the case */ - DBG_LOG(DBGMDB_LEVEL_VLV, "SUCCESS\n"); + DBG_LOG(DBGMDB_LEVEL_VLV, "SUCCESS"); memcpy(dbmdb_data->mv_data , rce->data.mv_data, dbmdb_data->mv_size); } else { - DBG_LOG(DBGMDB_LEVEL_VLV, "FAILURE: rc=%d dbmdb_data->mv_size=%d rce->data.mv_size=%d\n", rc, dbmdb_data->mv_size, rce->data.mv_size); + DBG_LOG(DBGMDB_LEVEL_VLV, "FAILURE: rc=%d dbmdb_data->mv_size=%d rce->data.mv_size=%d", rc, 
dbmdb_data->mv_size, rce->data.mv_size); } slapi_ch_free((void**)&rce); @@ -2878,6 +2863,7 @@ dbmdb_public_clear_vlv_cache(Slapi_Backend *be, dbi_txn_t *txn, dbi_db_t *db) MDB_val ok = { 0 }; int rc = 0; + DBG_LOG(DBGMDB_LEVEL_VLV, "dbmdb_public_clear_vlv_cache(%s)", rcdbname); ok.mv_data = "OK"; ok.mv_size = 2; rc = dbmdb_open_dbi_from_filename(&rcdbi, be, rcdbname, NULL, 0); diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_ldif2db.c index 548ad2910..477a48ec9 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_ldif2db.c +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_ldif2db.c @@ -1423,7 +1423,7 @@ _get_and_add_parent_rdns(backend *be, key.mv_data = &storedid; memset(&data, 0, sizeof(data)); - rc = mdb_get(TXN(cur->txn), cur->dbi->dbi, &key, &data); + rc = MDB_GET(TXN(cur->txn), cur->dbi->dbi, &key, &data); if (rc) { slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns", "Failed to position cursor at ID " ID_FMT "\n", id); diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c index 9c455f5df..a1c1395bc 100644 --- a/ldap/servers/slapd/back-ldbm/idl_new.c +++ b/ldap/servers/slapd/back-ldbm/idl_new.c @@ -939,7 +939,6 @@ error: /* Close the cursor */ if (0 == idl_range_ctx.flag_err) { idl_range_ctx.flag_err = ret; -slapi_log_err(SLAPI_LOG_INFO, "idl_lmdb_range_fetch", "flag_err=%d\n", idl_range_ctx.flag_err); } ret = dblayer_cursor_op(&cursor, DBI_OP_CLOSE, NULL, NULL); if (ret) { @@ -955,7 +954,6 @@ slapi_log_err(SLAPI_LOG_INFO, "idl_lmdb_range_fetch", "flag_err=%d\n", idl_range } if (0 == idl_range_ctx.flag_err) { idl_range_ctx.flag_err = ret; -slapi_log_err(SLAPI_LOG_INFO, "idl_lmdb_range_fetch", "flag_err=%d\n", idl_range_ctx.flag_err); } /* sort idl */ diff --git a/ldap/servers/slapd/tools/chkvlv.c b/ldap/servers/slapd/tools/chkvlv.c new file mode 100644 index 000000000..243802150 --- /dev/null +++ b/ldap/servers/slapd/tools/chkvlv.c @@ -0,0 +1,363 @@ +/** BEGIN 
COPYRIGHT BLOCK + * Copyright (C) 2024 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). + * See LICENSE for details. + * END COPYRIGHT BLOCK **/ + + +/* Build with: gcc -o chkvlv chkvlv.c -llmdb */ +/* Usage: chkvlv dbdir */ + +#include <pthread.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include <lmdb.h> +#include <stdint.h> + + +#define T(expr) rc = expr; if (rc) { printf("%s[%d]: %s returned %s (%d)\n", __FILE__, __LINE__, #expr, mdb_strerror(rc), rc); exit(1); } + +#define RECNO_CACHE_PREFIX "/~recno-cache" +#define VLV_PREFIX "/vlv#" + + +typedef struct { + char *name; + MDB_dbi dbi; + int is_vlv; + int recno_idx; +} dbi_t; + +typedef struct { + MDB_val data; + MDB_val key; + int len; + uint32_t recno; + /* followed by key value then data value */ +} recno_elmt_t; + +typedef struct { + MDB_cursor *cur; + MDB_txn *txn; + MDB_val key; + MDB_val data; + int count; +} iterator_t; + + +int nbdbis; +dbi_t *dbis; +MDB_env *env = NULL; +MDB_dbi dbi = 0; + + +/* Iterate calling 'cb' callback on each database instance records */ +int +iterate(MDB_txn *txn, MDB_dbi dbi, int (*cb)(iterator_t *, void*), void *ctx) +{ + int rc = 0; + iterator_t it = {0}; + it.txn = txn; + T(mdb_cursor_open(txn, dbi, &it.cur)); + rc = mdb_cursor_get(it.cur, &it.key, &it.data, MDB_FIRST); + while (rc==0) { + rc = cb(&it, ctx); + if (rc == 0) { + it.count++; + rc = mdb_cursor_get(it.cur, &it.key, &it.data, MDB_NEXT); + } + } + mdb_cursor_close(it.cur); + if (rc == MDB_NOTFOUND) { + rc = 0; + } + return rc; +} + +void +open_db(const char *dbdir) +{ + int rc = 0; + char buf[200]; + char buf2[200]; + + FILE *fd = NULL; + size_t maxsize = 0; + MDB_dbi maxdbs = 0; + unsigned int maxreaders = 0; + char *pt = NULL; + + T(mdb_env_create(&env)); + sprintf(buf,"%s/INFO.mdb",dbdir); + fd = fopen(buf, "r"); + if (fd==NULL) { + perror(buf); + printf("The <dbdir>' parameter 
is probably invalid.\n"); + exit(1); + } + while (pt=fgets(buf2, (sizeof buf2), fd)) { + sscanf(buf2, "MAXSIZE=%ld", &maxsize); + sscanf(buf2, "MAXREADERS=%ud", &maxreaders); + sscanf(buf2, "MAXDBS=%ud", &maxdbs); + } + fclose(fd); + + sprintf(buf,"%s/data.mdb",dbdir); + T(mdb_env_set_maxdbs(env, maxdbs)); + T(mdb_env_set_mapsize(env, maxsize)); + T(mdb_env_set_maxreaders(env, maxreaders)); + T(mdb_env_open(env, dbdir, MDB_RDONLY , 0700)); +} + +char * +dup_val(const MDB_val *val) +{ + char *str = malloc(val->mv_size+1); + if (str==NULL) { + fprintf(stderr, "Cannot alloc %ld bytes.\n", val->mv_size+1); + exit(1); + } + memcpy(str, val->mv_data, val->mv_size); + str[val->mv_size] = 0; + return str; +} + + +int +dup_recno_elmt(const MDB_val *val, recno_elmt_t *elmt) +{ + if (val->mv_size < sizeof *elmt) { + printf("Unexpected record size %ld (Should be >= %ld)\n", + val->mv_size, sizeof *elmt); + return -1; + } + memcpy(elmt, val->mv_data, sizeof *elmt); + size_t expected_size = (sizeof *elmt) + elmt->key.mv_size + elmt->data.mv_size; + if (val->mv_size != expected_size) { + printf("Unexpected record size %ld (Should be %ld)\n", + val->mv_size, expected_size); + elmt->key.mv_data = elmt->data.mv_data = NULL; + return -1; + } + char *pt = val->mv_data; + elmt->key.mv_data = pt+sizeof *elmt; + elmt->data.mv_data = pt+(sizeof *elmt)+elmt->key.mv_size; + elmt->key.mv_data = dup_val(&elmt->key); + elmt->data.mv_data = dup_val(&elmt->data); + return 0; +} + +void +free_recno_elmt(recno_elmt_t *elmt) +{ + if (elmt->key.mv_data) { + free(elmt->key.mv_data); + elmt->key.mv_data = NULL; + } + if (elmt->data.mv_data) { + free(elmt->data.mv_data); + elmt->data.mv_data = NULL; + } +} + +int +count_cb(iterator_t *it, void *ctx) +{ + *(int*)ctx = it->count; + return 0; +} + +int +store_dbi(iterator_t *it, void *ctx) +{ + int rc = 0; + if (it->count > nbdbis) { + return MDB_NOTFOUND; + } + char *name = dup_val(&it->key); + T(mdb_dbi_open(it->txn, name , 0, &dbis[it->count].dbi)); + 
dbis[it->count].name = name; + dbis[it->count].is_vlv = strstr(name, VLV_PREFIX) && !strstr(name, RECNO_CACHE_PREFIX); + return 0; +} + +void open_dbis() +{ + int rc = 0; + MDB_dbi dbi = 0; + MDB_txn *txn = 0; + + T(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + T(mdb_dbi_open(txn, "__DBNAMES", 0, &dbi)); + T(mdb_txn_commit(txn)); + T(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + T(iterate(txn, dbi, count_cb, &nbdbis)); + dbis = calloc(nbdbis, sizeof (dbi_t)); + if (!dbis) { + fprintf(stderr, "Cannot alloc %ld bytes.\n", nbdbis*sizeof (dbi_t)); + exit(1); + } + T(iterate(txn, dbi, store_dbi, NULL)); + T(mdb_txn_commit(txn)); + + for (size_t count = 0; count < nbdbis; count++) { + if (dbis[count].is_vlv) { + char buf2[200]; + char *pt = dbis[count].name; + char *pt2 = buf2; + while (*pt!='/') { + *pt2++ = *pt++; + } + strcpy(pt2,RECNO_CACHE_PREFIX); + pt2 += strlen(pt2); + strcpy(pt2,pt); + + for (size_t i = 0; i < nbdbis; i++) { + if (strcmp(dbis[i].name, buf2)==0) { + dbis[count].recno_idx = i; + } + } + } + } +} + +void +dump_val(const MDB_val *val) +{ + unsigned char *pt = val->mv_data; + for (size_t i = val->mv_size; i >0; i--) { + if ( *pt >= 0x32 && *pt < 0x7f && *pt != '\\') { + putchar(*pt); + } else { + printf("\\%02x", *pt); + } + pt++; + } +} + +int +cmp_val(const MDB_val *val1, const MDB_val *val2) +{ + size_t len = val1->mv_size > val2->mv_size ? 
val2->mv_size : val1->mv_size; + int rc = memcmp(val1->mv_data, val2->mv_data, len); + if (rc!=0) return rc; + return val1->mv_size - val2->mv_size; +} + +typedef struct { + dbi_t *vlvdbi; + recno_elmt_t *elmt; + iterator_t *it; + int found; +} check_recno_ctx_t; + +int +check_recno_ctx(iterator_t *it, void *ctx) +{ + check_recno_ctx_t *rctx = ctx; + if (cmp_val(&it->key, &rctx->elmt->key) == 0) { + rctx->found = 1; + if (it->count+1 != rctx->elmt->recno) { + printf("Problem (invalid recno value) detected in vlv cache record #%d\n", it->count); + printf("Found %d instead of %d\n", rctx->elmt->recno, it->count+1); + } + } + return 0; +} + +void +check_recno_record(iterator_t *it, dbi_t *vlvdbi, recno_elmt_t *elmt) +{ + int rc = 0; + check_recno_ctx_t ctx = {0}; + ctx.vlvdbi = vlvdbi; + ctx.elmt = elmt; + ctx.it = it; + if (dup_recno_elmt(&it->data, elmt)) { + printf("Problem (invalid data size) detected in vlv cache record #%d\n", it->count); + return; + } + T(iterate(it->txn, vlvdbi->dbi, check_recno_ctx, &ctx)); +} + +int +walk_cache(iterator_t *it, void *ctx) +{ + char *pt = it->key.mv_data; + recno_elmt_t elmt = {0}; + MDB_val vkey; + MDB_val vdata; + switch (*pt) { + case 'O': + printf("vlv cache is in sync.\n"); + return 0; + case 'D': + if (it->key.mv_size < sizeof vkey.mv_size) { + printf("Problem (invalid key size) detected in vlv cache record #%d\n", it->count); + return 0; + } + memcpy(&vkey.mv_size, pt + it->key.mv_size - sizeof vkey.mv_size, sizeof vkey.mv_size); + vkey.mv_data = pt+1; + vdata.mv_data = pt+1+vkey.mv_size; + vdata.mv_size = it->key.mv_size - 1-vkey.mv_size - sizeof vkey.mv_size; + printf("vkey: "); dump_val(&vkey); putchar('\n'); + printf("vdata: "); dump_val(&vdata); putchar('\n'); + check_recno_record(it, ctx, &elmt); + if (cmp_val(&vkey, &elmt.key) != 0) { + printf("Problem (missmatching key value) detected in vlv cache record #%d\n", it->count); + return 0; + } + if (cmp_val(&vdata, &elmt.data) != 0) { + printf("Problem (missmatching 
data value) detected in vlv cache record #%d\n", it->count); + return 0; + } + return 0; + case 'R': + if (it->key.mv_size !=11) { + printf("Problem (invalid key size) detected in vlv cache record #%d\n", it->count); + return 0; + } + check_recno_record(it, ctx, &elmt); + return 0; + } + + return 0; +} + +void +process_vlv(int idx) +{ + int rc = 0; + MDB_txn *txn = 0; + printf("Processing: %s\n", dbis[idx].name); + T(mdb_txn_begin(env, NULL, MDB_RDONLY, &txn)); + T(iterate(txn, dbis[dbis[idx].recno_idx].dbi, walk_cache, &dbis[idx])) + T(mdb_txn_commit(txn)); +} + + +int main(int argc, char **argv) +{ + int rc = 0; + if (argc != 2) { + printf("Usage: %s <dbdir>\n", argv[1]); + printf("\tThis tools check the lmdb vlv caches consistency\n") + exit(1); + } + char *dbdir = argv[1]; + + open_db(argv[1]); + open_dbis(); + for (size_t i = 0; i < nbdbis; i++) { + if (dbis[i].is_vlv) { + process_vlv(i); + } + } + return 0; +}
0
aa64641d1974bb52fc4d02808362e76dd86d9cd0
389ds/389-ds-base
Ticket #48755 - moving an entry could make the online init fail Description: The upgrade script template 91reindex.pl.in had a syntax error. See also Bug 1353592 - Setup-ds.pl --update fails https://fedorahosted.org/389/ticket/48755 Note: one character fix.
commit aa64641d1974bb52fc4d02808362e76dd86d9cd0 Author: Noriko Hosoi <[email protected]> Date: Thu Jul 7 16:38:13 2016 -0700 Ticket #48755 - moving an entry could make the online init fail Description: The upgrade script template 91reindex.pl.in had a syntax error. See also Bug 1353592 - Setup-ds.pl --update fails https://fedorahosted.org/389/ticket/48755 Note: one character fix. diff --git a/ldap/admin/src/scripts/91reindex.pl.in b/ldap/admin/src/scripts/91reindex.pl.in index c861f64cf..99b08e33d 100644 --- a/ldap/admin/src/scripts/91reindex.pl.in +++ b/ldap/admin/src/scripts/91reindex.pl.in @@ -12,7 +12,7 @@ sub runinst { # rdn-format value. See $rdn_format set below. # If equal to or greater than this value, no need to reindex. # If it needs to be unconditionally reindexed, set 0. - my @rdnconditions = (4) + my @rdnconditions = (4); my $config = $conn->search("cn=config", "base", "(objectclass=*)"); if (!$config) {
0
9132f07b84d7513fe75c38ee1e5c42ad25de19c4
389ds/389-ds-base
Issue 5743 - Disabling replica crashes the server (#5746) * Issue 5743 - Disabling replica crashes the server Problem: Server crash when disabling replication on a supplier/hub/consumer because of a null pointer exception while trying to delete the changelog. Solution is trivial: do not try to use NULL pointer. I double checked that the changelog db is still deleted in SUPPLIER/HUB case (without the fix the crash also occurs in these cases, I suspect that the changelog removal code is called twice)
commit 9132f07b84d7513fe75c38ee1e5c42ad25de19c4 Author: progier389 <[email protected]> Date: Fri Apr 28 17:02:29 2023 +0200 Issue 5743 - Disabling replica crashes the server (#5746) * Issue 5743 - Disabling replica crashes the server Problem: Server crash when disabling replication on a supplier/hub/consumer because of a null pointer exception while trying to delete the changelog. Solution is trivial: do not try to use NULL pointer. I double checked that the changelog db is still deleted in SUPPLIER/HUB case (without the fix the crash also occurs in these cases, I suspect that the changelog removal code is called twice) diff --git a/dirsrvtests/tests/suites/replication/replica_roles_test.py b/dirsrvtests/tests/suites/replication/replica_roles_test.py new file mode 100644 index 000000000..5f70488e3 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/replica_roles_test.py @@ -0,0 +1,125 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import os +import itertools +import pytest +import ldap +from lib389._constants import SUFFIX +from lib389.topologies import topology_st as topo +from lib389.replica import Replicas + + +log = logging.getLogger(__name__) + + +ROLE_TO_CONFIG = { + "None" : {}, + "supplier" : { + "nsDS5Flags": 1, + "nsDS5ReplicaType": 3, + "nsDS5ReplicaId": 1, + }, + "hub" : { + "nsDS5Flags": 1, + "nsDS5ReplicaType": 2, + "nsDS5ReplicaId": 65535, + }, + "consumer" : { + "nsDS5Flags": 0, + "nsDS5ReplicaType": 2, + "nsDS5ReplicaId": 65535, + }, + +} + +REPLICA_PROPERTIES = { + 'cn': 'replica', + 'nsDS5ReplicaRoot': SUFFIX, + 'nsDS5ReplicaBindDN': 'cn=replmgr,cn=config', +} + + +def verify_role(replicas, role): + """Verify that instance has the right replica attrbutes.""" + log.info("Verify role '%s'", role) + expected = ROLE_TO_CONFIG[role] + rep = {} + try: + replica = replicas.get(SUFFIX) + rep["nsDS5Flags"] = replica.get_attr_val_int("nsDS5Flags") + rep["nsDS5ReplicaType"] = replica.get_attr_val_int("nsDS5ReplicaType") + rep["nsDS5ReplicaId"] = replica.get_attr_val_int("nsDS5ReplicaId") + except ldap.NO_SUCH_OBJECT: + pass + log.info('verify_role: role: %s expected: %s found: %s', role, expected, rep) + assert rep == expected + + +def config_role(replicas, role): + """Configure replica role.""" + log.info("Set role to: '%s'", role) + try: + replica = replicas.get(SUFFIX) + except ldap.NO_SUCH_OBJECT: + replica = None + properties = { key:str(val) for dct in (REPLICA_PROPERTIES, + ROLE_TO_CONFIG[role]) for key,val in dct.items() } + if replica: + if role == "None": + replica.delete() + else: + # Cannot use replica.ensure_state here because: + # lib389 complains if nsDS5ReplicaRoot is not set + # 389ds complains if nsDS5ReplicaRoot it is set + # replica.ensure_state(rdn='cn=replica', properties=properties) + mods = [ (key, str(val)) + for key,val in ROLE_TO_CONFIG[role].items() + if str(val).lower() != replica.get_attr_val_utf8_l(key) 
] + log.debug(f'replica.replace_many({mods})') + replica.replace_many(*mods) + elif role != "None": + replicas.create(properties=properties) + + [email protected]( + "from_role,to_role", + itertools.permutations( ("None", "supplier", "hub", "consumer" ) , 2 ) +) +def test_switching_roles(topo, from_role, to_role): + """Test all transitions between roles/ CONSUMER/HUB/SUPPLIER/NONE + + :id: 6e9a697b-d5a0-45ff-b9c7-5fa14ea0c102 + :setup: Standalone Instance + :steps: + 1. Set initial replica role + 2. Verify initial replica role + 3. Set final replica role + 4. Verify final replica role + :expectedresults: + 1. No error + 2. No error + 3. No error + 4. No error + """ + + inst = topo.standalone + replicas = Replicas(inst) + inst.start() + config_role(replicas, from_role) + verify_role(replicas, from_role) + config_role(replicas, to_role) + verify_role(replicas, to_role) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c index 2bb6f785f..3f4e57444 100644 --- a/ldap/servers/plugins/replication/cl5_api.c +++ b/ldap/servers/plugins/replication/cl5_api.c @@ -365,8 +365,10 @@ cldb_RemoveReplicaDB(Replica *replica) int rc = 0; cldb_Handle *cldb = replica_get_cl_info(replica); - cldb->deleteFile = 1; - rc = cldb_UnSetReplicaDB(replica, NULL); + if (cldb) { + cldb->deleteFile = 1; + rc = cldb_UnSetReplicaDB(replica, NULL); + } return rc; }
0
582957d7b7a2cc438de748a6752c21dc9af1040e
389ds/389-ds-base
Fixed missing symbol issues for Solaris ds_newinst
commit 582957d7b7a2cc438de748a6752c21dc9af1040e Author: Nathan Kinder <[email protected]> Date: Wed Mar 16 00:02:58 2005 +0000 Fixed missing symbol issues for Solaris ds_newinst diff --git a/ldap/admin/src/Makefile b/ldap/admin/src/Makefile index 2d4315790..b78bd1f26 100644 --- a/ldap/admin/src/Makefile +++ b/ldap/admin/src/Makefile @@ -63,6 +63,7 @@ ifeq ($(ARCH), WINNT) PLATFORM_INCLUDE = -I$(BUILD_ROOT)/include/nt SUBSYSTEM=console EXTRA_LIBS+=comctl32.lib $(LDAP_LIBUTIL) +OPENSOURCE_LIBS += comctl32.lib $(LDAP_LIBUTIL) EXTRA_LIBS_DEP+=$(LDAP_LIBUTIL_DEP) ifeq ($(DEBUG), optimize) @@ -80,9 +81,11 @@ ifeq ($(USE_64), 1) EXTRALDFLAGS += -xarch=v9 endif EXTRA_LIBS += -lsocket -lnsl -lgen -lm -lposix4 -lthread +OPENSOURCE_LIBS += -lsocket -lnsl -lgen -lm -lposix4 -lthread else ifeq ($(ARCH),SOLARISx86) EXTRA_LIBS += -lsocket -lnsl -lgen -lm -lposix4 -lthread +OPENSOURCE_LIBS += -lsocket -lnsl -lgen -lm -lposix4 -lthread else ifeq ($(ARCH),HPUX) ifdef FORTEZZA diff --git a/ldap/admin/src/create_instance.c b/ldap/admin/src/create_instance.c index cba7f6042..a7ead0ae9 100644 --- a/ldap/admin/src/create_instance.c +++ b/ldap/admin/src/create_instance.c @@ -108,57 +108,6 @@ static char *gen_presence_init_script(char *sroot, server_config_s *cf, static int init_presence(char *sroot, server_config_s *cf, char *cs_path); #endif -#if defined( SOLARIS ) -/* - * Solaris 9+ specific installation - */ -extern int iDSISolaris; -static char *sub_token(const char *, const char *, int, const char *, int); -/* - * If for some reasons, sub_token fails to generate the - * "etc" and "var" server_root from the actual "server_root", - * then the following hard-coded pathnames will be used. 
- */ -#define SOLARIS_ETC_DIR "/etc/iplanet/ds5" -#define SOLARIS_VAR_DIR "/var/ds5" - -/* - * Solaris 9+ specific installation - * The following function replaces the first occurence - * of "token" in the string "s" by "replace" - */ -static char * -sub_token(const char *s, const char *token, int tokenlen, - const char *replace, int replacelen) -{ - char *n = 0, *d; - char *ptr = (char*)strstr(s, token); - const char *begin; - int len; - if (!ptr) - return n; - - d = n = (char *) calloc(strlen(s) + replacelen + 1, 1); - if (!n) - return n; - begin = s; - len = (int)(ptr - begin); - strncpy(d, begin, len); - d += len; - begin = ptr + tokenlen; - len = replacelen; - strncpy(d, replace, len); - d += len; - for (ptr = (char *)begin; ptr && *ptr; LDAP_UTF8INC(ptr)) - { - *d = *ptr; - LDAP_UTF8INC(d); - } - *d = 0; - return n; -} -#endif /* SOLARIS */ - static char *make_error(char *fmt, ...) { static char errbuf[ERR_SIZE]; @@ -761,17 +710,6 @@ char *gen_perl_script(char *s_root, char *cs_path, char *name, char *fmt, ...) chmod( fn, NEWSCRIPT_MODE); #endif -#if defined( SOLARIS ) - /* - * Solaris 9+ specific installation - * Log all non <server_root>/slapd-identifier files/directories - * created by the post_installer so that they can be removed - * during un-install. 
- */ - if (iDSISolaris) - logUninstallInfo(s_root, PRODUCT_NAME, PRODUCT_NAME, fn); -#endif - return NULL; } @@ -822,13 +760,6 @@ char *gen_perl_script_auto(char *s_root, char *cs_path, char *name, return make_error("Could not write %s to %s (%s).", ofn, fn, ds_system_errmsg()); } -#if defined( SOLARIS ) - /* - * Solaris 9+ specific installation - */ - if (iDSISolaris) - logUninstallInfo(s_root, PRODUCT_NAME, PRODUCT_NAME, fn); -#endif return NULL; } @@ -877,14 +808,6 @@ char *gen_perl_script_auto_for_migration(char *s_root, char *cs_path, char *name ds_system_errmsg()); } -#if defined( SOLARIS ) - /* - * Solaris 9+ specific installation - */ - if (iDSISolaris) - logUninstallInfo(s_root, PRODUCT_NAME, PRODUCT_NAME, fn); -#endif - return NULL; } @@ -1126,78 +1049,15 @@ char *create_server(server_config_s *cf, char *param_name) return t; /* Create slapd-nickname directory */ -#if defined( SOLARIS ) - /* - * Verify if configuration is for native solaris packages - * This is because if console is used to create instance - * then -S is not passed to ds_create. 
- * <server_root>/.native_solaris file acts as the flag - */ - if (!iDSISolaris) { - PR_snprintf(otherline, sizeof(otherline), "%s%c.native_solaris", sroot, FILE_PATHSEP); - if (create_instance_exists(otherline)) { - iDSISolaris = 1; - } - } - - if (iDSISolaris) { - /* - * Create the slapd-nickname directory under "var" - */ - sub = sub_token(sroot,"/usr/iplanet/",13,"/var/",5); - if (sub) { - PR_snprintf(subdirvar, sizeof(subdirvar), "%s/"PRODUCT_NAME"-%s", sub, cf->servid); - free(sub); - } - else { - PR_snprintf(subdirvar, sizeof(subdirvar), "%s/"PRODUCT_NAME"-%s", SOLARIS_VAR_DIR, cf->servid); - } - if( (create_instance_mkdir_p(subdirvar, NEWDIR_MODE)) ) - return make_error("mkdir %s failed (%s)", subdirvar, ds_system_errmsg()); - - /* - * Create the slapd-nickname directory under "etc" - */ - sub = sub_token(sroot,"/usr/",5,"/etc/",5); - if (sub) { - PR_snprintf(subdiretc, sizeof(subdiretc), "%s/"PRODUCT_NAME"-%s", sub, cf->servid); - free(sub); - } - else { - PR_snprintf(subdiretc, sizeof(subdiretc), "%s/"PRODUCT_NAME"-%s", SOLARIS_ETC_DIR, cf->servid); - } - if( (create_instance_mkdir_p(subdiretc, NEWDIR_MODE)) ) - return make_error("mkdir %s failed (%s)", subdiretc, ds_system_errmsg()); - PR_snprintf(subdir, sizeof(subdir), "%s%c"PRODUCT_NAME"-%s", sroot, FILE_PATHSEP, - cf->servid); - if( (create_instance_symlink(subdirvar, subdir)) ) - return make_error("symlink %s ==> %s failed (%s)", subdir, subdirvar, ds_system_errmsg()); - } - else { - PR_snprintf(subdir, sizeof(subdir), "%s%c"PRODUCT_NAME"-%s", sroot, FILE_PATHSEP, - cf->servid); - if( (create_instance_mkdir(subdir, NEWDIR_MODE)) ) - return make_error("mkdir %s failed (%s)", subdir, ds_system_errmsg()); - } -#else PR_snprintf(subdir, sizeof(subdir), "%s%c"PRODUCT_NAME"-%s", sroot, FILE_PATHSEP, cf->servid); if( (create_instance_mkdir(subdir, NEWDIR_MODE)) ) return make_error("mkdir %s failed (%s)", subdir, ds_system_errmsg()); -#endif /* SOLARIS */ /* Create slapd-nickname/config directory */ 
PR_snprintf(line, sizeof(line), "%s%cconfig", subdir, FILE_PATHSEP); if( (create_instance_mkdir(line, NEWDIR_MODE)) ) return make_error("mkdir %s failed (%s)", line, ds_system_errmsg()); -#if defined( SOLARIS ) - if (iDSISolaris) { - PR_snprintf(line, sizeof(line), "%s%cconfig", subdirvar, FILE_PATHSEP); - PR_snprintf(otherline, sizeof(otherline), "%s%cconfig", subdiretc, FILE_PATHSEP); - if( (create_instance_symlink(line, otherline)) ) - return make_error("symlink %s ==> %s failed (%s)", otherline, line, ds_system_errmsg()); - } -#endif /* SOLARIS */ /* Create slapd-nickname/config/schema directory */ PR_snprintf(line, sizeof(line), "%s%cconfig%cschema", subdir, FILE_PATHSEP, FILE_PATHSEP); @@ -1220,18 +1080,10 @@ char *create_server(server_config_s *cf, char *param_name) PR_snprintf(line, sizeof(line), "%s%chttpacl", cf->sroot, FILE_PATHSEP); if( (create_instance_mkdir(line, NEWDIR_MODE)) ) return make_error("mkdir %s failed (%s)", line, ds_system_errmsg()); -#if defined( SOLARIS ) - if (iDSISolaris) - logUninstallInfo(sroot, PRODUCT_NAME, PRODUCT_NAME, line); -#endif /* SOLARIS */ #ifdef XP_UNIX /* Start/stop/rotate/restart scripts */ -#if defined( SOLARIS ) - if (getenv("USE_DEBUGGER") && !iDSISolaris) -#else if (getenv("USE_DEBUGGER")) -#endif /* SOLARIS */ { char *debugger = getenv("DSINST_DEBUGGER"); char *debugger_command = getenv("DSINST_DEBUGGER_CMD"); @@ -1504,14 +1356,6 @@ char *create_server(server_config_s *cf, char *param_name) /* config subdir owned by server user */ if( (t = chownconfig(subdir, cf->servuser)) ) return t; -#if defined( SOLARIS ) - if (iDSISolaris) { - /* Need to change owner of the etc link too */ - if( (t = chownconfig(subdiretc, cf->servuser)) ) - return t; - } -#endif /* SOLARIS */ - #else /* XP_WIN32 */ /* Windows platforms have some extra setup */ @@ -2033,13 +1877,6 @@ char *ds_cre_subdirs(char *sroot, server_config_s *cf, char *cs_path, PR_snprintf(subdir, sizeof(subdir), "%s%cbin%cslapd%cauthck", sroot, FILE_PATHSEP, 
FILE_PATHSEP, FILE_PATHSEP); if( (t = create_instance_mkdir_p(subdir, NEWDIR_MODE)) ) return(t); -#if defined( SOLARIS ) - /* - * Solaris 9+ specific installation - */ - if (iDSISolaris) - logUninstallInfo(sroot, PRODUCT_NAME, PRODUCT_NAME, subdir); -#endif /* SOLARIS */ return (t); } @@ -2491,17 +2328,6 @@ char *ds_gen_scripts(char *sroot, server_config_s *cf, char *cs_path) " com.netscape.admin.dirserv.cmdln.%s $arg\n", sroot, cl_javafiles[cls]); if(t) return t; -#if defined( SOLARIS ) - /* - * Solaris 9+ specific installation - */ - if (iDSISolaris) - { - PR_snprintf(fn, sizeof(fn), "%s/%s", server, cl_scripts[cls]); - logUninstallInfo(sroot, PRODUCT_NAME, PRODUCT_NAME, fn); - } -#endif /* SOLARIS */ - } @@ -4687,14 +4513,6 @@ write_ldap_info( char *slapd_server_root, server_config_s *cf) fclose(fp); } -#if defined( SOLARIS ) - /* - * Solaris 9+ specific installation - */ - if (iDSISolaris) - logUninstallInfo(slapd_server_root, PRODUCT_NAME, PRODUCT_NAME, infoFileName); - -#endif /* SOLARIS */ PR_smprintf_free(infoFileName); return ret;
0
e867d1451c21d2b4d11cd8b8e77b9c762135f323
389ds/389-ds-base
Issue 5385 - LMDB - import crash in rdncache_add_elem (#5406) * Issue 5385 - LMDB - import crash in rdncache_add_elem * Issue 5385 - LMDB - fix tabulation issues.
commit e867d1451c21d2b4d11cd8b8e77b9c762135f323 Author: progier389 <[email protected]> Date: Tue Aug 9 16:31:08 2022 +0200 Issue 5385 - LMDB - import crash in rdncache_add_elem (#5406) * Issue 5385 - LMDB - import crash in rdncache_add_elem * Issue 5385 - LMDB - fix tabulation issues. diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_rdncache.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_rdncache.c index b10cfd747..81918e023 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_rdncache.c +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_rdncache.c @@ -6,6 +6,7 @@ * See LICENSE for details. * END COPYRIGHT BLOCK **/ +#include <assert.h> #include "mdb_import.h" static RDNcacheElem_t *rdncache_new_elem(RDNcacheHead_t *head, ID entryid, ID parentid, int nrdnlen, const char *nrdn, int rdnlen, const char *rdn, WorkerQueueData_t *slot); @@ -15,6 +16,10 @@ int rdncache_has_older_slots(ImportCtx_t *ctx, WorkerQueueData_t *slot); /* Should maybe use ./ldap/libraries/libavl/avl.c instead of array */ +#define RDNCACHE_MUTEX_LOCK(l) assert(pthread_mutex_lock(l) == 0) +#define RDNCACHE_MUTEX_UNLOCK(l) assert(pthread_mutex_unlock(l) == 0) + + static RDNcacheHead_t * rdncache_new_head(RDNcache_t *cache) { @@ -89,11 +94,11 @@ rdncache_elem_release(RDNcacheElem_t **elem) RDNcache_t * rdncache_init(ImportCtx_t *ctx) { - RDNcache_t *cache = CALLOC(RDNcache_t); + RDNcache_t *cache = CALLOC(RDNcache_t); cache->ctx = ctx; pthread_mutex_init(&cache->mutex, NULL); pthread_cond_init(&cache->condvar, NULL); - cache->cur = rdncache_new_head(cache); + cache->cur = rdncache_new_head(cache); cache->prev = rdncache_new_head(cache); return cache; } @@ -175,8 +180,8 @@ rdncache_index_lookup_by_id(RDNcache_t *cache, ID entryid) RDNcacheElem_t *elem = NULL; ImportCtx_t *ctx = cache->ctx; backend *be = ctx->job->inst->inst_be; - MDB_val key = {0}; - MDB_val data = {0}; + MDB_val key = {0}; + MDB_val data = {0}; dbmdb_cursor_t cur = {0}; char key_str[10]; int nrdnlen = 0; @@ -220,8 +225,8 @@ 
rdncache_index_lookup_by_rdn(RDNcache_t *cache, ID parentid, int _nrdnlen, cons backend *be = ctx->job->inst->inst_be; char *elem2search = NULL; dbmdb_cursor_t cur = {0}; - MDB_val data = {0}; - MDB_val key = {0}; + MDB_val data = {0}; + MDB_val key = {0}; char *nrdn = NULL; char *rdn = NULL; char key_str[10]; @@ -276,6 +281,25 @@ rdncache_index_lookup_by_rdn(RDNcache_t *cache, ID parentid, int _nrdnlen, cons return elem; } +/* + * Wait until all rdn of entries with entryid < current entryid are in the cache. + * Note: Caller must hold cache->mutex + * Beware: cache->cur may change while being in this function (because the mutex is released) + */ +static void +rdncache_wait4older_slots(RDNcache_t *cache, WorkerQueueData_t *slot) +{ + int has_working_worker; + if (slot) { + /* Must process dn in order to check for duplicate dn */ + while ((has_working_worker = rdncache_has_older_slots(cache->ctx, slot))) { + /* So let wait until previous working slot have stored the entry rdn */ + safe_cond_wait(&cache->condvar, &cache->mutex); + } + } +} + + /* * Add a new item in the entryrdn cur cache * parentid == 0 means that we are adding the suffix DN @@ -295,13 +319,6 @@ rdncache_new_elem(RDNcacheHead_t *head, ID entryid, ID parentid, int nrdnlen, int len; if (slot) { - /* Must process dn in order to check for duplicate dn */ - int has_working_worker = rdncache_has_older_slots(cache->ctx, slot); - while (has_working_worker) { - /* So let wait until previous working slot have stored the entry rdn */ - safe_cond_wait(&cache->condvar, &cache->mutex); - has_working_worker = rdncache_has_older_slots(cache->ctx, slot); - } elem = rdncache_rdn_lookup_no_lock(cache, slot, parentid, nrdn, 0); if (elem) { return elem; @@ -367,10 +384,11 @@ RDNcacheElem_t * rdncache_add_elem(RDNcache_t *cache, WorkerQueueData_t *slot, ID entryid, ID parentid, int nrdnlen, const char *nrdn, int rdnlen, const char *rdn) { RDNcacheElem_t *elem; - pthread_mutex_lock(&cache->mutex); + 
RDNCACHE_MUTEX_LOCK(&cache->mutex); + rdncache_wait4older_slots(cache, slot); elem = rdncache_new_elem(cache->cur, entryid, parentid, nrdnlen, nrdn, rdnlen, rdn, slot); elem = rdncache_elem_get(elem); - pthread_mutex_unlock(&cache->mutex); + RDNCACHE_MUTEX_UNLOCK(&cache->mutex); return elem; } @@ -379,11 +397,11 @@ void rdncache_rotate(RDNcache_t *cache) { RDNcacheHead_t *oldhead; RDNcacheHead_t *newhead = rdncache_new_head(cache); - pthread_mutex_lock(&cache->mutex); + RDNCACHE_MUTEX_LOCK(&cache->mutex); oldhead = cache->prev; cache->prev = cache->cur; cache->cur = newhead; - pthread_mutex_unlock(&cache->mutex); + RDNCACHE_MUTEX_UNLOCK(&cache->mutex); rdncache_head_release(&oldhead); } @@ -409,12 +427,9 @@ rdncache_has_older_slots(ImportCtx_t *ctx, WorkerQueueData_t *slot) RDNcacheElem_t *rdncache_id_lookup(RDNcache_t *cache, WorkerQueueData_t *slot, ID entryid) { RDNcacheElem_t *elem = NULL; - int has_working_worker; int idx; - pthread_mutex_lock(&cache->mutex); - has_working_worker = rdncache_has_older_slots(cache->ctx, slot); - + RDNCACHE_MUTEX_LOCK(&cache->mutex); idx = rdncache_lookup_by_id(cache->cur, entryid); if (idx >= 0) { elem = cache->cur->head_per_id[idx]; @@ -433,19 +448,16 @@ RDNcacheElem_t *rdncache_id_lookup(RDNcache_t *cache, WorkerQueueData_t *slot, } /* If still not found, last chance is that another worker is being processing it. */ - while (!elem && has_working_worker) { - /* So let wait until previous working slot have stored the entry rdn */ - safe_cond_wait(&cache->condvar, &cache->mutex); - has_working_worker = rdncache_has_older_slots(cache->ctx, slot); - } - /* Now either it is in the current cache or it is missing */ if (!elem) { + /* So let wait until previous working slot have stored the entry rdn */ + rdncache_wait4older_slots(cache, slot); + /* Now either it is in the current cache or it is missing */ idx = rdncache_lookup_by_id(cache->cur, entryid); elem = (idx >= 0) ? 
cache->cur->head_per_id[idx] : NULL; } /* increase refcount while still holding the lock */ elem = rdncache_elem_get(elem); - pthread_mutex_unlock(&cache->mutex); + RDNCACHE_MUTEX_UNLOCK(&cache->mutex); return elem; } @@ -483,24 +495,19 @@ rdncache_rdn_lookup_no_lock(RDNcache_t *cache, WorkerQueueData_t *slot, ID pare RDNcacheElem_t *rdncache_rdn_lookup(RDNcache_t *cache, WorkerQueueData_t *slot, ID parentid, const char *nrdn) { RDNcacheElem_t *elem = NULL; - int has_working_worker; - pthread_mutex_lock(&cache->mutex); - has_working_worker = rdncache_has_older_slots(cache->ctx, slot); + RDNCACHE_MUTEX_LOCK(&cache->mutex); elem = rdncache_rdn_lookup_no_lock(cache, slot, parentid, nrdn, 0); /* If not found, last chance is that another worker is being processing it. */ - while (!elem && has_working_worker) { - /* So let wait until previous working slot have stored the entry rdn */ - safe_cond_wait(&cache->condvar, &cache->mutex); - has_working_worker = rdncache_has_older_slots(cache->ctx, slot); - } - /* Now either it is in the current cache or it is missing */ if (!elem) { + /* So let wait until previous working slot have stored the entry rdn */ + rdncache_wait4older_slots(cache, slot); + /* Now either it is in the current cache or it is missing */ elem = rdncache_rdn_lookup_no_lock(cache, slot, parentid, nrdn, 1); } /* increase refcount while still holding the lock */ elem = rdncache_elem_get(elem); - pthread_mutex_unlock(&cache->mutex); + RDNCACHE_MUTEX_UNLOCK(&cache->mutex); return elem; }
0
a9509045f6973d94537d41870044839b89483ffb
389ds/389-ds-base
Issue 6571 - Nested group does not receive memberOf attribute (#6679) Bug description: There is a risk to create a loop in group membership. For example G2 is member of G1 and G1 is member of G2. Memberof plugins iterates from a node to its ancestors to update the 'memberof' values of the node. The plugin uses a valueset ('already_seen_ndn_vals') to keep the track of the nodes it already visited. It uses this valueset to detect a possible loop and in that case it does not add the ancestor as the memberof value of the node. This is an error in case there are multiples paths up to an ancestor. Fix description: The ancestor should be added to the node systematically, just in case the ancestor is in 'already_seen_ndn_vals' it skips the final recursion fixes: #6571 Reviewed by: Pierre Rogier, Mark Reynolds (Thanks !!!)
commit a9509045f6973d94537d41870044839b89483ffb Author: tbordaz <[email protected]> Date: Tue Mar 25 09:20:50 2025 +0100 Issue 6571 - Nested group does not receive memberOf attribute (#6679) Bug description: There is a risk to create a loop in group membership. For example G2 is member of G1 and G1 is member of G2. Memberof plugins iterates from a node to its ancestors to update the 'memberof' values of the node. The plugin uses a valueset ('already_seen_ndn_vals') to keep the track of the nodes it already visited. It uses this valueset to detect a possible loop and in that case it does not add the ancestor as the memberof value of the node. This is an error in case there are multiples paths up to an ancestor. Fix description: The ancestor should be added to the node systematically, just in case the ancestor is in 'already_seen_ndn_vals' it skips the final recursion fixes: #6571 Reviewed by: Pierre Rogier, Mark Reynolds (Thanks !!!) diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py index 646eb7433..51c43a71e 100644 --- a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py +++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py @@ -465,6 +465,21 @@ def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True): else: assert (not found) +def _check_membership(server, entry, expected_members, expected_memberof): + assert server + assert entry + + memberof = entry.get_attr_vals('memberof') + member = entry.get_attr_vals('member') + assert len(member) == len(expected_members) + assert len(memberof) == len(expected_memberof) + for e in expected_members: + server.log.info("Checking %s has member %s" % (entry.dn, e.dn)) + assert e.dn.encode() in member + for e in expected_memberof: + server.log.info("Checking %s is member of %s" % (entry.dn, e.dn)) + assert e.dn.encode() in memberof + def test_memberof_group(topology_st): """Test memberof does not fail if 
group is moved into scope @@ -532,6 +547,100 @@ def test_memberof_group(topology_st): _find_memberof_ext(inst, dn1, g2n, True) _find_memberof_ext(inst, dn2, g2n, True) +def test_multipaths(topology_st, request): + """Test memberof succeeds to update memberof when + there are multiple paths from a leaf to an intermediate node + + :id: 35aa704a-b895-4153-9dcb-1e8a13612ebf + + :setup: Single instance + + :steps: + 1. Create a graph G1->U1, G2->G21->U1 + 2. Add G2 as member of G1: G1->U1, G1->G2->G21->U1 + 3. Check members and memberof in entries G1,G2,G21,User1 + + :expectedresults: + 1. Graph should be created + 2. succeed + 3. Membership is okay + """ + + inst = topology_st.standalone + memberof = MemberOfPlugin(inst) + memberof.enable() + memberof.replace('memberOfEntryScope', SUFFIX) + if (memberof.get_memberofdeferredupdate() and memberof.get_memberofdeferredupdate().lower() == "on"): + delay = 3 + else: + delay = 0 + inst.restart() + + # + # Create the hierarchy + # + # + # Grp1 ---------------> User1 + # ^ + # / + # Grp2 ----> Grp21 ------/ + # + users = UserAccounts(inst, SUFFIX, rdn=None) + user1 = users.create(properties={'uid': "user1", + 'cn': "user1", + 'sn': 'SN', + 'description': 'leaf', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/user1' + }) + group = Groups(inst, SUFFIX, rdn=None) + g1 = group.create(properties={'cn': 'group1', + 'member': user1.dn, + 'description': 'group1'}) + g21 = group.create(properties={'cn': 'group21', + 'member': user1.dn, + 'description': 'group21'}) + g2 = group.create(properties={'cn': 'group2', + 'member': [g21.dn], + 'description': 'group2'}) + + # Enable debug logs if necessary + #inst.config.replace('nsslapd-errorlog-level', '65536') + #inst.config.set('nsslapd-accesslog-level','260') + #inst.config.set('nsslapd-plugin-logging', 'on') + #inst.config.set('nsslapd-auditlog-logging-enabled','on') + #inst.config.set('nsslapd-auditfaillog-logging-enabled','on') + + # + # Update the hierarchy + # + 
# + # Grp1 ----------------> User1 + # \ ^ + # \ / + # --> Grp2 --> Grp21 -- + # + g1.add_member(g2.dn) + time.sleep(delay) + + # + # Check G1, G2, G21 and User1 members and memberof + # + _check_membership(inst, g1, expected_members=[g2, user1], expected_memberof=[]) + _check_membership(inst, g2, expected_members=[g21], expected_memberof=[g1]) + _check_membership(inst, g21, expected_members=[user1], expected_memberof=[g2, g1]) + _check_membership(inst, user1, expected_members=[], expected_memberof=[g21, g2, g1]) + + def fin(): + try: + user1.delete() + g1.delete() + g2.delete() + g21.delete() + except: + pass + request.addfinalizer(fin) def _config_memberof_entrycache_on_modrdn_failure(server): diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py index 0ebdf7fb3..912dead39 100644 --- a/dirsrvtests/tests/suites/plugins/memberof_test.py +++ b/dirsrvtests/tests/suites/plugins/memberof_test.py @@ -2169,9 +2169,14 @@ def test_complex_group_scenario_6(topology_st): # add Grp[1-4] (uniqueMember) to grp5 # it creates a membership loop !!! 
+ topology_st.standalone.config.replace('nsslapd-errorlog-level', '65536') mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)] for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: topology_st.standalone.modify_s(ensure_str(grp), mods) + topology_st.standalone.config.replace('nsslapd-errorlog-level', '0') + + results = topology_st.standalone.ds_error_log.match('.*detecting a loop in group.*') + assert results time.sleep(5) # assert user[1-4] are member of grp20_[1-4] diff --git a/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py b/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py index c5ecf5227..cc25f7e6c 100644 --- a/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py +++ b/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py @@ -4220,18 +4220,18 @@ def test_slapi_memberof_reuse_only_1(topo, request, install_test_plugin): def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin): """ - Test that management hierarchy (manager) is computed with slapi_memberof + Test that membership is computed with slapi_memberof It requires slapi_memberof to ONLY reuse the computed values from memberof plugins. As memberof plugin is enabled, it returns memberof. 
with following parameters - member attribute: memberof - - membership attribute: 'manager' + - membership attribute: 'member' - span over all backends: 'off' - skip nesting membership: 'off' - - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <-- + - computation mode: MEMBEROF_REUSE_ONLY <-- - Scope: None - - ExcludeScope: ou=foo1,dc=example,dc=com <-- + - ExcludeScope: dc=example,dc=com <-- - Maximum return entries: None :id: fb4f8c86-aa39-4252-90e0-36cfd7b3dd80 @@ -4274,59 +4274,141 @@ def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin): --- e_1_parent_1_1_3_0 ---- e_1_parent_1_1_1_3_0 """ + # Configure memberof plugin to add 'memberof' attribute + # to the members ('member') of groups that are in the suffix memberof = MemberOfPlugin(topo.standalone) memberof.enable() memberof.replace('memberOfAttr', 'memberof') - memberof.replace('memberOfGroupAttr', 'manager') + memberof.replace('memberOfGroupAttr', 'member') memberof.replace('memberOfAllBackends', 'off') memberof.replace('memberOfSkipNested', 'off') memberof.replace('memberOfEntryScope', DEFAULT_SUFFIX) topo.standalone.restart() - user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + #topo.standalone.config.replace('nsslapd-errorlog-level', '65536') + #topo.standalone.config.set('nsslapd-accesslog-level','260') + #topo.standalone.config.set('nsslapd-auditlog-logging-enabled','on') + #topo.standalone.config.set('nsslapd-auditfaillog-logging-enabled','on') + #topo.standalone.config.set('nsslapd-plugin-logging', 'on') # First subtree - e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") - - e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) - - e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) - - e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) - e_2_parent_1_1_1_0 = add_entry(topo.standalone, 
uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) - e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) - e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) - e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) - - e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) - - e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) - - e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) - e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) - e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) - e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) - e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) - - # 2nd subtree - e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") - - e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) - e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) - e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) - e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) - - # third subtree - e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") - - e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) - - e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", 
manager=[ensure_bytes(e_1_parent_3_0)]) - - e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) - - e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0") + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0") + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0") + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0") + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0") + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0") + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0") + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0") + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0") + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0") + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0") + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0") + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0") + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0") + + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0") + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0") + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0") + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0") + + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0") + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0") + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0") + 
e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0") + + # e_1_parent_0 + # - e_1_parent_1_0 + # - e_2_parent_1_0 + members = [ensure_bytes(e_1_parent_1_0), + ensure_bytes(e_2_parent_1_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_1_parent_0, mod) + + # - e_1_parent_1_0 + # -- e_1_parent_1_1_0 + # -- e_2_parent_1_1_0 + members = [ensure_bytes(e_1_parent_1_1_0), + ensure_bytes(e_2_parent_1_1_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_1_parent_1_0, mod) + + # -- e_1_parent_1_1_0 + # --- e_1_parent_1_1_1_0 + # --- e_2_parent_1_1_1_0 + # --- e_3_parent_1_1_1_0 + # --- e_4_parent_1_1_1_0 + # --- e_5_parent_1_1_1_0 + members = [ensure_bytes(e_1_parent_1_1_1_0), + ensure_bytes(e_2_parent_1_1_1_0), + ensure_bytes(e_3_parent_1_1_1_0), + ensure_bytes(e_4_parent_1_1_1_0), + ensure_bytes(e_5_parent_1_1_1_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_1_parent_1_1_0, mod) + + # - e_2_parent_1_0 + # -- e_1_parent_2_1_0 + # -- e_2_parent_2_1_0 + # -- e_3_parent_2_1_0 + # -- e_4_parent_2_1_0 + members = [ensure_bytes(e_1_parent_2_1_0), + ensure_bytes(e_2_parent_2_1_0), + ensure_bytes(e_3_parent_2_1_0), + ensure_bytes(e_4_parent_2_1_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_2_parent_1_0, mod) + + # -- e_2_parent_2_1_0 + # --- e_1_parent_2_2_1_0 + members = [ensure_bytes(e_1_parent_2_2_1_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_2_parent_2_1_0, mod) + + # e_2_parent_0 + # - e_1_parent_2_0 + # - e_2_parent_2_0 + # - e_3_parent_2_0 + # - e_4_parent_2_0 + members = [ensure_bytes(e_1_parent_2_0), + ensure_bytes(e_2_parent_2_0), + ensure_bytes(e_3_parent_2_0), + ensure_bytes(e_4_parent_2_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_2_parent_0, mod) + + # e_3_parent_0 + # - e_1_parent_3_0 + members = [ensure_bytes(e_1_parent_3_0)] + mod = 
[(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_3_parent_0, mod) + + # - e_1_parent_3_0 + # -- e_1_parent_1_3_0 + members = [ensure_bytes(e_1_parent_1_3_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_1_parent_3_0, mod) + + # -- e_1_parent_1_3_0 + # --- e_1_parent_1_1_3_0 + members = [ensure_bytes(e_1_parent_1_1_3_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_1_parent_1_3_0, mod) + + # --- e_1_parent_1_1_3_0 + # ---- e_1_parent_1_1_1_3_0 + members = [ensure_bytes(e_1_parent_1_1_1_3_0)] + mod = [(ldap.MOD_REPLACE, 'member', members)] + topo.standalone.modify_s(e_1_parent_1_1_3_0, mod) + # + # configure the test plugin to request 'memberof' with the + # same scope and groupAttr ('member') so that we can + # reuse the values computed by memberof plugin + # dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), 'cn': 'test_slapi_memberof', @@ -4337,7 +4419,7 @@ def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin): 'nsslapd-plugin-depends-on-type': 'database', 'nsslapd-pluginId': 'test_slapi_memberof-plugin', 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', - 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfGroupAttr': 'member', 'slapimemberOfAttr': 'memberof', 'slapimemberOfFlag': 'MEMBEROF_REUSE_ONLY', 'slapimemberOfAllBackends': 'off', @@ -4350,63 +4432,63 @@ def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin): topo.standalone.restart() # Check the first subtree - expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, 
relation="manager") + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="member") _check_res_vs_expected("first subtree", res, expected) # Check the second subtree - expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="member") _check_res_vs_expected("second subtree", res, expected) # Check the third subtree - expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="member") _check_res_vs_expected("third subtree", res, expected) # check e_1_parent_1_0 - expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") - _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + expected = [e_1_parent_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="member") + _check_res_vs_expected("Groups which e_1_parent_1_0 is member of", res, expected) # check e_1_parent_1_1_0 - expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + expected = [e_1_parent_0, e_1_parent_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="member") _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) # check 
e_2_parent_1_1_0 - expected = [EMPTY_RESULT] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + expected = [e_1_parent_0, e_1_parent_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="member") _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) # check e_2_parent_1_0 - expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + expected = [e_1_parent_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="member") _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) # check e_2_parent_2_1_0 - expected = [e_1_parent_2_2_1_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + expected = [e_1_parent_0, e_2_parent_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="member") _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) # Check e_1_parent_3_0 - expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + expected = [e_3_parent_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="member") _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) # Check e_1_parent_1_3_0 - expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + expected = [e_3_parent_0, e_1_parent_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="member") _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) # Check 
e_1_parent_1_1_3_0 - expected = [e_1_parent_1_1_1_3_0] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + expected = [e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="member") _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) # Check e_1_parent_1_1_1_3_0 - expected = [EMPTY_RESULT] - res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + expected = [e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="member") _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) def fin(): diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index 408bfdbf5..c5290d091 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -1601,7 +1601,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn ht_grp = ancestors_cache_lookup(config, (const void *)ndn); if (ht_grp) { #if MEMBEROF_CACHE_DEBUG - slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%x)\n", ndn, ht_grp); + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s already cached (%lx)\n", ndn, (ulong) ht_grp); #endif add_ancestors_cbdata(ht_grp, callback_data); *cached = 1; @@ -1609,7 +1609,7 @@ memberof_call_foreach_dn(Slapi_PBlock *pb __attribute__((unused)), Slapi_DN *sdn } } #if MEMBEROF_CACHE_DEBUG - slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not cached\n", ndn); + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_call_foreach_dn: Ancestors of %s not 
cached\n", slapi_sdn_get_ndn(sdn)); #endif /* Escape the dn, and build the search filter. */ @@ -3248,7 +3248,8 @@ cache_ancestors(MemberOfConfig *config, Slapi_Value **member_ndn_val, memberof_g return; } #if MEMBEROF_CACHE_DEBUG - if (double_check = ancestors_cache_lookup(config, (const void*) key)) { + double_check = ancestors_cache_lookup(config, (const void*) key); + if (double_check) { dump_cache_entry(double_check, "read back"); } #endif @@ -3278,13 +3279,13 @@ merge_ancestors(Slapi_Value **member_ndn_val, memberof_get_groups_data *v1, memb sval_dn = slapi_value_new_string(slapi_value_get_string(sval)); if (sval_dn) { /* Use the normalized dn from v1 to search it - * in v2 - */ + * in v2 + */ val_sdn = slapi_sdn_new_dn_byval(slapi_value_get_string(sval_dn)); sval_ndn = slapi_value_new_string(slapi_sdn_get_ndn(val_sdn)); if (!slapi_valueset_find( ((memberof_get_groups_data *)v2)->config->group_slapiattrs[0], v2_group_norm_vals, sval_ndn)) { -/* This ancestor was not already present in v2 => Add it + /* This ancestor was not already present in v2 => Add it * Using slapi_valueset_add_value it consumes val * so do not free sval */ @@ -3333,7 +3334,7 @@ memberof_get_groups_r(MemberOfConfig *config, Slapi_DN *member_sdn, memberof_get merge_ancestors(&member_ndn_val, &member_data, data); if (!cached && member_data.use_cache) - cache_ancestors(config, &member_ndn_val, &member_data); + cache_ancestors(config, &member_ndn_val, data); slapi_value_free(&member_ndn_val); slapi_valueset_free(groupvals); @@ -3394,25 +3395,6 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data) goto bail; } - /* Have we been here before? Note that we don't loop through all of the group_slapiattrs - * in config. We only need this attribute for it's syntax so the comparison can be - * performed. Since all of the grouping attributes are validated to use the Dinstinguished - * Name syntax, we can safely just use the first group_slapiattr. 
*/ - if (slapi_valueset_find( - ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) { - /* we either hit a recursive grouping, or an entry is - * a member of a group through multiple paths. Either - * way, we can just skip processing this entry since we've - * already gone through this part of the grouping hierarchy. */ - slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, - "memberof_get_groups_callback - Possible group recursion" - " detected in %s\n", - group_ndn); - slapi_value_free(&group_ndn_val); - ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE; - goto bail; - } - /* if the group does not belong to an excluded subtree, adds it to the valueset */ if (memberof_entry_in_scope(config, group_sdn)) { /* Push group_dn_val into the valueset. This memory is now owned @@ -3422,9 +3404,21 @@ memberof_get_groups_callback(Slapi_Entry *e, void *callback_data) group_dn_val = slapi_value_new_string(group_dn); slapi_valueset_add_value_ext(groupvals, group_dn_val, SLAPI_VALUE_FLAG_PASSIN); - /* push this ndn to detect group recursion */ - already_seen_ndn_val = slapi_value_new_string(group_ndn); - slapi_valueset_add_value_ext(already_seen_ndn_vals, already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN); + if (slapi_valueset_find( + ((memberof_get_groups_data *)callback_data)->config->group_slapiattrs[0], already_seen_ndn_vals, group_ndn_val)) { + /* The group group_ndn_val has already been processed + * skip the final recursion to prevent infinite loop + */ + slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, + "memberof_get_groups_callback - detecting a loop in group %s (stop building memberof)\n", + group_ndn); + ((memberof_get_groups_data *)callback_data)->use_cache = PR_FALSE; + goto bail; + } else { + /* keep this ndn to detect a possible group recursion */ + already_seen_ndn_val = slapi_value_new_string(group_ndn); + slapi_valueset_add_value_ext(already_seen_ndn_vals, 
already_seen_ndn_val, SLAPI_VALUE_FLAG_PASSIN); + } } if (!config->skip_nested || config->fixup_task) { /* now recurse to find ancestors groups of e */
0
d6226865d957bfe282fb6fbb8cfcf7b29a80f04b
389ds/389-ds-base
Ticket 50459 - c_mutex to use pthread_mutex to allow ns sharing Bug Description: To allow nunc-stans to share the same lock as c_mutex we need to change conn to use a pthread_mutex instead. Fix Description: Change c_mutex to pthread https://pagure.io/389-ds-base/issue/50459 Author: William Brown <[email protected]> Review by: tbordaz, mreynolds (Thank you!)
commit d6226865d957bfe282fb6fbb8cfcf7b29a80f04b Author: William Brown <[email protected]> Date: Mon Jul 8 15:43:41 2019 +1000 Ticket 50459 - c_mutex to use pthread_mutex to allow ns sharing Bug Description: To allow nunc-stans to share the same lock as c_mutex we need to change conn to use a pthread_mutex instead. Fix Description: Change c_mutex to pthread https://pagure.io/389-ds-base/issue/50459 Author: William Brown <[email protected]> Review by: tbordaz, mreynolds (Thank you!) diff --git a/ldap/servers/slapd/abandon.c b/ldap/servers/slapd/abandon.c index 3f7bef018..26a2e7bf8 100644 --- a/ldap/servers/slapd/abandon.c +++ b/ldap/servers/slapd/abandon.c @@ -88,7 +88,7 @@ do_abandon(Slapi_PBlock *pb) * flag and abort the operation at a convenient time. */ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); for (o = pb_conn->c_ops; o != NULL; o = o->o_next) { if (o->o_msgid == id && o != pb_op) break; @@ -151,7 +151,7 @@ do_abandon(Slapi_PBlock *pb) o->o_results.r.r_search.nentries, (int64_t)o_hr_time_end.tv_sec, (int64_t)o_hr_time_end.tv_nsec); } - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* * Wake up the persistent searches, so they * can notice if they've been abandoned. diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c index b443d9da4..314b7c4ea 100644 --- a/ldap/servers/slapd/bind.c +++ b/ldap/servers/slapd/bind.c @@ -232,7 +232,7 @@ do_bind(Slapi_PBlock *pb) slapi_pblock_get(pb, SLAPI_PWPOLICY, &pw_response_requested); } - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); bind_credentials_clear(pb_conn, PR_FALSE, /* do not lock conn */ PR_FALSE /* do not clear external creds. */); @@ -263,7 +263,7 @@ do_bind(Slapi_PBlock *pb) * bound user can work properly */ pb_conn->c_needpw = 0; - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); log_bind_access(pb, dn ? 
dn : "empty", method, version, saslmech, NULL); diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index d3bf82b67..6136d729f 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -144,18 +144,19 @@ connection_done(Connection *conn) connection_cleanup(conn); /* free the private content, the buffer has been freed by above connection_cleanup */ slapi_ch_free((void **)&conn->c_private); + pthread_mutex_destroy(&(conn->c_mutex)); if (NULL != conn->c_sb) { ber_sockbuf_free(conn->c_sb); } - if (NULL != conn->c_mutex) { - PR_DestroyMonitor(conn->c_mutex); - } if (NULL != conn->c_pdumutex) { PR_DestroyLock(conn->c_pdumutex); } /* PAGED_RESULTS */ pagedresults_cleanup_all(conn, 0); + /* Finally, flag that we are clean - basically write a 0 ...*/ + conn->c_state = CONN_STATE_FREE; + /* * WARNING: There is a memory leak here! During a shutdown, connections * can still have events in ns add io timeout job because of post connection @@ -751,12 +752,12 @@ connection_is_free(Connection *conn, int use_lock) int rc; if (use_lock) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); } rc = conn->c_sd == SLAPD_INVALID_SOCKET && conn->c_refcnt == 0 && !(conn->c_flags & CONN_FLAG_CLOSING); if (use_lock) { - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } return rc; @@ -1130,7 +1131,7 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * size_t buffer_data_avail; int conn_closed = 0; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* * if the socket is still valid, get the ber element * waiting for us on this connection. 
timeout is handled @@ -1323,16 +1324,16 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int * } op->o_tag = *tag; done: - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); return ret; } void connection_make_readable(Connection *conn) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); conn->c_gettingber = 0; - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); signal_listner(); } @@ -1356,7 +1357,7 @@ connection_check_activity_level(Connection *conn) { int current_count = 0; int delta_count = 0; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* get the current op count */ current_count = conn->c_opscompleted; /* compare to the previous op count */ @@ -1367,7 +1368,7 @@ connection_check_activity_level(Connection *conn) conn->c_private->previous_op_count = current_count; /* update the last checked time */ conn->c_private->previous_count_check_time = slapi_current_utc_time(); - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); slapi_log_err(SLAPI_LOG_CONNS, "connection_check_activity_level", "conn %" PRIu64 " activity level = %d\n", conn->c_connid, delta_count); } @@ -1415,7 +1416,7 @@ connection_enter_leave_turbo(Connection *conn, int current_turbo_flag, int *new_ int connection_count = 0; int our_rank = 0; int threshold_rank = 0; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* We can already be in turbo mode, or not */ current_mode = current_turbo_flag; if (pagedresults_in_use_nolock(conn)) { @@ -1460,7 +1461,7 @@ connection_enter_leave_turbo(Connection *conn, int current_turbo_flag, int *new_ new_mode = 1; } } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); if (current_mode != new_mode) { if (current_mode) { slapi_log_err(SLAPI_LOG_CONNS, "connection_enter_leave_turbo", "conn %" PRIu64 " leaving turbo mode\n", conn->c_connid); @@ -1541,7 +1542,7 @@ 
connection_threadmain() return; } - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); if (pb_conn->c_anonlimits_set == 0) { /* * We have a new connection, set the anonymous reslimit idletimeout @@ -1567,7 +1568,7 @@ connection_threadmain() */ pb_conn->c_anonlimits_set = 1; } - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); if (connection_call_io_layer_callbacks(pb_conn)) { slapi_log_err(SLAPI_LOG_ERR, "connection_threadmain", @@ -1584,14 +1585,14 @@ connection_threadmain() */ PR_Sleep(PR_INTERVAL_NO_WAIT); - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* Make our own pb in turbo mode */ connection_make_new_pb(pb, conn); if (connection_call_io_layer_callbacks(conn)) { slapi_log_err(SLAPI_LOG_ERR, "connection_threadmain", "Could not add/remove IO layers from connection\n"); } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); if (!config_check_referral_mode()) { slapi_counter_increment(ops_initiated); slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsInOps); @@ -1706,9 +1707,9 @@ connection_threadmain() if ((tag != LDAP_REQ_UNBIND) && !thread_turbo_flag && !replication_connection) { if (!more_data) { conn->c_flags &= ~CONN_FLAG_MAX_THREADS; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); connection_make_readable_nolock(conn); - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); /* once the connection is readable, another thread may access conn, * so need locking from here on */ signal_listner(); @@ -1720,7 +1721,7 @@ connection_threadmain() */ } else if (!enable_nunc_stans) { /* more data in conn - just put back on work_q - bypass poll */ bypasspollcnt++; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* don't do this if it would put us over the max threads per conn */ if (conn->c_threadnumber < maxthreads) { /* for turbo, c_idlesince is set above - for !turbo and @@ 
-1736,7 +1737,7 @@ connection_threadmain() /* keep count of how many times maxthreads has blocked an operation */ conn->c_maxthreadsblocked++; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } } @@ -1772,14 +1773,14 @@ connection_threadmain() done: if (doshutdown) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); connection_remove_operation_ext(pb, conn, op); connection_make_readable_nolock(conn); conn->c_threadnumber--; slapi_counter_decrement(conns_in_maxthreads); slapi_counter_decrement(g_get_global_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads); connection_release_nolock(conn); - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); signal_listner(); slapi_pblock_destroy(pb); return; @@ -1804,9 +1805,9 @@ connection_threadmain() * continues to hold the connection */ if (!thread_turbo_flag && !more_data) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); connection_release_nolock(conn); /* psearch acquires ref to conn - release this one now */ - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } /* ps_add makes a shallow copy of the pb - so we * can't free it or init it here - just set operation to NULL. @@ -1817,7 +1818,7 @@ connection_threadmain() } else { /* delete from connection operation queue & decr refcnt */ int conn_closed = 0; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); connection_remove_operation_ext(pb, conn, op); /* If we're in turbo mode, we keep our reference to the connection alive */ @@ -1869,7 +1870,7 @@ connection_threadmain() signal_listner(); } } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } } /* while (1) */ } @@ -2125,7 +2126,7 @@ op_copy_identity(Connection *conn, Operation *op) size_t dnlen; size_t typelen; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); dnlen = conn->c_dn ? strlen(conn->c_dn) : 0; typelen = conn->c_authtype ? 
strlen(conn->c_authtype) : 0; @@ -2157,14 +2158,14 @@ op_copy_identity(Connection *conn, Operation *op) op->o_ssf = conn->c_local_ssf; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } /* Sets the SSL SSF in the connection struct. */ static void connection_set_ssl_ssf(Connection *conn) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (conn->c_flags & CONN_FLAG_SSL) { SSL_SecurityStatus(conn->c_prfd, NULL, NULL, NULL, &(conn->c_ssl_ssf), NULL, NULL); @@ -2172,7 +2173,7 @@ connection_set_ssl_ssf(Connection *conn) conn->c_ssl_ssf = 0; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } static int @@ -2223,9 +2224,9 @@ log_ber_too_big_error(const Connection *conn, ber_len_t ber_len, ber_len_t maxbe void disconnect_server(Connection *conn, PRUint64 opconnid, int opid, PRErrorCode reason, PRInt32 error) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); disconnect_server_nomutex(conn, opconnid, opid, reason, error); - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } static ps_wakeup_all_fn_ptr ps_wakeup_all_fn = NULL; diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c index 45bcab2fb..0de34cc71 100644 --- a/ldap/servers/slapd/conntable.c +++ b/ldap/servers/slapd/conntable.c @@ -51,6 +51,11 @@ connection_table_new(int table_size) ct->c[i].c_prev = NULL; ct->c[i].c_ci = i; ct->c[i].c_fdi = SLAPD_INVALID_SOCKET_INDEX; + /* + * Technically this is a no-op due to calloc, but we should always be + * careful with things like this .... 
+ */ + ct->c[i].c_state = CONN_STATE_FREE; } return ct; } @@ -75,10 +80,10 @@ connection_table_abandon_all_operations(Connection_Table *ct) { int i; for (i = 0; i < ct->size; i++) { - if (ct->c[i].c_mutex) { - PR_EnterMonitor(ct->c[i].c_mutex); + if (ct->c[i].c_state != CONN_STATE_FREE) { + pthread_mutex_lock(&(ct->c[i].c_mutex)); connection_abandon_operations(&ct->c[i]); - PR_ExitMonitor(ct->c[i].c_mutex); + pthread_mutex_unlock(&(ct->c[i].c_mutex)); } } } @@ -87,11 +92,11 @@ void connection_table_disconnect_all(Connection_Table *ct) { for (size_t i = 0; i < ct->size; i++) { - if (ct->c[i].c_mutex) { + if (ct->c[i].c_state != CONN_STATE_FREE) { Connection *c = &(ct->c[i]); - PR_EnterMonitor(c->c_mutex); + pthread_mutex_lock(&(c->c_mutex)); disconnect_server_nomutex(c, c->c_connid, -1, SLAPD_DISCONNECT_ABORT, ECANCELED); - PR_ExitMonitor(c->c_mutex); + pthread_mutex_unlock(&(c->c_mutex)); } } } @@ -117,11 +122,10 @@ connection_table_get_connection(Connection_Table *ct, int sd) /* Do not use slot 0, slot 0 is head of the list of active connections */ if (index == 0) { continue; - } else if (ct->c[index].c_mutex == NULL) { + } else if (ct->c[index].c_state == CONN_STATE_FREE) { break; - } - - if (connection_is_free(&(ct->c[index]), 1 /*use lock */)) { + } else if (connection_is_free(&(ct->c[index]), 1 /*use lock */)) { + /* Connection must be allocated, check if it's okay */ break; } } @@ -132,17 +136,30 @@ connection_table_get_connection(Connection_Table *ct, int sd) PR_ASSERT(c->c_next == NULL); PR_ASSERT(c->c_prev == NULL); PR_ASSERT(c->c_extension == NULL); - if (c->c_mutex == NULL) { + + if (c->c_state == CONN_STATE_FREE) { PR_Lock(ct->table_mutex); - c->c_mutex = PR_NewMonitor(); + + c->c_state = CONN_STATE_INIT; + + pthread_mutexattr_t monitor_attr = {0}; + pthread_mutexattr_init(&monitor_attr); + pthread_mutexattr_settype(&monitor_attr, PTHREAD_MUTEX_RECURSIVE); + if (pthread_mutex_init(&(c->c_mutex), &monitor_attr) != 0) { + slapi_log_err(SLAPI_LOG_ERR, 
"connection_table_get_connection", "pthread_mutex_init failed\n"); + exit(1); + } + c->c_pdumutex = PR_NewLock(); PR_Unlock(ct->table_mutex); - if (c->c_mutex == NULL || c->c_pdumutex == NULL) { - c->c_mutex = NULL; + if (c->c_pdumutex == NULL) { c->c_pdumutex = NULL; slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "PR_NewLock failed\n"); exit(1); } + } else { + slapi_log_err(SLAPI_LOG_ERR, "connection_table_get_connection", "Invalide connection table state - We tried to allocate to a conn NOT in state CONN_STATE_FREE - this is a complete disaster!\n"); + exit(1); } /* Let's make sure there's no cruft left on there from the last time this connection was used. */ /* Note: no need to lock c->c_mutex because this function is only @@ -364,14 +381,14 @@ connection_table_as_entry(Connection_Table *ct, Slapi_Entry *e) nreadwaiters = 0; for (i = 0; i < (ct != NULL ? ct->size : 0); i++) { PR_Lock(ct->table_mutex); - if ((ct->c[i].c_mutex == NULL) || (ct->c[i].c_mutex == (PRMonitor *)-1)) { + if (ct->c[i].c_state == CONN_STATE_FREE) { PR_Unlock(ct->table_mutex); continue; } /* Can't take c_mutex if holding table_mutex; temporarily unlock */ PR_Unlock(ct->table_mutex); - PR_EnterMonitor(ct->c[i].c_mutex); + pthread_mutex_lock(&(ct->c[i].c_mutex)); if (ct->c[i].c_sd != SLAPD_INVALID_SOCKET) { char buf2[SLAPI_TIMESTAMP_BUFSIZE+1]; size_t lendn = ct->c[i].c_dn ? 
strlen(ct->c[i].c_dn) : 6; /* "NULLDN" */ @@ -445,7 +462,7 @@ connection_table_as_entry(Connection_Table *ct, Slapi_Entry *e) attrlist_merge(&e->e_attrs, "connection", vals); slapi_ch_free_string(&newbuf); } - PR_ExitMonitor(ct->c[i].c_mutex); + pthread_mutex_unlock(&(ct->c[i].c_mutex)); } snprintf(buf, sizeof(buf), "%d", nconns); @@ -486,10 +503,10 @@ connection_table_dump_activity_to_errors_log(Connection_Table *ct) for (i = 0; i < ct->size; i++) { Connection *c = &(ct->c[i]); - if (c->c_mutex) { + if (c->c_state) { /* Find the connection we are referring to */ int j = c->c_fdi; - PR_EnterMonitor(c->c_mutex); + pthread_mutex_lock(&(c->c_mutex)); if ((c->c_sd != SLAPD_INVALID_SOCKET) && (j >= 0) && (c->c_prfd == ct->fd[j].fd)) { int r = ct->fd[j].out_flags & SLAPD_POLL_FLAGS; @@ -498,7 +515,7 @@ connection_table_dump_activity_to_errors_log(Connection_Table *ct) "activity on %d%s\n", i, r ? "r" : ""); } } - PR_ExitMonitor(c->c_mutex); + pthread_mutex_unlock(&(c->c_mutex)); } } } diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 519fd2f86..a240d6e78 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -167,69 +167,6 @@ static ns_job_func_t ns_handlers[] = { ns_handle_pr_read_ready, ns_handle_closure }; -/* - * NSPR has different implementations for PRMonitor, depending - * on the availble threading model - * The PR_TestAndEnterMonitor is not available for pthreads - * so this is a implementation based on the code in - * prmon.c adapted to resemble the implementation in ptsynch.c - * - * The function needs access to the elements of the PRMonitor struct. - * Therfor the pthread variant of PRMonitor is copied here. 
- */ -typedef struct MY_PRMonitor -{ - const char *name; - pthread_mutex_t lock; - pthread_t owner; - pthread_cond_t entryCV; - pthread_cond_t waitCV; - PRInt32 refCount; - PRUint32 entryCount; - PRIntn notifyTimes; -} MY_PRMonitor; - -static PRBool -MY_TestAndEnterMonitor(MY_PRMonitor *mon) -{ - pthread_t self = pthread_self(); - PRStatus rv; - PRBool rc = PR_FALSE; - - PR_ASSERT(mon != NULL); - rv = pthread_mutex_lock(&mon->lock); - if (rv != 0) { - slapi_log_err(SLAPI_LOG_ERR, "TestAndEnterMonitor", - "Failed to acquire monitor mutex, error (%d)\n", rv); - return rc; - } - if (mon->entryCount != 0) { - if (pthread_equal(mon->owner, self)) - goto done; - rv = pthread_mutex_unlock(&mon->lock); - if (rv != 0) { - slapi_log_err(SLAPI_LOG_ERR, "TestAndEnterMonitor", - "Failed to release monitor mutex, error (%d)\n", rv); - } - return PR_FALSE; - } - /* and now I have the monitor */ - PR_ASSERT(mon->notifyTimes == 0); - PR_ASSERT((mon->owner) == 0); - mon->owner = self; - -done: - mon->entryCount += 1; - rv = pthread_mutex_unlock(&mon->lock); - if (rv == PR_SUCCESS) { - rc = PR_TRUE; - } else { - slapi_log_err(SLAPI_LOG_ERR, "TestAndEnterMonitor", - "Failed to release monitor mutex, error (%d)\n", rv); - rc = PR_FALSE; - } - return rc; -} /* Globals which are used to store the sockets between * calls to daemon_pre_setuid_init() and the daemon thread * creation. 
*/ @@ -1491,13 +1428,13 @@ setup_pr_read_pds(Connection_Table *ct, PRFileDesc **n_tcps, PRFileDesc **s_tcps c = connection_table_get_first_active_connection(ct); while (c) { next = connection_table_get_next_active_connection(ct, c); - if (c->c_mutex == NULL) { + if (c->c_state == CONN_STATE_FREE) { connection_table_move_connection_out_of_active_list(ct, c); } else { /* we try to acquire the connection mutex, if it is already * acquired by another thread, don't wait */ - if (PR_FALSE == MY_TestAndEnterMonitor((MY_PRMonitor *)c->c_mutex)) { + if (pthread_mutex_trylock(&(c->c_mutex)) == EBUSY) { c = next; continue; } @@ -1538,7 +1475,7 @@ setup_pr_read_pds(Connection_Table *ct, PRFileDesc **n_tcps, PRFileDesc **s_tcps c->c_fdi = SLAPD_INVALID_SOCKET_INDEX; } } - PR_ExitMonitor(c->c_mutex); + pthread_mutex_unlock(&(c->c_mutex)); } c = next; } @@ -1579,12 +1516,13 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused */ for (c = connection_table_get_first_active_connection(ct); c != NULL; c = connection_table_get_next_active_connection(ct, c)) { - if (c->c_mutex != NULL) { + if (c->c_state != CONN_STATE_FREE) { /* this check can be done without acquiring the mutex */ - if (c->c_gettingber) + if (c->c_gettingber) { continue; + } - PR_EnterMonitor(c->c_mutex); + pthread_mutex_lock(&(c->c_mutex)); if (connection_is_active_nolock(c) && c->c_gettingber == 0) { PRInt16 out_flags; short readready; @@ -1634,7 +1572,7 @@ handle_pr_read_ready(Connection_Table *ct, PRIntn num_poll __attribute__((unused SLAPD_DISCONNECT_IDLE_TIMEOUT, EAGAIN); } } - PR_ExitMonitor(c->c_mutex); + pthread_mutex_unlock(&(c->c_mutex)); } } } @@ -1668,7 +1606,7 @@ ns_handle_closure(struct ns_job_t *job) Connection *c = (Connection *)ns_job_get_data(job); int do_yield = 0; - PR_EnterMonitor(c->c_mutex); + pthread_mutex_lock(&(c->c_mutex)); /* Assert we really have the right job state. 
*/ PR_ASSERT(job == c->c_job); @@ -1678,7 +1616,7 @@ ns_handle_closure(struct ns_job_t *job) /* Because handle closure will add a new job, we need to detach our current one. */ c->c_job = NULL; do_yield = ns_handle_closure_nomutex(c); - PR_ExitMonitor(c->c_mutex); + pthread_mutex_unlock(&(c->c_mutex)); /* Remove this task now. */ ns_job_done(job); if (do_yield) { @@ -1855,7 +1793,7 @@ ns_handle_pr_read_ready(struct ns_job_t *job) { Connection *c = (Connection *)ns_job_get_data(job); - PR_EnterMonitor(c->c_mutex); + pthread_mutex_lock(&(c->c_mutex)); /* Assert we really have the right job state. */ PR_ASSERT(job == c->c_job); @@ -1921,7 +1859,7 @@ ns_handle_pr_read_ready(struct ns_job_t *job) c->c_connid, c->c_sd); } /* Since we call done on the job, we need to remove it here. */ - PR_ExitMonitor(c->c_mutex); + pthread_mutex_unlock(&(c->c_mutex)); ns_job_done(job); return; } @@ -2390,7 +2328,7 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i PR_Close(pr_acceptfd); return -1; } - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* * Set the default idletimeout and the handle. We'll update c_idletimeout @@ -2478,7 +2416,7 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i connection_table_move_connection_on_to_active_list(the_connection_table, conn); } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); g_increment_current_conn_count(); @@ -2529,9 +2467,9 @@ ns_handle_new_connection(struct ns_job_t *job) * that poll() was avoided, even at the expense of putting this new fd back * in nunc-stans to poll for read ready. 
*/ - PR_EnterMonitor(c->c_mutex); + pthread_mutex_lock(&(c->c_mutex)); ns_connection_post_io_or_closing(c); - PR_ExitMonitor(c->c_mutex); + pthread_mutex_unlock(&(c->c_mutex)); return; } diff --git a/ldap/servers/slapd/extendop.c b/ldap/servers/slapd/extendop.c index 98595bcaa..f9f19d365 100644 --- a/ldap/servers/slapd/extendop.c +++ b/ldap/servers/slapd/extendop.c @@ -136,10 +136,10 @@ extop_handle_import_start(Slapi_PBlock *pb, char *extoid __attribute__((unused)) */ slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); if (pb_conn) { - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); pb_conn->c_flags |= CONN_FLAG_IMPORT; pb_conn->c_bi_backend = be; - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); } slapi_pblock_set(pb, SLAPI_EXT_OP_RET_OID, EXTOP_BULK_IMPORT_START_OID); @@ -164,11 +164,11 @@ extop_handle_import_done(Slapi_PBlock *pb, char *extoid __attribute__((unused)), Connection *pb_conn; slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); pb_conn->c_flags &= ~CONN_FLAG_IMPORT; be = pb_conn->c_bi_backend; pb_conn->c_bi_backend = NULL; - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); if ((be == NULL) || (be->be_wire_import == NULL)) { /* can this even happen? 
*/ diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c index f85a67b09..904f13d04 100644 --- a/ldap/servers/slapd/operation.c +++ b/ldap/servers/slapd/operation.c @@ -542,7 +542,7 @@ slapi_connection_acquire(Slapi_Connection *conn) { int rc; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* rc = connection_acquire_nolock(conn); */ /* connection in the closing state can't be acquired */ if (conn->c_flags & CONN_FLAG_CLOSING) { @@ -555,7 +555,7 @@ slapi_connection_acquire(Slapi_Connection *conn) conn->c_refcnt++; rc = 0; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); return (rc); } @@ -565,7 +565,7 @@ slapi_connection_remove_operation(Slapi_PBlock *pb __attribute__((unused)), Slap int rc = 0; Slapi_Operation **olist = &conn->c_ops; Slapi_Operation **tmp; - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* connection_remove_operation_ext(pb, conn,op); */ for (tmp = olist; *tmp != NULL && *tmp != op; tmp = &(*tmp)->o_next) ; /* NULL */ @@ -594,7 +594,7 @@ slapi_connection_remove_operation(Slapi_PBlock *pb __attribute__((unused)), Slap rc = 0; } } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); return (rc); } diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c index dbc069935..25ae6d671 100644 --- a/ldap/servers/slapd/opshared.c +++ b/ldap/servers/slapd/opshared.c @@ -660,7 +660,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) * In async paged result case, the search result might be released * by other theads. We need to double check it in the locked region. 
*/ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); pr_search_result = pagedresults_get_search_result(pb_conn, operation, 1 /*locked*/, pr_idx); if (pr_search_result) { if (pagedresults_is_abandoned_or_notavailable(pb_conn, 1 /*locked*/, pr_idx)) { @@ -668,7 +668,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) /* Previous operation was abandoned and the simplepaged object is not in use. */ send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL); rc = LDAP_SUCCESS; - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); goto free_and_return; } else { slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, pr_search_result); @@ -682,7 +682,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) pr_stat = PAGEDRESULTS_SEARCH_END; rc = LDAP_SUCCESS; } - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); pagedresults_unlock(pb_conn, pr_idx); if ((PAGEDRESULTS_SEARCH_END == pr_stat) || (0 == pnentries)) { @@ -803,10 +803,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) /* PAGED RESULTS */ if (op_is_pagedresults(operation)) { /* cleanup the slot */ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx); rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1); - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); } if (1 == flag_no_such_object) { break; @@ -845,11 +845,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result) slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr); if ((PAGEDRESULTS_SEARCH_END == pr_stat) || (0 == pnentries)) { /* no more entries, but at least another backend */ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx); be->be_search_results_release(&sr); rc = pagedresults_set_current_be(pb_conn, next_be, pr_idx, 1); - 
PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); pr_stat = PAGEDRESULTS_SEARCH_END; /* make sure stat is SEARCH_END */ if (NULL == next_be) { /* no more entries && no more backends */ @@ -875,9 +875,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result) next_be = NULL; /* to break the loop */ if (operation->o_status & SLAPI_OP_STATUS_ABANDONED) { /* It turned out this search was abandoned. */ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); pagedresults_free_one_msgid_nolock(pb_conn, operation->o_msgid); - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* paged-results-request was abandoned; making an empty cookie. */ pagedresults_set_response_control(pb, 0, estimate, -1, pr_idx); send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL); diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c index 8cefe164e..d8b8798b6 100644 --- a/ldap/servers/slapd/pagedresults.c +++ b/ldap/servers/slapd/pagedresults.c @@ -98,7 +98,7 @@ pagedresults_parse_control_value(Slapi_PBlock *pb, return LDAP_UNWILLING_TO_PERFORM; } - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* the ber encoding is no longer needed */ ber_free(ber, 1); if (cookie.bv_len <= 0) { @@ -206,7 +206,7 @@ bail: } } } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_parse_control_value", "<= idx %d\n", *index); @@ -300,7 +300,7 @@ pagedresults_free_one(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (conn->c_pagedresults.prl_count <= 0) { slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one", "conn=%" PRIu64 " paged requests list count is %d\n", @@ -311,7 +311,7 @@ pagedresults_free_one(Connection 
*conn, Operation *op, int index) conn->c_pagedresults.prl_count--; rc = 0; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one", "<= %d\n", rc); @@ -363,11 +363,11 @@ pagedresults_get_current_be(Connection *conn, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_current_be", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { be = conn->c_pagedresults.prl_list[index].pr_current_be; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_current_be", "<= %p\n", be); @@ -382,13 +382,13 @@ pagedresults_set_current_be(Connection *conn, Slapi_Backend *be, int index, int "pagedresults_set_current_be", "=> idx=%d\n", index); if (conn && (index > -1)) { if (!nolock) - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_current_be = be; } rc = 0; if (!nolock) - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_current_be", "<= %d\n", rc); @@ -407,13 +407,13 @@ pagedresults_get_search_result(Connection *conn, Operation *op, int locked, int locked ? 
"locked" : "not locked", index); if (conn && (index > -1)) { if (!locked) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); } if (index < conn->c_pagedresults.prl_maxlen) { sr = conn->c_pagedresults.prl_list[index].pr_search_result_set; } if (!locked) { - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } } slapi_log_err(SLAPI_LOG_TRACE, @@ -433,7 +433,7 @@ pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int lo index, sr); if (conn && (index > -1)) { if (!locked) - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { PagedResults *prp = conn->c_pagedresults.prl_list + index; if (!(prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED) || !sr) { @@ -443,7 +443,7 @@ pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int lo rc = 0; } if (!locked) - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_search_result", "=> %d\n", rc); @@ -460,11 +460,11 @@ pagedresults_get_search_result_count(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_search_result_count", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { count = conn->c_pagedresults.prl_list[index].pr_search_result_count; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_search_result_count", "<= %d\n", count); @@ -481,11 +481,11 @@ pagedresults_set_search_result_count(Connection *conn, Operation *op, int count, slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_search_result_count", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) 
{ conn->c_pagedresults.prl_list[index].pr_search_result_count = count; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, @@ -506,11 +506,11 @@ pagedresults_get_search_result_set_size_estimate(Connection *conn, "pagedresults_get_search_result_set_size_estimate", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { count = conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_search_result_set_size_estimate", "<= %d\n", @@ -532,11 +532,11 @@ pagedresults_set_search_result_set_size_estimate(Connection *conn, "pagedresults_set_search_result_set_size_estimate", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate = count; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, @@ -555,11 +555,11 @@ pagedresults_get_with_sort(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_with_sort", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { flags = conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_WITH_SORT; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_with_sort", "<= %d\n", flags); @@ -576,14 +576,14 @@ pagedresults_set_with_sort(Connection *conn, Operation *op, int flags, int index slapi_log_err(SLAPI_LOG_TRACE, 
"pagedresults_set_with_sort", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { if (flags & OP_FLAG_SERVER_SIDE_SORTING) { conn->c_pagedresults.prl_list[index].pr_flags |= CONN_FLAG_PAGEDRESULTS_WITH_SORT; } } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_with_sort", "<= %d\n", rc); @@ -600,11 +600,11 @@ pagedresults_get_unindexed(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_unindexed", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { flags = conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_UNINDEXED; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_unindexed", "<= %d\n", flags); @@ -621,12 +621,12 @@ pagedresults_set_unindexed(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_unindexed", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_flags |= CONN_FLAG_PAGEDRESULTS_UNINDEXED; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, @@ -644,11 +644,11 @@ pagedresults_get_sort_result_code(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_sort_result_code", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { code = 
conn->c_pagedresults.prl_list[index].pr_sort_result_code; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_sort_result_code", "<= %d\n", code); @@ -665,11 +665,11 @@ pagedresults_set_sort_result_code(Connection *conn, Operation *op, int code, int slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_sort_result_code", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_sort_result_code = code; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, @@ -687,11 +687,11 @@ pagedresults_set_timelimit(Connection *conn, Operation *op, time_t timelimit, in slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_timelimit", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { slapi_timespec_expire_at(timelimit, &(conn->c_pagedresults.prl_list[index].pr_timelimit_hr)); } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_timelimit", "<= %d\n", rc); @@ -746,7 +746,7 @@ pagedresults_cleanup(Connection *conn, int needlock) } if (needlock) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); } for (i = 0; conn->c_pagedresults.prl_list && i < conn->c_pagedresults.prl_maxlen; @@ -765,7 +765,7 @@ pagedresults_cleanup(Connection *conn, int needlock) } conn->c_pagedresults.prl_count = 0; if (needlock) { - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); return rc; @@ -792,7 +792,7 @@ pagedresults_cleanup_all(Connection *conn, int needlock) } if (needlock) { - 
PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); } for (i = 0; conn->c_pagedresults.prl_list && i < conn->c_pagedresults.prl_maxlen; @@ -812,7 +812,7 @@ pagedresults_cleanup_all(Connection *conn, int needlock) conn->c_pagedresults.prl_maxlen = 0; conn->c_pagedresults.prl_count = 0; if (needlock) { - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup_all", "<= %d\n", rc); return rc; @@ -831,7 +831,7 @@ pagedresults_check_or_set_processing(Connection *conn, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_check_or_set_processing", "=>\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { ret = (conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_PROCESSING); @@ -839,7 +839,7 @@ pagedresults_check_or_set_processing(Connection *conn, int index) conn->c_pagedresults.prl_list[index].pr_flags |= CONN_FLAG_PAGEDRESULTS_PROCESSING; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_check_or_set_processing", "<= %d\n", ret); @@ -858,7 +858,7 @@ pagedresults_reset_processing(Connection *conn, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_reset_processing", "=> idx=%d\n", index); if (conn && (index > -1)) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { ret = (conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_PROCESSING); @@ -866,7 +866,7 @@ pagedresults_reset_processing(Connection *conn, int index) conn->c_pagedresults.prl_list[index].pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING; } - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_reset_processing", "<= %d\n", ret); @@ -977,9 +977,9 @@ 
pagedresults_lock(Connection *conn, int index) if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) { return; } - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); prp = conn->c_pagedresults.prl_list + index; - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); if (prp->pr_mutex) { PR_Lock(prp->pr_mutex); } @@ -993,9 +993,9 @@ pagedresults_unlock(Connection *conn, int index) if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) { return; } - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); prp = conn->c_pagedresults.prl_list + index; - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); if (prp->pr_mutex) { PR_Unlock(prp->pr_mutex); } @@ -1010,11 +1010,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde return 1; /* not abandoned, but do not want to proceed paged results op. */ } if (!locked) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); } prp = conn->c_pagedresults.prl_list + index; if (!locked) { - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED; } @@ -1039,13 +1039,14 @@ pagedresults_set_search_result_pb(Slapi_PBlock *pb, void *sr, int locked) "pagedresults_set_search_result_pb", "=> idx=%d, sr=%p\n", index, sr); if (conn && (index > -1)) { if (!locked) - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_search_result_set = sr; rc = 0; } - if (!locked) - PR_ExitMonitor(conn->c_mutex); + if (!locked) { + pthread_mutex_unlock(&(conn->c_mutex)); + } } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_search_result_pb", "<= %d\n", rc); diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c index bc18a7b18..e489dfcd4 100644 --- a/ldap/servers/slapd/pblock.c +++ 
b/ldap/servers/slapd/pblock.c @@ -427,9 +427,9 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) "slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_DN \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); (*(char **)value) = (NULL == pblock->pb_conn->c_dn ? NULL : slapi_ch_strdup(pblock->pb_conn->c_dn)); - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_AUTHTYPE: /* deprecated */ if (pblock->pb_conn == NULL) { @@ -437,9 +437,9 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) "slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_AUTHTYPE \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); authtype = pblock->pb_conn->c_authtype; - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); if (authtype == NULL) { (*(char **)value) = NULL; } else if (strcasecmp(authtype, SLAPD_AUTH_NONE) == 0) { @@ -464,44 +464,44 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) "slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_AUTHMETHOD \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); (*(char **)value) = pblock->pb_conn->c_authtype ? 
slapi_ch_strdup(pblock->pb_conn->c_authtype) : NULL; - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_CLIENTNETADDR: if (pblock->pb_conn == NULL) { memset(value, 0, sizeof(PRNetAddr)); break; } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); if (pblock->pb_conn->cin_addr == NULL) { memset(value, 0, sizeof(PRNetAddr)); } else { (*(PRNetAddr *)value) = *(pblock->pb_conn->cin_addr); } - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_SERVERNETADDR: if (pblock->pb_conn == NULL) { memset(value, 0, sizeof(PRNetAddr)); break; } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); if (pblock->pb_conn->cin_destaddr == NULL) { memset(value, 0, sizeof(PRNetAddr)); } else { (*(PRNetAddr *)value) = *(pblock->pb_conn->cin_destaddr); } - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_CLIENTIP: if (pblock->pb_conn == NULL) { memset(value, 0, sizeof(struct in_addr)); break; } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); if (pblock->pb_conn->cin_addr == NULL) { memset(value, 0, sizeof(struct in_addr)); } else { @@ -516,14 +516,14 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) memset(value, 0, sizeof(struct in_addr)); } } - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_SERVERIP: if (pblock->pb_conn == NULL) { memset(value, 0, sizeof(struct in_addr)); break; } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); if (pblock->pb_conn->cin_destaddr == NULL) { memset(value, 0, sizeof(PRNetAddr)); } else { @@ -538,7 +538,7 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) memset(value, 0, 
sizeof(struct in_addr)); } } - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_IS_REPLICATION_SESSION: if (pblock->pb_conn == NULL) { @@ -546,9 +546,9 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) "slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_IS_REPLICATION_SESSION \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); (*(int *)value) = pblock->pb_conn->c_isreplication_session; - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_IS_SSL_SESSION: if (pblock->pb_conn == NULL) { @@ -556,9 +556,9 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) "slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_IS_SSL_SESSION \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); (*(int *)value) = pblock->pb_conn->c_flags & CONN_FLAG_SSL; - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_SASL_SSF: if (pblock->pb_conn == NULL) { @@ -566,9 +566,9 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) "slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_SASL_SSF \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); (*(int *)value) = pblock->pb_conn->c_sasl_ssf; - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_SSL_SSF: if (pblock->pb_conn == NULL) { @@ -576,9 +576,9 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) "slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_SSL_SSF \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + 
pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); (*(int *)value) = pblock->pb_conn->c_ssl_ssf; - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_LOCAL_SSF: if (pblock->pb_conn == NULL) { @@ -586,9 +586,9 @@ slapi_pblock_get(Slapi_PBlock *pblock, int arg, void *value) "slapi_pblock_get", "Connection is NULL and hence cannot access SLAPI_CONN_LOCAL_SSF \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); (*(int *)value) = pblock->pb_conn->c_local_ssf; - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_CERT: if (pblock->pb_conn == NULL) { @@ -2566,10 +2566,10 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) "Connection is NULL and hence cannot access SLAPI_CONN_AUTHMETHOD \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); slapi_ch_free((void **)&pblock->pb_conn->c_authtype); pblock->pb_conn->c_authtype = slapi_ch_strdup((char *)value); - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; case SLAPI_CONN_IS_REPLICATION_SESSION: if (pblock->pb_conn == NULL) { @@ -2578,9 +2578,9 @@ slapi_pblock_set(Slapi_PBlock *pblock, int arg, void *value) "Connection is NULL and hence cannot access SLAPI_CONN_IS_REPLICATION_SESSION \n"); return (-1); } - PR_EnterMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_lock(&(pblock->pb_conn->c_mutex)); pblock->pb_conn->c_isreplication_session = *((int *)value); - PR_ExitMonitor(pblock->pb_conn->c_mutex); + pthread_mutex_unlock(&(pblock->pb_conn->c_mutex)); break; /* stuff related to config file processing */ @@ -4191,7 +4191,7 @@ void bind_credentials_clear(Connection *conn, PRBool lock_conn, PRBool clear_externalcreds) { if (lock_conn) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); 
} if (conn->c_dn != NULL) { /* a non-anonymous bind has occurred */ @@ -4217,7 +4217,7 @@ bind_credentials_clear(Connection *conn, PRBool lock_conn, PRBool clear_external } if (lock_conn) { - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } } @@ -4380,10 +4380,10 @@ slapi_pblock_set_op_stack_elem(Slapi_PBlock *pb, void *stack_elem) void bind_credentials_set(Connection *conn, char *authtype, char *normdn, char *extauthtype, char *externaldn, CERTCertificate *clientcert, Slapi_Entry *bind_target_entry) { - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); bind_credentials_set_nolock(conn, authtype, normdn, extauthtype, externaldn, clientcert, bind_target_entry); - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); } void diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c index 8ad268a85..6820a5d75 100644 --- a/ldap/servers/slapd/psearch.c +++ b/ldap/servers/slapd/psearch.c @@ -278,9 +278,9 @@ ps_send_results(void *arg) /* need to acquire a reference to this connection so that it will not be released or cleaned up out from under us */ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); conn_acq_flag = connection_acquire_nolock(pb_conn); - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); if (conn_acq_flag) { slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results", @@ -397,7 +397,7 @@ ps_send_results(void *arg) conn = pb_conn; /* save to release later - connection_remove_operation_ext will NULL the pb_conn */ /* Clean up the connection structure */ - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results", "conn=%" PRIu64 " op=%d Releasing the connection and operation\n", @@ -409,7 +409,7 @@ ps_send_results(void *arg) if (conn_acq_flag == 0) { /* we acquired it, so release it */ connection_release_nolock(conn); } - PR_ExitMonitor(conn->c_mutex); + 
pthread_mutex_unlock(&(conn->c_mutex)); conn = NULL; PR_DestroyLock(ps->ps_lock); diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c index 2dda5892b..7cad0db27 100644 --- a/ldap/servers/slapd/saslbind.c +++ b/ldap/servers/slapd/saslbind.c @@ -781,7 +781,7 @@ ids_sasl_listmech(Slapi_PBlock *pb) sasl_conn = (sasl_conn_t *)pb_conn->c_sasl_conn; if (sasl_conn != NULL) { /* sasl library mechanisms are connection dependent */ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); if (sasl_listmech(sasl_conn, NULL, /* username */ "", ",", "", @@ -795,7 +795,7 @@ ids_sasl_listmech(Slapi_PBlock *pb) charray_free(others); slapi_ch_free((void **)&dupstr); } - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); } } @@ -889,13 +889,13 @@ ids_sasl_check_bind(Slapi_PBlock *pb) return; } - PR_EnterMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_lock(&(pb_conn->c_mutex)); /* BIG LOCK */ continuing = pb_conn->c_flags & CONN_FLAG_SASL_CONTINUE; pb_conn->c_flags &= ~CONN_FLAG_SASL_CONTINUE; /* reset flag */ sasl_conn = (sasl_conn_t *)pb_conn->c_sasl_conn; if (sasl_conn == NULL) { - PR_ExitMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* BIG LOCK */ send_ldap_result(pb, LDAP_AUTH_METHOD_NOT_SUPPORTED, NULL, "sasl library unavailable", 0, NULL); return; @@ -979,7 +979,7 @@ sasl_start: if (sasl_conn == NULL) { send_ldap_result(pb, LDAP_AUTH_METHOD_NOT_SUPPORTED, NULL, "sasl library unavailable", 0, NULL); - PR_ExitMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* BIG LOCK */ return; } } @@ -995,7 +995,7 @@ sasl_check_result: /* retrieve the authenticated username */ if (sasl_getprop(sasl_conn, SASL_USERNAME, (const void **)&username) != SASL_OK) { - PR_ExitMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* BIG LOCK */ send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "could not obtain 
sasl username", 0, NULL); break; @@ -1016,7 +1016,7 @@ sasl_check_result: } } if (dn == NULL) { - PR_ExitMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* BIG LOCK */ send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "could not get auth dn from sasl", 0, NULL); break; @@ -1058,7 +1058,7 @@ sasl_check_result: slapi_ch_strdup(normdn), NULL, NULL, NULL, bind_target_entry); - PR_ExitMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* BIG LOCK */ if (plugin_call_plugins(pb, SLAPI_PLUGIN_PRE_BIND_FN) != 0) { break; @@ -1116,9 +1116,9 @@ sasl_check_result: /* see if we negotiated a security layer */ if (*ssfp > 0) { /* Enable SASL I/O on the connection */ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); connection_set_io_layer_cb(pb_conn, sasl_io_enable, NULL, NULL); - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); } /* send successful result */ @@ -1131,7 +1131,7 @@ sasl_check_result: case SASL_CONTINUE: /* another step needed */ pb_conn->c_flags |= CONN_FLAG_SASL_CONTINUE; - PR_ExitMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* BIG LOCK */ if (plugin_call_plugins(pb, SLAPI_PLUGIN_PRE_BIND_FN) != 0) { break; @@ -1153,7 +1153,7 @@ sasl_check_result: case SASL_NOMECH: - PR_ExitMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* BIG LOCK */ send_ldap_result(pb, LDAP_AUTH_METHOD_NOT_SUPPORTED, NULL, "sasl mechanism not supported", 0, NULL); break; @@ -1161,7 +1161,7 @@ sasl_check_result: default: /* other error */ errstr = sasl_errdetail(sasl_conn); - PR_ExitMonitor(pb_conn->c_mutex); /* BIG LOCK */ + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* BIG LOCK */ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, (void *)errstr); send_ldap_result(pb, LDAP_INVALID_CREDENTIALS, NULL, NULL, 0, NULL); break; diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h 
index b3ede6f7c..e691ea9e4 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -1604,9 +1604,15 @@ typedef int (*Conn_IO_Layer_cb)(struct conn *, void *data); struct Conn_Private; typedef struct Conn_private Conn_private; +typedef enum _conn_state { + CONN_STATE_FREE = 0, + CONN_STATE_INIT = 1, +} conn_state; + typedef struct conn { Sockbuf *c_sb; /* ber connection stuff */ + conn_state c_state; /* Used in connection table and done to see what's free or not. Later we could use this for other state handlings. */ int c_sd; /* the actual socket descriptor */ int c_ldapversion; /* version of LDAP protocol */ char *c_dn; /* current DN bound to this conn */ @@ -1630,7 +1636,10 @@ typedef struct conn uint64_t c_anonlimits_set; /* default anon limits are set */ PRInt32 c_threadnumber; /* # threads used in this conn */ int c_refcnt; /* # ops refering to this conn */ - PRMonitor *c_mutex; /* protect each conn structure; need to be re-entrant */ + pthread_mutex_t c_mutex; /* protect each conn structure; need to be re-entrant */ + /* Note that c_mutex is a pthreadmutex to allow sharing + * into nunc-stans. + */ PRLock *c_pdumutex; /* only write one pdu at a time */ time_t c_idlesince; /* last time of activity on conn */ int c_idletimeout; /* local copy of idletimeout */ diff --git a/ldap/servers/slapd/start_tls_extop.c b/ldap/servers/slapd/start_tls_extop.c index 2d051a82f..bfa32b783 100644 --- a/ldap/servers/slapd/start_tls_extop.c +++ b/ldap/servers/slapd/start_tls_extop.c @@ -173,7 +173,7 @@ start_tls(Slapi_PBlock *pb) /* At least we know that the request was indeed an Start TLS one. 
*/ slapi_pblock_get(pb, SLAPI_CONNECTION, &conn); - PR_EnterMonitor(conn->c_mutex); + pthread_mutex_lock(&(conn->c_mutex)); /* cannot call slapi_send_ldap_result with mutex locked - will deadlock if ber_flush returns error */ if (conn->c_prfd == (PRFileDesc *)NULL) { slapi_log_err(SLAPI_LOG_PLUGIN, "start_tls", @@ -249,7 +249,7 @@ start_tls(Slapi_PBlock *pb) * we send a success response back to the client. */ ldapmsg = "Start TLS request accepted.Server willing to negotiate SSL."; unlock_and_return: - PR_ExitMonitor(conn->c_mutex); + pthread_mutex_unlock(&(conn->c_mutex)); slapi_send_ldap_result(pb, ldaprc, NULL, ldapmsg, 0, NULL); return (SLAPI_PLUGIN_EXTENDED_SENT_RESULT); @@ -317,7 +317,7 @@ start_tls_graceful_closure(Connection *c, Slapi_PBlock *pb, int is_initiator) */ } - PR_EnterMonitor(c->c_mutex); + pthread_mutex_lock(&(c->c_mutex)); /* "Unimport" the socket from SSL, i.e. get rid of the upper layer of the * file descriptor stack, which represents SSL. @@ -347,7 +347,7 @@ start_tls_graceful_closure(Connection *c, Slapi_PBlock *pb, int is_initiator) bind_credentials_clear(c, PR_FALSE, PR_TRUE); - PR_ExitMonitor(c->c_mutex); + pthread_mutex_unlock(&(c->c_mutex)); return (SLAPI_PLUGIN_EXTENDED_SENT_RESULT); } diff --git a/ldap/servers/slapd/unbind.c b/ldap/servers/slapd/unbind.c index 686e27a8e..89f6ef932 100644 --- a/ldap/servers/slapd/unbind.c +++ b/ldap/servers/slapd/unbind.c @@ -75,9 +75,9 @@ do_unbind(Slapi_PBlock *pb) } /* target spec is used to decide which plugins are applicable for the operation */ - PR_EnterMonitor(pb_conn->c_mutex); + pthread_mutex_lock(&(pb_conn->c_mutex)); operation_set_target_spec_str(operation, pb_conn->c_dn); - PR_ExitMonitor(pb_conn->c_mutex); + pthread_mutex_unlock(&(pb_conn->c_mutex)); /* ONREPL - plugins should be called and passed bind dn and, possibly, other data */
0
9851f0ab1c9fd40acdf213c0f04a6087051ca130
389ds/389-ds-base
Issue 3555 - UI - Fix audit issue with npm - stylelint (#5836) Description: Update stylelint versions. Run npm audit fix to address the vulnerability in stylelint. Relates: https://github.com/389ds/389-ds-base/issues/3555 Reviewed by: @mreynolds389 (Thanks!)
commit 9851f0ab1c9fd40acdf213c0f04a6087051ca130 Author: Simon Pichugin <[email protected]> Date: Wed Jul 12 15:28:25 2023 -0700 Issue 3555 - UI - Fix audit issue with npm - stylelint (#5836) Description: Update stylelint versions. Run npm audit fix to address the vulnerability in stylelint. Relates: https://github.com/389ds/389-ds-base/issues/3555 Reviewed by: @mreynolds389 (Thanks!) diff --git a/src/cockpit/389-console/package-lock.json b/src/cockpit/389-console/package-lock.json index fd721bf5d..6362b2724 100644 --- a/src/cockpit/389-console/package-lock.json +++ b/src/cockpit/389-console/package-lock.json @@ -47,12 +47,21 @@ "qunit": "^2.9.3", "sass": "^1.61.0", "sizzle": "^2.3.3", - "stylelint": "^14.9.1", - "stylelint-config-standard": "^25.0.0", - "stylelint-config-standard-scss": "^5.0.0", + "stylelint": "^15.10.1", + "stylelint-config-standard": "^33.0.0", + "stylelint-config-standard-scss": "^10.0.0", "stylelint-formatter-pretty": "^3.2.0" } }, + "node_modules/@aashutoshrathi/word-wrap": { + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", + "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/@ampproject/remapping": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", @@ -450,10 +459,32 @@ "node": ">=6.9.0" } }, - "node_modules/@csstools/selector-specificity": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-2.2.0.tgz", - "integrity": "sha512-+OJ9konv95ClSTOJCmMZqpd5+YGsB2S+x6w3E1oaM8UuR5j8nTNHYSz8c9BEPGDOCMQYIEEGlVPj/VY64iTbGw==", + "node_modules/@csstools/css-parser-algorithms": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-2.3.0.tgz", + "integrity": 
"sha512-dTKSIHHWc0zPvcS5cqGP+/TPFUJB0ekJ9dGKvMAFoNuBFhDPBt9OMGNZiIA5vTiNdGHHBeScYPXIGBMnVOahsA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "engines": { + "node": "^14 || ^16 || >=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^2.1.1" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-2.1.1.tgz", + "integrity": "sha512-GbrTj2Z8MCTUv+52GE0RbFGM527xuXZ0Xa5g0Z+YN573uveS4G0qi6WNOMyz3yrFM/jaILTTwJ0+umx81EzqfA==", "dev": true, "engines": { "node": "^14 || ^16 || >=18" @@ -461,9 +492,51 @@ "funding": { "type": "opencollective", "url": "https://opencollective.com/csstools" + } + }, + "node_modules/@csstools/media-query-list-parser": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/@csstools/media-query-list-parser/-/media-query-list-parser-2.1.2.tgz", + "integrity": "sha512-M8cFGGwl866o6++vIY7j1AKuq9v57cf+dGepScwCcbut9ypJNr4Cj+LLTWligYUZ0uyhEoJDKt5lvyBfh2L3ZQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "engines": { + "node": "^14 || ^16 || >=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^2.3.0", + "@csstools/css-tokenizer": "^2.1.1" + } + }, + "node_modules/@csstools/selector-specificity": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-3.0.0.tgz", + "integrity": "sha512-hBI9tfBtuPIi885ZsZ32IMEU/5nlZH/KOVYJCOh7gyMxaVLGmLedYqFN6Ui1LXkI8JlC8IsuC0rF0btcRZKd5g==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": 
"https://opencollective.com/csstools" + } + ], + "engines": { + "node": "^14 || ^16 || >=18" }, "peerDependencies": { - "postcss-selector-parser": "^6.0.10" + "postcss-selector-parser": "^6.0.13" } }, "node_modules/@esbuild/android-arm": { @@ -1237,12 +1310,6 @@ "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==", "dev": true }, - "node_modules/@types/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", - "dev": true - }, "node_modules/acorn": { "version": "8.8.2", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", @@ -1436,6 +1503,15 @@ "get-intrinsic": "^1.1.3" } }, + "node_modules/arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/astral-regex": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", @@ -1624,9 +1700,9 @@ } }, "node_modules/builtins/node_modules/semver": { - "version": "7.5.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.1.tgz", - "integrity": "sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "peer": true, "dependencies": { @@ -1669,26 +1745,30 @@ } }, "node_modules/camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": 
"sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", "dev": true, "engines": { - "node": ">=6" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/camelcase-keys": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz", - "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==", + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-7.0.2.tgz", + "integrity": "sha512-Rjs1H+A9R+Ig+4E/9oyB66UC5Mj9Xq3N//vcLf2WzgdTi/3gUu3Z9KoqmlrEG4VuuLK8wJHofxzdQXz/knhiYg==", "dev": true, "dependencies": { - "camelcase": "^5.3.1", - "map-obj": "^4.0.0", - "quick-lru": "^4.0.1" + "camelcase": "^6.3.0", + "map-obj": "^4.1.0", + "quick-lru": "^5.1.1", + "type-fest": "^1.2.1" }, "engines": { - "node": ">=8" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -1830,19 +1910,21 @@ "hasInstallScript": true }, "node_modules/cosmiconfig": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", - "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz", + "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==", "dev": true, "dependencies": { - "@types/parse-json": "^4.0.0", "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" + "path-type": "^4.0.0" }, "engines": { - "node": ">=10" + 
"node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" } }, "node_modules/cross-spawn": { @@ -1867,6 +1949,19 @@ "node": ">=12.22" } }, + "node_modules/css-tree": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", + "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", + "dev": true, + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, "node_modules/cssesc": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", @@ -2007,12 +2102,15 @@ } }, "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-5.0.1.tgz", + "integrity": "sha512-VfxadyCECXgQlkoEAjeghAr5gY3Hf+IKjKb+X8tGVDtveCjN+USwprd2q3QXBR9T1+x2DG0XZF5/w+7HAtSaXA==", "dev": true, "engines": { - "node": ">=0.10.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/decamelize-keys": { @@ -2031,6 +2129,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/decamelize-keys/node_modules/decamelize": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", + "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/decamelize-keys/node_modules/map-obj": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", @@ -2722,9 +2829,9 @@ } }, "node_modules/eslint-plugin-n/node_modules/semver": { - "version": 
"7.5.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.1.tgz", - "integrity": "sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "peer": true, "dependencies": { @@ -3099,9 +3206,9 @@ "dev": true }, "node_modules/fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.0.tgz", + "integrity": "sha512-ChDuvbOypPuNjO8yIDf36x7BlZX1smcUMTTcyoIjycexOxd6DFsKsg21qVBzEmr3G7fUKIRy2/psii+CIUt7FA==", "dev": true, "dependencies": { "@nodelib/fs.stat": "^2.0.2", @@ -3752,12 +3859,15 @@ } }, "node_modules/indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-5.0.0.tgz", + "integrity": "sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==", "dev": true, "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/inflight": { @@ -4245,9 +4355,9 @@ } }, "node_modules/known-css-properties": { - "version": "0.26.0", - "resolved": "https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.26.0.tgz", - "integrity": "sha512-5FZRzrZzNTBruuurWpvZnvP9pum+fe0HcK8z/ooo+U+Hmp4vtbyp1/QDsqmufirXy4egGzbaH/y2uCZf+6W5Kg==", + "version": "0.27.0", + "resolved": 
"https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.27.0.tgz", + "integrity": "sha512-uMCj6+hZYDoffuvAJjFAPz56E9uoowFHmTkqRtRq5WyC5Q6Cu/fTZKNQpX/RbzChBYLLl3lo8CjFZBAZXq9qFg==", "dev": true }, "node_modules/levn": { @@ -4444,27 +4554,33 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/mdn-data": { + "version": "2.0.30", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", + "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==", + "dev": true + }, "node_modules/meow": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/meow/-/meow-9.0.0.tgz", - "integrity": "sha512-+obSblOQmRhcyBt62furQqRAQpNyWXo8BuQ5bN7dG8wmwQ+vwHKp/rCFD4CrTP8CsDQD1sjoZ94K417XEUk8IQ==", + "version": "10.1.5", + "resolved": "https://registry.npmjs.org/meow/-/meow-10.1.5.tgz", + "integrity": "sha512-/d+PQ4GKmGvM9Bee/DPa8z3mXs/pkvJE2KEThngVNOqtmljC6K7NMPxtc2JeZYTmpWb9k/TmxjeL18ez3h7vCw==", "dev": true, "dependencies": { - "@types/minimist": "^1.2.0", - "camelcase-keys": "^6.2.2", - "decamelize": "^1.2.0", + "@types/minimist": "^1.2.2", + "camelcase-keys": "^7.0.0", + "decamelize": "^5.0.0", "decamelize-keys": "^1.1.0", "hard-rejection": "^2.1.0", "minimist-options": "4.1.0", - "normalize-package-data": "^3.0.0", - "read-pkg-up": "^7.0.1", - "redent": "^3.0.0", - "trim-newlines": "^3.0.0", - "type-fest": "^0.18.0", - "yargs-parser": "^20.2.3" + "normalize-package-data": "^3.0.2", + "read-pkg-up": "^8.0.0", + "redent": "^4.0.0", + "trim-newlines": "^4.0.2", + "type-fest": "^1.2.2", + "yargs-parser": "^20.2.9" }, "engines": { - "node": ">=10" + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -4536,15 +4652,6 @@ "node": ">= 6" } }, - "node_modules/minimist-options/node_modules/arrify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", - "integrity": 
"sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -4619,9 +4726,9 @@ } }, "node_modules/normalize-package-data/node_modules/semver": { - "version": "7.5.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.1.tgz", - "integrity": "sha512-Wvss5ivl8TMRZXXESstBA4uR5iXgEN/VC5/sOcuXdVLzcdkz4HWetIoRfG5gb5X+ij/G9rw9YoGn3QoQ8OCSpw==", + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -4763,17 +4870,17 @@ } }, "node_modules/optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", + "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", "dev": true, "dependencies": { + "@aashutoshrathi/word-wrap": "^1.2.3", "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" + "type-check": "^0.4.0" }, "engines": { "node": ">= 0.8.0" @@ -4809,15 +4916,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true, - "engines": { - "node": ">=6" - } - }, "node_modules/parent-module": { "version": "1.0.1", 
"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -5129,12 +5227,15 @@ ] }, "node_modules/quick-lru": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz", - "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", + "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", "dev": true, "engines": { - "node": ">=8" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/qunit": { @@ -5216,134 +5317,40 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, "node_modules/read-pkg": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", - "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-6.0.0.tgz", + "integrity": "sha512-X1Fu3dPuk/8ZLsMhEj5f4wFAF0DWoK7qhGJvgaijocXxBmSToKfbFtqbxMO7bVjNA1dmE5huAzjXj/ey86iw9Q==", "dev": true, "dependencies": { "@types/normalize-package-data": "^2.4.0", - "normalize-package-data": "^2.5.0", - "parse-json": "^5.0.0", - "type-fest": "^0.6.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/read-pkg-up": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", - "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", - "dev": true, - "dependencies": { - "find-up": "^4.1.0", - "read-pkg": "^5.2.0", - "type-fest": "^0.8.1" + "normalize-package-data": "^3.0.2", + "parse-json": "^5.2.0", + "type-fest": "^1.0.1" }, "engines": { - "node": ">=8" + "node": 
">=12" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/read-pkg-up/node_modules/find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, - "dependencies": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/read-pkg-up/node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, - "dependencies": { - "p-locate": "^4.1.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/read-pkg-up/node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "node_modules/read-pkg-up": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-8.0.0.tgz", + "integrity": "sha512-snVCqPczksT0HS2EC+SxUndvSzn6LRCwpfSvLrIfR5BKDQQZMaI6jPRC9dYvYFDRAuFEAnkwww8kBBNE/3VvzQ==", "dev": true, "dependencies": { - "p-try": "^2.0.0" + "find-up": "^5.0.0", + "read-pkg": "^6.0.0", + "type-fest": "^1.0.1" }, "engines": { - "node": ">=6" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/read-pkg-up/node_modules/p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, - "dependencies": { - "p-limit": "^2.2.0" - }, - "engines": { - "node": ">=8" - } - }, - 
"node_modules/read-pkg-up/node_modules/type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/read-pkg/node_modules/hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", - "dev": true - }, - "node_modules/read-pkg/node_modules/normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "dependencies": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, - "node_modules/read-pkg/node_modules/semver": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", - "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", - "dev": true, - "bin": { - "semver": "bin/semver" - } - }, - "node_modules/read-pkg/node_modules/type-fest": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", - "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/readdirp": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", @@ -5365,16 +5372,19 @@ } }, "node_modules/redent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", - 
"integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/redent/-/redent-4.0.0.tgz", + "integrity": "sha512-tYkDkVVtYkSVhuQ4zBgfvciymHaeuel+zFKXShfDnFP5SyVEP7qo70Rf1jTOTCx3vGNAbnEi/xFkcfQVMIBWag==", "dev": true, "dependencies": { - "indent-string": "^4.0.0", - "strip-indent": "^3.0.0" + "indent-string": "^5.0.0", + "strip-indent": "^4.0.0" }, "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/regexp.prototype.flags": { @@ -5564,9 +5574,9 @@ } }, "node_modules/semver": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz", - "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", "dev": true, "bin": { "semver": "bin/semver.js" @@ -5606,10 +5616,16 @@ } }, "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.0.2.tgz", + "integrity": "sha512-MY2/qGx4enyjprQnFaZsHib3Yadh3IXyV2C321GY0pjGfVBu4un0uDJkwgdxqO+Rdx8JMT8IfJIRwbYVz3Ob3Q==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } }, "node_modules/sizzle": { "version": "2.3.10", @@ -5848,15 +5864,18 @@ } }, "node_modules/strip-indent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", - "integrity": 
"sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-4.0.0.tgz", + "integrity": "sha512-mnVSV2l+Zv6BLpSD/8V87CW/y9EmmbYzGCIavsnsI6/nwn26DwffM/yztm30Z/I2DY9wdS3vXVCMnHDgZaVNoA==", "dev": true, "dependencies": { - "min-indent": "^1.0.0" + "min-indent": "^1.0.1" }, "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/strip-json-comments": { @@ -5878,55 +5897,57 @@ "dev": true }, "node_modules/stylelint": { - "version": "14.16.1", - "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-14.16.1.tgz", - "integrity": "sha512-ErlzR/T3hhbV+a925/gbfc3f3Fep9/bnspMiJPorfGEmcBbXdS+oo6LrVtoUZ/w9fqD6o6k7PtUlCOsCRdjX/A==", + "version": "15.10.1", + "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-15.10.1.tgz", + "integrity": "sha512-CYkzYrCFfA/gnOR+u9kJ1PpzwG10WLVnoxHDuBA/JiwGqdM9+yx9+ou6SE/y9YHtfv1mcLo06fdadHTOx4gBZQ==", "dev": true, "dependencies": { - "@csstools/selector-specificity": "^2.0.2", + "@csstools/css-parser-algorithms": "^2.3.0", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/media-query-list-parser": "^2.1.2", + "@csstools/selector-specificity": "^3.0.0", "balanced-match": "^2.0.0", "colord": "^2.9.3", - "cosmiconfig": "^7.1.0", + "cosmiconfig": "^8.2.0", "css-functions-list": "^3.1.0", + "css-tree": "^2.3.1", "debug": "^4.3.4", - "fast-glob": "^3.2.12", + "fast-glob": "^3.3.0", "fastest-levenshtein": "^1.0.16", "file-entry-cache": "^6.0.1", "global-modules": "^2.0.0", "globby": "^11.1.0", "globjoin": "^0.1.4", - "html-tags": "^3.2.0", - "ignore": "^5.2.1", + "html-tags": "^3.3.1", + "ignore": "^5.2.4", "import-lazy": "^4.0.0", "imurmurhash": "^0.1.4", "is-plain-object": "^5.0.0", - "known-css-properties": "^0.26.0", + "known-css-properties": "^0.27.0", "mathml-tag-names": "^2.1.3", - "meow": "^9.0.0", + "meow": "^10.1.5", 
"micromatch": "^4.0.5", "normalize-path": "^3.0.0", "picocolors": "^1.0.0", - "postcss": "^8.4.19", - "postcss-media-query-parser": "^0.2.3", + "postcss": "^8.4.24", "postcss-resolve-nested-selector": "^0.1.1", "postcss-safe-parser": "^6.0.0", - "postcss-selector-parser": "^6.0.11", + "postcss-selector-parser": "^6.0.13", "postcss-value-parser": "^4.2.0", "resolve-from": "^5.0.0", "string-width": "^4.2.3", "strip-ansi": "^6.0.1", "style-search": "^0.1.0", - "supports-hyperlinks": "^2.3.0", + "supports-hyperlinks": "^3.0.0", "svg-tags": "^1.0.0", "table": "^6.8.1", - "v8-compile-cache": "^2.3.0", - "write-file-atomic": "^4.0.2" + "write-file-atomic": "^5.0.1" }, "bin": { - "stylelint": "bin/stylelint.js" + "stylelint": "bin/stylelint.mjs" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": "^14.13.1 || >=16.0.0" }, "funding": { "type": "opencollective", @@ -5934,81 +5955,63 @@ } }, "node_modules/stylelint-config-recommended": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-7.0.0.tgz", - "integrity": "sha512-yGn84Bf/q41J4luis1AZ95gj0EQwRX8lWmGmBwkwBNSkpGSpl66XcPTulxGa/Z91aPoNGuIGBmFkcM1MejMo9Q==", + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-12.0.0.tgz", + "integrity": "sha512-x6x8QNARrGO2sG6iURkzqL+Dp+4bJorPMMRNPScdvaUK8PsynriOcMW7AFDKqkWAS5wbue/u8fUT/4ynzcmqdQ==", "dev": true, "peerDependencies": { - "stylelint": "^14.4.0" + "stylelint": "^15.5.0" } }, "node_modules/stylelint-config-recommended-scss": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-recommended-scss/-/stylelint-config-recommended-scss-7.0.0.tgz", - "integrity": "sha512-rGz1J4rMAyJkvoJW4hZasuQBB7y9KIrShb20l9DVEKKZSEi1HAy0vuNlR8HyCKy/jveb/BdaQFcoiYnmx4HoiA==", + "version": "12.0.0", + "resolved": 
"https://registry.npmjs.org/stylelint-config-recommended-scss/-/stylelint-config-recommended-scss-12.0.0.tgz", + "integrity": "sha512-5Bb2mlGy6WLa30oNeKpZvavv2lowJUsUJO25+OA68GFTemlwd1zbFsL7q0bReKipOSU3sG47hKneZ6Nd+ctrFA==", "dev": true, "dependencies": { - "postcss-scss": "^4.0.2", - "stylelint-config-recommended": "^8.0.0", - "stylelint-scss": "^4.0.0" + "postcss-scss": "^4.0.6", + "stylelint-config-recommended": "^12.0.0", + "stylelint-scss": "^5.0.0" }, "peerDependencies": { - "stylelint": "^14.4.0" - } - }, - "node_modules/stylelint-config-recommended-scss/node_modules/stylelint-config-recommended": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-8.0.0.tgz", - "integrity": "sha512-IK6dWvE000+xBv9jbnHOnBq01gt6HGVB2ZTsot+QsMpe82doDQ9hvplxfv4YnpEuUwVGGd9y6nbaAnhrjcxhZQ==", - "dev": true, - "peerDependencies": { - "stylelint": "^14.8.0" + "postcss": "^8.3.3", + "stylelint": "^15.5.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + } } }, "node_modules/stylelint-config-standard": { - "version": "25.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-25.0.0.tgz", - "integrity": "sha512-21HnP3VSpaT1wFjFvv9VjvOGDtAviv47uTp3uFmzcN+3Lt+RYRv6oAplLaV51Kf792JSxJ6svCJh/G18E9VnCA==", + "version": "33.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-33.0.0.tgz", + "integrity": "sha512-eyxnLWoXImUn77+ODIuW9qXBDNM+ALN68L3wT1lN2oNspZ7D9NVGlNHb2QCUn4xDug6VZLsh0tF8NyoYzkgTzg==", "dev": true, "dependencies": { - "stylelint-config-recommended": "^7.0.0" + "stylelint-config-recommended": "^12.0.0" }, "peerDependencies": { - "stylelint": "^14.4.0" + "stylelint": "^15.5.0" } }, "node_modules/stylelint-config-standard-scss": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-standard-scss/-/stylelint-config-standard-scss-5.0.0.tgz", - "integrity": 
"sha512-zoXLibojHZYPFjtkc4STZtAJ2yGTq3Bb4MYO0oiyO6f/vNxDKRcSDZYoqN260Gv2eD5niQIr1/kr5SXlFj9kcQ==", + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/stylelint-config-standard-scss/-/stylelint-config-standard-scss-10.0.0.tgz", + "integrity": "sha512-bChBEo1p3xUVWh/wenJI+josoMk21f2yuLDGzGjmKYcALfl2u3DFltY+n4UHswYiXghqXaA8mRh+bFy/q1hQlg==", "dev": true, "dependencies": { - "stylelint-config-recommended-scss": "^7.0.0", - "stylelint-config-standard": "^26.0.0" + "stylelint-config-recommended-scss": "^12.0.0", + "stylelint-config-standard": "^33.0.0" }, "peerDependencies": { - "stylelint": "^14.9.0" - } - }, - "node_modules/stylelint-config-standard-scss/node_modules/stylelint-config-recommended": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-8.0.0.tgz", - "integrity": "sha512-IK6dWvE000+xBv9jbnHOnBq01gt6HGVB2ZTsot+QsMpe82doDQ9hvplxfv4YnpEuUwVGGd9y6nbaAnhrjcxhZQ==", - "dev": true, - "peerDependencies": { - "stylelint": "^14.8.0" - } - }, - "node_modules/stylelint-config-standard-scss/node_modules/stylelint-config-standard": { - "version": "26.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-26.0.0.tgz", - "integrity": "sha512-hUuB7LaaqM8abvkOO84wh5oYSkpXgTzHu2Zza6e7mY+aOmpNTjoFBRxSLlzY0uAOMWEFx0OMKzr+reG1BUtcqQ==", - "dev": true, - "dependencies": { - "stylelint-config-recommended": "^8.0.0" + "postcss": "^8.3.3", + "stylelint": "^15.5.0" }, - "peerDependencies": { - "stylelint": "^14.9.0" + "peerDependenciesMeta": { + "postcss": { + "optional": true + } } }, "node_modules/stylelint-formatter-pretty": { @@ -6038,49 +6041,15 @@ "node": ">=14" } }, - "node_modules/stylelint-formatter-pretty/node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - 
"dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/stylelint-formatter-pretty/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/stylelint-formatter-pretty/node_modules/supports-hyperlinks": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-3.0.0.tgz", - "integrity": "sha512-QBDPHyPQDRTy9ku4URNGY5Lah8PAaXs6tAAwp55sL5WCsSW7GIfdf6W5ixfziW+t7wh3GVvHyHHyQ1ESsoRvaA==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0", - "supports-color": "^7.0.0" - }, - "engines": { - "node": ">=14.18" - } - }, "node_modules/stylelint-scss": { - "version": "4.7.0", - "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-4.7.0.tgz", - "integrity": "sha512-TSUgIeS0H3jqDZnby1UO1Qv3poi1N8wUYIJY6D1tuUq2MN3lwp/rITVo0wD+1SWTmRm0tNmGO0b7nKInnqF6Hg==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-5.0.1.tgz", + "integrity": "sha512-n87iCRZrr2J7//I/QFsDXxFLnHKw633U4qvWZ+mOW6KDAp/HLj06H+6+f9zOuTYy+MdGdTuCSDROCpQIhw5fvQ==", "dev": true, "dependencies": { "postcss-media-query-parser": "^0.2.3", "postcss-resolve-nested-selector": "^0.1.1", - "postcss-selector-parser": "^6.0.11", + "postcss-selector-parser": "^6.0.13", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { @@ -6115,16 +6084,16 @@ } }, "node_modules/supports-hyperlinks": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz", - "integrity": "sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==", + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-3.0.0.tgz", + "integrity": "sha512-QBDPHyPQDRTy9ku4URNGY5Lah8PAaXs6tAAwp55sL5WCsSW7GIfdf6W5ixfziW+t7wh3GVvHyHHyQ1ESsoRvaA==", "dev": true, "dependencies": { "has-flag": "^4.0.0", "supports-color": "^7.0.0" }, "engines": { - "node": ">=8" + "node": ">=14.18" } }, "node_modules/supports-hyperlinks/node_modules/has-flag": { @@ -6261,12 +6230,15 @@ } }, "node_modules/trim-newlines": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz", - "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-4.1.1.tgz", + "integrity": "sha512-jRKj0n0jXWo6kh62nA5TEh3+4igKDXLvzBJcPpiizP7oOolUrYIxmVBG9TOtHYFHoddUk6YvAkGeGoSVTXfQXQ==", "dev": true, "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/tsconfig-paths": { @@ -6311,9 +6283,9 @@ } }, "node_modules/type-fest": { - "version": "0.18.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz", - "integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", + "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", "dev": true, "engines": { "node": ">=10" @@ -6406,12 +6378,6 @@ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", "dev": true }, - "node_modules/v8-compile-cache": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", - "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==", - 
"dev": true - }, "node_modules/validate-npm-package-license": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", @@ -6784,15 +6750,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/word-wrap": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz", - "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/wrap-ansi": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", @@ -6846,16 +6803,16 @@ "dev": true }, "node_modules/write-file-atomic": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", - "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-5.0.1.tgz", + "integrity": "sha512-+QU2zd6OTD8XWIJCbffaiQeH9U73qIqafo1x6V1snCWYGJf6cVE0cDR4D8xRzcEnfI21IFrUPzPGtcPf8AC+Rw==", "dev": true, "dependencies": { "imurmurhash": "^0.1.4", - "signal-exit": "^3.0.7" + "signal-exit": "^4.0.1" }, "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, "node_modules/ws": { @@ -6894,15 +6851,6 @@ "dev": true, "peer": true }, - "node_modules/yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, "node_modules/yargs": { "version": "17.7.2", "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", diff --git a/src/cockpit/389-console/package.json 
b/src/cockpit/389-console/package.json index 0c9beb618..281c33884 100644 --- a/src/cockpit/389-console/package.json +++ b/src/cockpit/389-console/package.json @@ -42,9 +42,9 @@ "qunit": "^2.9.3", "sass": "^1.61.0", "sizzle": "^2.3.3", - "stylelint": "^14.9.1", - "stylelint-config-standard": "^25.0.0", - "stylelint-config-standard-scss": "^5.0.0", + "stylelint": "^15.10.1", + "stylelint-config-standard": "^33.0.0", + "stylelint-config-standard-scss": "^10.0.0", "stylelint-formatter-pretty": "^3.2.0" }, "dependencies": {
0
07f9ed2c91366c49b5641c5e4903ebc8312c8ccf
389ds/389-ds-base
Bug 606545 - core schema should include numSubordinates https://bugzilla.redhat.com/show_bug.cgi?id=606545 Resolves: bug 606545 Bug Description: core schema should include numSubordinates Fix Description: The numSubordinates attribute type definition has been moved from 02common.ldif into 00core.ldif.
commit 07f9ed2c91366c49b5641c5e4903ebc8312c8ccf Author: Endi S. Dewata <[email protected]> Date: Mon Jun 21 12:09:04 2010 -0500 Bug 606545 - core schema should include numSubordinates https://bugzilla.redhat.com/show_bug.cgi?id=606545 Resolves: bug 606545 Bug Description: core schema should include numSubordinates Fix Description: The numSubordinates attribute type definition has been moved from 02common.ldif into 00core.ldif. diff --git a/ldap/schema/00core.ldif b/ldap/schema/00core.ldif index 7f5dab69c..87f14634d 100644 --- a/ldap/schema/00core.ldif +++ b/ldap/schema/00core.ldif @@ -808,3 +808,15 @@ objectClasses: ( 2.16.840.1.113719.2.142.6.1.1 NAME 'ldapSubEntry' # ################################################################################ # +attributeTypes: ( 1.3.1.1.4.1.453.16.2.103 NAME 'numSubordinates' + DESC 'count of immediate subordinates' + EQUALITY integerMatch + ORDERING integerOrderingMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE + NO-USER-MODIFICATION + USAGE directoryOperation + X-ORIGIN 'numSubordinates Internet Draft' ) +# +################################################################################ +# diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif index d039be783..3b0ef1947 100644 --- a/ldap/schema/02common.ldif +++ b/ldap/schema/02common.ldif @@ -117,7 +117,6 @@ attributeTypes: ( 2.16.840.1.113730.3.1.201 NAME 'changeLogMaximumSize' DESC 'Ne attributeTypes: ( 2.16.840.1.113730.3.1.205 NAME 'changeLogMaximumConcurrentWrites' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 1.3.6.1.4.1.250.1.60 NAME ( 'ttl' 'timeToLive' ) DESC 'time to live in seconds for cached objects' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'LDAP Caching Internet Draft' ) attributeTypes: ( 2.16.840.1.113730.3.1.612 NAME 'generation' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' ) 
-attributeTypes: ( 1.3.1.1.4.1.453.16.2.103 NAME 'numSubordinates' DESC 'count of immediate subordinates' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'numSubordinates Internet Draft' ) attributeTypes: ( 2.5.18.9 NAME 'hasSubordinates' DESC 'if TRUE, subordinate entries may exist' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'numSubordinates Internet Draft' ) attributeTypes: ( 2.16.840.1.113730.3.1.569 NAME 'cosPriority' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.570 NAME 'nsLookThroughLimit' DESC 'Binder-based search operation look through limit (candidate entries)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE USAGE directoryOperation X-ORIGIN 'Netscape Directory Server' )
0
2335995140903582672d9191ef014f9a6fc00702
389ds/389-ds-base
Ticket #191 - Implement SO_KEEPALIVE in network calls Bug Description: Need to set the KeepAlive socket option on DS connections Fix Description: Set PR_SockOpt_Keepalive on DS connections Reviewed by: richm (Thanks Rich!) https://fedorahosted.org/389/ticket/191 `
commit 2335995140903582672d9191ef014f9a6fc00702 Author: Mark Reynolds <[email protected]> Date: Tue Mar 6 11:28:59 2012 -0500 Ticket #191 - Implement SO_KEEPALIVE in network calls Bug Description: Need to set the KeepAlive socket option on DS connections Fix Description: Set PR_SockOpt_Keepalive on DS connections Reviewed by: richm (Thanks Rich!) https://fedorahosted.org/389/ticket/191 ` diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 922802c7a..85daa2825 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -3033,6 +3033,17 @@ int configure_pr_socket( PRFileDesc **pr_socket, int secure, int local ) } #endif /* !_WIN32 */ + /* Set keep_alive to keep old connections from lingering */ + pr_socketoption.option = PR_SockOpt_Keepalive; + pr_socketoption.value.keep_alive = 1; + if ( PR_SetSocketOption( *pr_socket, &pr_socketoption ) == PR_FAILURE ) { + PRErrorCode prerr = PR_GetError(); + LDAPDebug( LDAP_DEBUG_ANY, + "PR_SetSocketOption(PR_SockOpt_Keepalive failed, " + SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", + prerr, slapd_pr_strerror(prerr), 0 ); + } + if ( secure ) { pr_socketoption.option = PR_SockOpt_Nonblocking;
0
56ad9a1e7277752f70a01184e8a1fbeb0c239b8f
389ds/389-ds-base
Revert "Ticket #48755 - moving an entry could make the online init fail" This reverts commit 3606b78bacce984ab2226755c5921dffac9552c2. (cherry picked from commit 65b53fddd6acc01c21c3bc777b341b815ab66185)
commit 56ad9a1e7277752f70a01184e8a1fbeb0c239b8f Author: Noriko Hosoi <[email protected]> Date: Tue Jun 14 09:22:03 2016 -0700 Revert "Ticket #48755 - moving an entry could make the online init fail" This reverts commit 3606b78bacce984ab2226755c5921dffac9552c2. (cherry picked from commit 65b53fddd6acc01c21c3bc777b341b815ab66185) diff --git a/Makefile.am b/Makefile.am index 2468aa70c..c5567fd45 100644 --- a/Makefile.am +++ b/Makefile.am @@ -840,8 +840,7 @@ update_DATA = ldap/admin/src/scripts/exampleupdate.pl \ ldap/admin/src/scripts/50AES-pbe-plugin.ldif\ ldap/admin/src/scripts/50updateconfig.ldif \ ldap/admin/src/scripts/52updateAESplugin.pl \ - ldap/admin/src/scripts/dnaplugindepends.ldif \ - ldap/admin/src/scripts/91reindex.pl + ldap/admin/src/scripts/dnaplugindepends.ldif update_SCRIPTS = ldap/admin/src/scripts/exampleupdate.sh diff --git a/Makefile.in b/Makefile.in index 200aed38a..062d78713 100644 --- a/Makefile.in +++ b/Makefile.in @@ -2258,8 +2258,7 @@ update_DATA = ldap/admin/src/scripts/exampleupdate.pl \ ldap/admin/src/scripts/50AES-pbe-plugin.ldif\ ldap/admin/src/scripts/50updateconfig.ldif \ ldap/admin/src/scripts/52updateAESplugin.pl \ - ldap/admin/src/scripts/dnaplugindepends.ldif \ - ldap/admin/src/scripts/91reindex.pl + ldap/admin/src/scripts/dnaplugindepends.ldif update_SCRIPTS = ldap/admin/src/scripts/exampleupdate.sh diff --git a/ldap/admin/src/scripts/91reindex.pl.in b/ldap/admin/src/scripts/91reindex.pl.in deleted file mode 100644 index c861f64cf..000000000 --- a/ldap/admin/src/scripts/91reindex.pl.in +++ /dev/null @@ -1,103 +0,0 @@ -use Mozilla::LDAP::Conn; -use Mozilla::LDAP::Utils qw(normalizeDN); -use Mozilla::LDAP::API qw(:constant ldap_url_parse ldap_explode_dn); -use DSUpdate qw(isOffline); - -sub runinst { - my ($inf, $inst, $dseldif, $conn) = @_; - my $rc, @errs; - - # List of index to be reindexed - my @toreindex = qw(parentid); - # rdn-format value. See $rdn_format set below. 
- # If equal to or greater than this value, no need to reindex. - # If it needs to be unconditionally reindexed, set 0. - my @rdnconditions = (4) - - my $config = $conn->search("cn=config", "base", "(objectclass=*)"); - if (!$config) { - push @errs, ['error_finding_config_entry', 'cn=config', - $conn->getErrorString()]; - return @errs; - } - - ($rc, @errs) = isOffline($inf, $inst, $conn); - if (!$rc) { - return @errs; - } - - my $reindex = "@sbindir@/db2index -Z $inst"; - my @errs; - my $instconf = $conn->search("cn=ldbm database,cn=plugins,cn=config", "onelevel", "(objectclass=*)"); - if (!$instconf) { - push @errs, ['error_finding_config_entry', 'cn=*,cn=ldbm database,cn=plugins,cn=config', $conn->getErrorString()]; - return @errs; - } - - my $dbconf = $conn->search("cn=config,cn=ldbm database,cn=plugins,cn=config", "base", "(objectclass=*)"); - if (!$dbconf) { - push @errs, ['error_finding_config_entry', - 'cn=config,cn=ldbm database,cn=plugins,cn=config', - $conn->getErrorString()]; - return @errs; - } - - # Get the value of nsslapd-subtree-rename-switch. - my $switch = $dbconf->getValues('nsslapd-subtree-rename-switch'); - if ("" eq $switch) { - return (); # subtree-rename-switch does not exist; do nothing. - } elsif ("off" eq $switch || "OFF" eq $switch) { - return (); # subtree-rename-switch is OFF; do nothing. - } - - my $dbdir = $dbconf->getValues('nsslapd-directory'); - my $dbversion0 = $dbdir . 
"/DBVERSION"; - my $rdn_format = 0; - my $dbversionstr = ""; - if (!open(DBVERSION, "$dbversion0")) { - push @errs, ['error_opening_file', $dbversion0, $!]; - return @errs; - } else { - while (<DBVERSION>) { - if ($_ =~ /rdn-format/) { - $rdn_format = 1; - $dbversionstr = $_; - if ($_ =~ /rdn-format-1/) { - $rdn_format = 2; - } elsif ($_ =~ /rdn-format-2/) { - $rdn_format = 3; - } elsif ($_ =~ /rdn-format-3/) { - $rdn_format = 4; - } elsif ($_ =~ /rdn-format-4/) { - $rdn_format = 5; - } elsif ($_ =~ /rdn-format-5/) { - $rdn_format = 6; - } elsif ($_ =~ /rdn-format-/) { - # assume greater than -5 - $rdn_format = 7; - } - } - } - close DBVERSION; - } - - while ($instconf) { - my $backend= $instconf->getValues('cn'); - if (($backend eq "config") || ($backend eq "monitor")) { - goto NEXT; - } - - for (my $idx = 0; $ <= $#toreindex; $idx++) { - if (0 == $rdnconditions[$idx] || $rdnconditions[$idx] > $rdn_format) { - my $rc = system("$reindex -n $backend -t $idx"); - if ($rc) { - push @errs, ["error_reindexng", $idx, $backend, $rc]; - } - } - } -NEXT: - $instconf = $conn->nextEntry(); - } - - return @errs; -} diff --git a/ldap/admin/src/scripts/91subtreereindex.pl b/ldap/admin/src/scripts/91subtreereindex.pl index c4b40a3de..a031cc1a4 100644 --- a/ldap/admin/src/scripts/91subtreereindex.pl +++ b/ldap/admin/src/scripts/91subtreereindex.pl @@ -51,18 +51,14 @@ sub runinst { if ($_ =~ /rdn-format-1/) { $is_rdn_format = 2; } - elsif ($_ =~ /rdn-format-2/) { + if ($_ =~ /rdn-format-2/) { $is_rdn_format = 3; } - elsif ($_ =~ /rdn-format-/) { - # assume greater than -2 - $is_rdn_format = 4; - } } } close DBVERSION; - if (3 <= $is_rdn_format) { + if (3 == $is_rdn_format) { return (); # DB already has the new rdn format. 
} diff --git a/ldap/admin/src/scripts/setup-ds.res.in b/ldap/admin/src/scripts/setup-ds.res.in index e46b8583d..fa3756766 100644 --- a/ldap/admin/src/scripts/setup-ds.res.in +++ b/ldap/admin/src/scripts/setup-ds.res.in @@ -209,4 +209,3 @@ error_opening_file = Opening file '%s' failed. Error: %s\n error_format_error = '%s' has invalid format.\n error_update_not_offline = Error: offline mode selected but the server [%s] is still running.\n error_update_all = Failed to update all the Directory Server instances.\n -error_reindexing = Failed to reindex '%s' in backend '%s'. Error: %s\n diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in index 46b416bd1..f18c08248 100644 --- a/ldap/ldif/template-dse.ldif.in +++ b/ldap/ldif/template-dse.ldif.in @@ -930,7 +930,6 @@ objectclass: nsIndex cn: parentid nssystemindex: true nsindextype: eq -nsmatchingrule: integerOrderingMatch dn: cn=seeAlso,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config objectclass: top diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c index 3330a629b..14020b7b5 100644 --- a/ldap/servers/plugins/replication/repl5_replica_config.c +++ b/ldap/servers/plugins/replication/repl5_replica_config.c @@ -405,7 +405,7 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* { if (apply_mods) replica_set_precise_purging(r, 0); - } + } else { *returncode = LDAP_UNWILLING_TO_PERFORM; @@ -567,7 +567,8 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* { if (apply_mods) { - if (config_attr_value[0]) { + if (apply_mods && config_attr_value[0]) + { PRUint64 on_off = 0; if (strcasecmp(config_attr_value, "on") == 0){ @@ -586,7 +587,7 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* break; } replica_set_precise_purging(r, on_off); - } else { + } else if (apply_mods) { replica_set_precise_purging(r, 0); } } diff --git 
a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c index 03d0c3e31..d0c4402d1 100644 --- a/ldap/servers/plugins/replication/repl5_tot_protocol.c +++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c @@ -323,10 +323,6 @@ repl5_tot_run(Private_Repl_Protocol *prp) int init_retry = 0; Replica *replica; ReplicaId rid = 0; /* Used to create the replica keep alive subentry */ - Slapi_Entry *suffix = NULL; - char **instances = NULL; - Slapi_Backend *be = NULL; - int is_entryrdn = 0; PR_ASSERT(NULL != prp); @@ -358,21 +354,21 @@ retry: */ if (rc != ACQUIRE_SUCCESS) { - int optype, ldaprc, wait_retry; - conn_get_error(prp->conn, &optype, &ldaprc); - if (rc == ACQUIRE_TRANSIENT_ERROR && INIT_RETRY_MAX > init_retry++) { - wait_retry = init_retry * INIT_RETRY_INT; - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to " - "acquire replica for total update, error: %d," + int optype, ldaprc, wait_retry; + conn_get_error(prp->conn, &optype, &ldaprc); + if (rc == ACQUIRE_TRANSIENT_ERROR && INIT_RETRY_MAX > init_retry++) { + wait_retry = init_retry * INIT_RETRY_INT; + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to " + "acquire replica for total update, error: %d," " retrying in %d seconds.\n", - ldaprc, wait_retry); - DS_Sleep(PR_SecondsToInterval(wait_retry)); - goto retry; - } else { - agmt_set_last_init_status(prp->agmt, ldaprc, - prp->last_acquire_response_code, 0, NULL); - goto done; - } + ldaprc, wait_retry); + DS_Sleep(PR_SecondsToInterval(wait_retry)); + goto retry; + } else { + agmt_set_last_init_status(prp->agmt, ldaprc, + prp->last_acquire_response_code, 0, NULL); + goto done; + } } else if (prp->terminate) { @@ -409,121 +405,48 @@ retry: and that the order implies that perent entry is always ahead of the child entry in the list. Otherwise, the consumer would not be properly updated because bulk import at the moment skips orphand entries. 
*/ - /* XXXggood above assumption may not be valid if orphaned entry moved???? */ + /* XXXggood above assumption may not be valid if orphaned entry moved???? */ agmt_set_last_init_status(prp->agmt, 0, 0, 0, "Total update in progress"); slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Beginning total update of replica " - "\"%s\".\n", agmt_get_long_name(prp->agmt)); + "\"%s\".\n", agmt_get_long_name(prp->agmt)); /* RMREPL - need to send schema here */ pb = slapi_pblock_new (); - replica = (Replica*) object_get_data(prp->replica_object); - /* - * Get the info about the entryrdn vs. entrydn from the backend. - * If NOT is_entryrdn, its ancestor entries are always found prior to an entry. - */ - rc = slapi_lookup_instance_name_by_suffix((char *)slapi_sdn_get_dn(area_sdn), NULL, &instances, 1); - if (rc || !instances) { - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to " - "get the instance name for the suffix \"%s\".\n", slapi_sdn_get_dn(area_sdn)); - goto done; - } - be = slapi_be_select_by_instance_name(instances[0]); - if (!be) { - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to " - "get the instance for the suffix \"%s\".\n", slapi_sdn_get_dn(area_sdn)); - goto done; - } - rc = slapi_back_get_info(be, BACK_INFO_IS_ENTRYRDN, (void **)&is_entryrdn); - if (is_entryrdn) { - /* - * Supporting entries out of order -- parent could have a larger id than its children. - * Entires are retireved sorted by parentid without the allid threshold. 
- */ - /* Get suffix */ - rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION)); - if (rc) { - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to " - "get the suffix entry \"%s\".\n", slapi_sdn_get_dn(area_sdn)); - goto done; - } - - cb_data.prp = prp; - cb_data.rc = 0; - cb_data.num_entries = 1UL; - cb_data.sleep_on_busy = 0UL; - cb_data.last_busy = current_time (); - cb_data.flowcontrol_detection = 0; - cb_data.lock = PR_NewLock(); - - /* This allows during perform_operation to check the callback data - * especially to do flow contol on delta send msgid / recv msgid - */ - conn_set_tot_update_cb(prp->conn, (void *) &cb_data); - - /* Send suffix first. */ - rc = send_entry(suffix, (void *)&cb_data); - if (rc) { - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to " - "send the suffix entry \"%s\" to the consumer.\n", slapi_sdn_get_dn(area_sdn)); - goto done; - } - - /* we need to provide managedsait control so that referral entries can - be replicated */ - ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *)); - ctrls[0] = create_managedsait_control (); - ctrls[1] = create_backend_control(area_sdn); + /* we need to provide managedsait control so that referral entries can + be replicated */ + ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *)); + ctrls[0] = create_managedsait_control (); + ctrls[1] = create_backend_control(area_sdn); - /* Time to make sure it exists a keep alive subentry for that replica */ - if (replica) - { - rid = replica_get_rid(replica); - } - replica_subentry_check(area_sdn, rid); + /* Time to make sure it exists a keep alive subentry for that replica */ + replica = (Replica*) object_get_data(prp->replica_object); + if (replica) + { + rid = replica_get_rid(replica); + } + replica_subentry_check(area_sdn, rid); + + slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn), + LDAP_SCOPE_SUBTREE, 
"(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL, + repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0); + + cb_data.prp = prp; + cb_data.rc = 0; + cb_data.num_entries = 0UL; + cb_data.sleep_on_busy = 0UL; + cb_data.last_busy = current_time (); + cb_data.flowcontrol_detection = 0; + cb_data.lock = PR_NewLock(); + + /* This allows during perform_operation to check the callback data + * especially to do flow contol on delta send msgid / recv msgid + */ + conn_set_tot_update_cb(prp->conn, (void *) &cb_data); - /* Send the subtree of the suffix in the order of parentid index plus ldapsubentry and nstombstone. */ - slapi_search_internal_set_pb(pb, slapi_sdn_get_dn (area_sdn), - LDAP_SCOPE_SUBTREE, "(|(parentid>=1)(objectclass=ldapsubentry)(objectclass=nstombstone))", NULL, 0, ctrls, NULL, - repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0); - cb_data.num_entries = 0UL; - } else { - /* Original total update */ - /* we need to provide managedsait control so that referral entries can - be replicated */ - ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *)); - ctrls[0] = create_managedsait_control (); - ctrls[1] = create_backend_control(area_sdn); - - /* Time to make sure it exists a keep alive subentry for that replica */ - replica = (Replica*) object_get_data(prp->replica_object); - if (replica) - { - rid = replica_get_rid(replica); - } - replica_subentry_check(area_sdn, rid); - - slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn), - LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL, - repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0); - - cb_data.prp = prp; - cb_data.rc = 0; - cb_data.num_entries = 0UL; - cb_data.sleep_on_busy = 0UL; - cb_data.last_busy = current_time (); - cb_data.flowcontrol_detection = 0; - cb_data.lock = PR_NewLock(); - - /* This allows during perform_operation to check the callback 
data - * especially to do flow contol on delta send msgid / recv msgid - */ - conn_set_tot_update_cb(prp->conn, (void *) &cb_data); - } - /* Before we get started on sending entries to the replica, we need to * setup things for async propagation: * 1. Create a thread that will read the LDAP results from the connection. @@ -547,7 +470,7 @@ retry: slapi_search_internal_callback_pb (pb, &cb_data /* callback data */, get_result /* result callback */, send_entry /* entry callback */, - NULL /* referral callback*/); + NULL /* referral callback*/); /* * After completing the sending operation (or optionally failing), we need to clean up diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h index 2d77a8aec..949929205 100644 --- a/ldap/servers/slapd/back-ldbm/back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h @@ -132,7 +132,7 @@ typedef unsigned short u_int16_t; #define BDB_BACKEND "libback-ldbm" /* This backend plugin */ #define BDB_NEWIDL "newidl" /* new idl format */ #define BDB_RDNFORMAT "rdn-format" /* Subtree rename enabled */ -#define BDB_RDNFORMAT_VERSION "3" /* rdn-format version (by default, 0) */ +#define BDB_RDNFORMAT_VERSION "2" /* rdn-format version (by default, 0) */ #define BDB_DNFORMAT "dn-4514" /* DN format RFC 4514 compliant */ #define BDB_DNFORMAT_VERSION "1" /* DN format version */ @@ -808,11 +808,11 @@ typedef struct _back_search_result_set /* #define LDBM_ENTRYRDN_OID "2.16.840.1.113730.3.1.2097" */ #define LDBM_ANCESTORID_STR "ancestorid" -#define LDBM_ENTRYDN_STR SLAPI_ATTR_ENTRYDN +#define LDBM_ENTRYDN_STR "entrydn" #define LDBM_ENTRYRDN_STR "entryrdn" #define LDBM_NUMSUBORDINATES_STR "numsubordinates" #define LDBM_TOMBSTONE_NUMSUBORDINATES_STR "tombstonenumsubordinates" -#define LDBM_PARENTID_STR SLAPI_ATTR_PARENTID +#define LDBM_PARENTID_STR "parentid" /* Name of psuedo attribute used to track default indexes */ #define LDBM_PSEUDO_ATTR_DEFAULT ".default" diff --git 
a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index 93d42bed8..bc290cb2e 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -7315,11 +7315,6 @@ ldbm_back_get_info(Slapi_Backend *be, int cmd, void **info) } break; } - case BACK_INFO_IS_ENTRYRDN: - { - *(int *)info = entryrdn_get_switch(); - break; - } default: break; } diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c index 9a7e7bed1..9c14de421 100644 --- a/ldap/servers/slapd/back-ldbm/filterindex.c +++ b/ldap/servers/slapd/back-ldbm/filterindex.c @@ -552,7 +552,6 @@ range_candidates( struct berval *low = NULL, *high = NULL; struct berval **lows = NULL, **highs = NULL; back_txn txn = {NULL}; - int operator = 0; LDAPDebug(LDAP_DEBUG_TRACE, "=> range_candidates attr=%s\n", type, 0, 0); @@ -579,21 +578,18 @@ range_candidates( } high = attr_value_lowest(highs, slapi_berval_cmp); } - if (entryrdn_get_switch() && !strcasecmp(type, LDBM_PARENTID_STR)) { - /* parentid is treated specially that is needed for the bulk import. 
(See #48755) */ - operator = SLAPI_OP_RANGE_NO_IDL_SORT|SLAPI_OP_RANGE_NO_ALLIDS; - } + if (low == NULL) { - operator |= SLAPI_OP_LESS_OR_EQUAL; - idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator, + idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, + SLAPI_OP_LESS_OR_EQUAL, high, NULL, 0, &txn, err, allidslimit); } else if (high == NULL) { - operator |= SLAPI_OP_GREATER_OR_EQUAL; - idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator, + idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, + SLAPI_OP_GREATER_OR_EQUAL, low, NULL, 0, &txn, err, allidslimit); } else { - operator |= SLAPI_OP_LESS_OR_EQUAL; - idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator, + idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, + SLAPI_OP_LESS_OR_EQUAL, low, high, 1, &txn, err, allidslimit); } diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c index 6ca6c96fd..25b3bfa2b 100644 --- a/ldap/servers/slapd/back-ldbm/idl_new.c +++ b/ldap/servers/slapd/back-ldbm/idl_new.c @@ -350,31 +350,17 @@ error: return idl; } -typedef struct _range_id_pair { - ID key; - ID id; -} idl_range_id_pair; /* * Perform the range search in the idl layer instead of the index layer * to improve the performance. */ -/* - * NOTE: - * In the total update (bulk import), an entry requires its ancestors already added. - * To guarantee it, the range search with parentid is used with setting the flag - * SLAPI_OP_RANGE_NO_IDL_SORT in operator. - * - * If the flag is set, - * 1. the IDList is not sorted by the ID. - * 2. holding to add an ID to the IDList unless the key is found in the IDList. 
- */ IDList * idl_new_range_fetch( - backend *be, - DB* db, - DBT *lowerkey, + backend *be, + DB* db, + DBT *lowerkey, DBT *upperkey, - DB_TXN *txn, + DB_TXN *txn, struct attrinfo *ai, int *flag_err, int allidslimit, @@ -394,7 +380,7 @@ idl_new_range_fetch( size_t count = 0; #ifdef DB_USE_BULK_FETCH /* beware that a large buffer on the stack might cause a stack overflow on some platforms */ - char buffer[BULK_FETCH_BUFFER_SIZE]; + char buffer[BULK_FETCH_BUFFER_SIZE]; void *ptr; DBT dataret; #endif @@ -402,21 +388,15 @@ idl_new_range_fetch( struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private; time_t curtime; void *saved_key = NULL; - int coreop = operator & SLAPI_OP_RANGE; - ID key; - ID suffix; - idl_range_id_pair *leftover = NULL; - size_t leftoverlen = 32; - int leftovercnt = 0; if (NULL == flag_err) { return NULL; } + *flag_err = 0; if (NEW_IDL_NOOP == *flag_err) { return NULL; } - dblayer_txn_init(li, &s_txn); if (txn) { dblayer_read_txn_begin(be, txn, &s_txn); @@ -480,7 +460,7 @@ idl_new_range_fetch( #ifdef DB_USE_BULK_FETCH while (cur_key.data && (upperkey && upperkey->data ? - ((coreop == SLAPI_OP_LESS) ? + ((operator == SLAPI_OP_LESS) ? DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) < 0 : DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) <= 0) : PR_TRUE /* e.g., (x > a) */)) { @@ -516,9 +496,6 @@ idl_new_range_fetch( goto error; } } - if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) { - key = (ID)strtol((char *)cur_key.data+1 , (char **)NULL, 10); - } while (PR_TRUE) { DB_MULTIPLE_NEXT(ptr, &data, dataret.data, dataret.size); if (dataret.data == NULL) break; @@ -547,29 +524,7 @@ idl_new_range_fetch( /* note the last id read to check for dups */ lastid = id; /* we got another ID, add it to our IDL */ - if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) { - if (!idl) { - /* First time. Keep the suffix ID. 
*/ - suffix = key; - idl_rc = idl_append_extend(&idl, id); - } else if ((key == suffix) || idl_id_is_in_idlist(idl, key)) { - /* the parent is the suffix or already in idl. */ - idl_rc = idl_append_extend(&idl, id); - } else { - /* Otherwise, keep the {key,id} in leftover array */ - if (!leftover) { - leftover = (idl_range_id_pair *)slapi_ch_calloc(leftoverlen, sizeof(idl_range_id_pair)); - } else if (leftovercnt == leftoverlen) { - leftover = (idl_range_id_pair *)slapi_ch_realloc((char *)leftover, 2 * leftoverlen * sizeof(idl_range_id_pair)); - memset(leftover + leftovercnt, 0, leftoverlen); - leftoverlen *= 2; - } - leftover[leftovercnt].key = key; - leftover[leftovercnt++].id = id; - } - } else { - idl_rc = idl_append_extend(&idl, id); - } + idl_rc = idl_append_extend(&idl, id); if (idl_rc) { LDAPDebug1Arg(LDAP_DEBUG_ANY, "unable to extend id list (err=%d)\n", idl_rc); @@ -626,7 +581,7 @@ idl_new_range_fetch( } #else while (upperkey && upperkey->data ? - ((coreop == SLAPI_OP_LESS) ? + ((operator == SLAPI_OP_LESS) ? 
DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) < 0 : DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) <= 0) : PR_TRUE /* e.g., (x > a) */) { @@ -743,27 +698,9 @@ error: *flag_err = ret; /* sort idl */ - if (idl && !ALLIDS(idl) && !(operator & SLAPI_OP_RANGE_NO_IDL_SORT)) { - qsort((void *)&idl->b_ids[0], idl->b_nids, (size_t)sizeof(ID), idl_sort_cmp); - } - if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) { - int i; - int left = leftovercnt; - while (left) { - for (i = 0; i < leftovercnt; i++) { - if (leftover[i].key && idl_id_is_in_idlist(idl, leftover[i].key)) { - idl_rc = idl_append_extend(&idl, leftover[i].id); - if (idl_rc) { - LDAPDebug1Arg(LDAP_DEBUG_ANY, "unable to extend id list (err=%d)\n", idl_rc); - idl_free(&idl); - return NULL; - } - leftover[i].key = 0; - left--; - } - } - } - slapi_ch_free((void **)&leftover); + if (idl && !ALLIDS(idl)) { + qsort((void *)&idl->b_ids[0], idl->b_nids, + (size_t)sizeof(ID), idl_sort_cmp); } return idl; } diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c index 81d662196..00e78a7bc 100644 --- a/ldap/servers/slapd/back-ldbm/index.c +++ b/ldap/servers/slapd/back-ldbm/index.c @@ -1232,7 +1232,6 @@ index_range_read_ext( int timelimit = -1; back_search_result_set *sr = NULL; int isroot = 0; - int coreop = operator & SLAPI_OP_RANGE; if (!pb) { LDAPDebug(LDAP_DEBUG_ANY, "index_range_read: NULL pblock\n", @@ -1279,7 +1278,7 @@ index_range_read_ext( LDAPDebug1Arg(LDAP_DEBUG_TRACE, "index_range_read lookthrough_limit=%d\n", lookthrough_limit); - switch( coreop ) { + switch( operator ) { case SLAPI_OP_LESS: case SLAPI_OP_LESS_OR_EQUAL: case SLAPI_OP_GREATER_OR_EQUAL: @@ -1288,7 +1287,7 @@ index_range_read_ext( default: LDAPDebug( LDAP_DEBUG_ANY, "<= index_range_read(%s,%s) NULL (operator %i)\n", - type, prefix, coreop ); + type, prefix, operator ); index_free_prefix(prefix); return( NULL ); } @@ -1344,7 +1343,7 @@ index_range_read_ext( if (range != 1) { /* open range search */ char *tmpbuf = NULL; /* 
this is a search with only one boundary value */ - switch( coreop ) { + switch( operator ) { case SLAPI_OP_LESS: case SLAPI_OP_LESS_OR_EQUAL: lowerkey.dptr = slapi_ch_strdup(prefix); @@ -1452,17 +1451,8 @@ index_range_read_ext( cur_key.data = lowerkey.data; cur_key.size = lowerkey.size; lowerkey.data = NULL; /* Don't need this any more, since the memory will be freed from cur_key */ - *err = 0; - if (coreop == SLAPI_OP_GREATER) { - *err = index_range_next_key(db, &cur_key, db_txn); - if (*err) { - LDAPDebug(LDAP_DEBUG_ANY, "<= index_range_read(%s,%s) op==GREATER, no next key: %i)\n", - type, prefix, *err ); - goto error; - } - } - if (operator & SLAPI_OP_RANGE_NO_ALLIDS) { - *err = NEW_IDL_NO_ALLID; + if (operator == SLAPI_OP_GREATER) { + *err = index_range_next_key(db,&cur_key,db_txn); } if (idl_get_idl_new()) { /* new idl */ idl = idl_new_range_fetch(be, db, &cur_key, &upperkey, db_txn, @@ -1472,7 +1462,7 @@ index_range_read_ext( int retry_count = 0; while (*err == 0 && (upperkey.data && - (coreop == SLAPI_OP_LESS) ? + (operator == SLAPI_OP_LESS) ? 
DBTcmp(&cur_key, &upperkey, ai->ai_key_cmp_fn) < 0 : DBTcmp(&cur_key, &upperkey, ai->ai_key_cmp_fn) <= 0)) { /* exit the loop when we either run off the end of the table, diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c index 04cc9368f..a531abbf8 100644 --- a/ldap/servers/slapd/back-ldbm/init.c +++ b/ldap/servers/slapd/back-ldbm/init.c @@ -36,7 +36,7 @@ ldbm_back_add_schema( Slapi_PBlock *pb ) SLAPI_ATTR_FLAG_NOUSERMOD ); rc |= slapi_add_internal_attr_syntax( LDBM_PARENTID_STR, - LDBM_PARENTID_OID, INTEGER_SYNTAX_OID, INTEGERMATCH_NAME, + LDBM_PARENTID_OID, DIRSTRING_SYNTAX_OID, CASEIGNOREMATCH_NAME, SLAPI_ATTR_FLAG_SINGLE|SLAPI_ATTR_FLAG_NOUSERMOD ); rc |= slapi_add_internal_attr_syntax( "entryid", diff --git a/ldap/servers/slapd/back-ldbm/misc.c b/ldap/servers/slapd/back-ldbm/misc.c index 77c1e70fe..fe3d01bfb 100644 --- a/ldap/servers/slapd/back-ldbm/misc.c +++ b/ldap/servers/slapd/back-ldbm/misc.c @@ -79,7 +79,6 @@ static const char *systemIndexes[] = { SLAPI_ATTR_NSCP_ENTRYDN, ATTR_NSDS5_REPLCONFLICT, SLAPI_ATTR_ENTRYUSN, - SLAPI_ATTR_PARENTID, NULL }; diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index d38f9709b..3124ff6af 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -3429,8 +3429,8 @@ slapi_entry_rename(Slapi_Entry *e, const char *newrdn, int deleteoldrdn, Slapi_D /* We remove the parentid and entrydn since the backend will change these. * We don't want to give the caller an inconsistent entry. */ - slapi_entry_attr_delete(e, SLAPI_ATTR_PARENTID); - slapi_entry_attr_delete(e, SLAPI_ATTR_ENTRYDN); + slapi_entry_attr_delete(e, "parentid"); + slapi_entry_attr_delete(e, "entrydn"); /* Build new DN. If newsuperior is set, just use "newrdn,newsuperior". If * newsuperior is not set, need to add newrdn to old superior. 
*/ diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index b74f9bdfa..645cdd519 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -536,16 +536,12 @@ typedef int (*SyntaxEnumFunc)(char **names, Slapi_PluginDesc *plugindesc, /* OIDs for some commonly used matching rules */ #define DNMATCH_OID "2.5.13.1" /* distinguishedNameMatch */ #define CASEIGNOREMATCH_OID "2.5.13.2" /* caseIgnoreMatch */ -#define INTEGERMATCH_OID "2.5.13.14" /* integerMatch */ -#define INTEGERORDERINGMATCH_OID "2.5.13.15" /* integerOrderingMatch */ #define INTFIRSTCOMPMATCH_OID "2.5.13.29" /* integerFirstComponentMatch */ #define OIDFIRSTCOMPMATCH_OID "2.5.13.30" /* objectIdentifierFirstComponentMatch */ /* Names for some commonly used matching rules */ #define DNMATCH_NAME "distinguishedNameMatch" #define CASEIGNOREMATCH_NAME "caseIgnoreMatch" -#define INTEGERMATCH_NAME "integerMatch" -#define INTEGERORDERINGMATCH_NAME "integerOrderingMatch" #define INTFIRSTCOMPMATCH_NAME "integerFirstComponentMatch" #define OIDFIRSTCOMPMATCH_NAME "objectIdentifierFirstComponentMatch" diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index a7e544ad3..d3a6b25b8 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -483,7 +483,6 @@ NSPR_API(PRUint32) PR_fprintf(struct PRFileDesc* fd, const char *fmt, ...) #define SLAPI_ATTR_ENTRYDN "entrydn" #define SLAPI_ATTR_DN "dn" #define SLAPI_ATTR_RDN "rdn" -#define SLAPI_ATTR_PARENTID "parentid" #define SLAPI_ATTR_UNIQUEID_LENGTH 10 #define SLAPI_ATTR_OBJECTCLASS_LENGTH 11 #define SLAPI_ATTR_VALUE_TOMBSTONE_LENGTH 11 @@ -495,7 +494,6 @@ NSPR_API(PRUint32) PR_fprintf(struct PRFileDesc* fd, const char *fmt, ...) 
#define SLAPI_ATTR_ENTRYDN_LENGTH 7 #define SLAPI_ATTR_DN_LENGTH 2 #define SLAPI_ATTR_RDN_LENGTH 3 -#define SLAPI_ATTR_PARENTID_LENGTH 8 /* plugin shared config area */ #define SLAPI_PLUGIN_SHARED_CONFIG_AREA "nsslapd-pluginConfigArea" @@ -7026,9 +7024,6 @@ typedef struct slapi_plugindesc { #define SLAPI_OP_GREATER_OR_EQUAL 4 #define SLAPI_OP_GREATER 5 #define SLAPI_OP_SUBSTRING 6 -#define SLAPI_OP_RANGE 0xff -#define SLAPI_OP_RANGE_NO_IDL_SORT 0x100 -#define SLAPI_OP_RANGE_NO_ALLIDS 0x200 /* Defined values of SLAPI_PLUGIN_MR_USAGE: */ #define SLAPI_PLUGIN_MR_USAGE_INDEX 0 @@ -7612,8 +7607,7 @@ enum BACK_INFO_CRYPT_ENCRYPT_VALUE, /* Ctrl: clcrypt_encrypt_value */ BACK_INFO_CRYPT_DECRYPT_VALUE, /* Ctrl: clcrypt_decrypt_value */ BACK_INFO_DIRECTORY, /* Get the directory path */ - BACK_INFO_LOG_DIRECTORY, /* Get the txn log directory */ - BACK_INFO_IS_ENTRYRDN /* Get the flag for entryrdn */ + BACK_INFO_LOG_DIRECTORY /* Get the txn log directory */ }; struct _back_info_crypt_init {
0
71e894627fe94d939681c3ebe413d4238e87c01b
389ds/389-ds-base
fix licensing in sasl.m4
commit 71e894627fe94d939681c3ebe413d4238e87c01b Author: Rich Megginson <[email protected]> Date: Fri Feb 2 21:05:26 2007 +0000 fix licensing in sasl.m4 diff --git a/compile b/compile index 80b645b01..1b1d23216 100755 --- a/compile +++ b/compile @@ -1,9 +1,9 @@ #! /bin/sh # Wrapper for compilers which do not understand `-c -o'. -scriptversion=2004-09-10.20 +scriptversion=2005-05-14.22 -# Copyright (C) 1999, 2000, 2003, 2004 Free Software Foundation, Inc. +# Copyright (C) 1999, 2000, 2003, 2004, 2005 Free Software Foundation, Inc. # Written by Tom Tromey <[email protected]>. # # This program is free software; you can redistribute it and/or modify @@ -18,7 +18,7 @@ scriptversion=2004-09-10.20 # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -47,11 +47,11 @@ right script to run: please start by reading the file `INSTALL'. Report bugs to <[email protected]>. EOF - exit 0 + exit $? ;; -v | --v*) echo "compile $scriptversion" - exit 0 + exit $? ;; esac @@ -125,6 +125,8 @@ ret=$? if test -f "$cofile"; then mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + mv "${cofile}bj" "$ofile" fi rmdir "$lockdir" diff --git a/configure b/configure index 85c3b9fd0..27c68e503 100755 --- a/configure +++ b/configure @@ -23628,10 +23628,27 @@ fi +# BEGIN COPYRIGHT BLOCK +# Copyright (C) 2007 Red Hat, Inc. +# All rights reserved. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +# +# END COPYRIGHT BLOCK # -*- tab-width: 4; -*- # Configure paths for SASL -# Public domain - Nathan Kinder <[email protected]> 2006-06-26 -# Based upon svrcore.m4 (also PD) by Rich Megginson <[email protected]> { echo "$as_me:$LINENO: checking for sasl..." >&5 echo "$as_me: checking for sasl..." >&6;} diff --git a/depcomp b/depcomp index 11e2d3bfe..04701da53 100755 --- a/depcomp +++ b/depcomp @@ -1,9 +1,9 @@ #! /bin/sh # depcomp - compile a program generating dependencies as side-effects -scriptversion=2004-05-31.23 +scriptversion=2005-07-09.11 -# Copyright (C) 1999, 2000, 2003, 2004 Free Software Foundation, Inc. +# Copyright (C) 1999, 2000, 2003, 2004, 2005 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -17,8 +17,8 @@ scriptversion=2004-05-31.23 # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA -# 02111-1307, USA. +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -50,11 +50,11 @@ Environment variables: Report bugs to <[email protected]>. EOF - exit 0 + exit $? ;; -v | --v*) echo "depcomp $scriptversion" - exit 0 + exit $? 
;; esac @@ -287,36 +287,43 @@ tru64) base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then - # Dependencies are output in .lo.d with libtool 1.4. - # With libtool 1.5 they are output both in $dir.libs/$base.o.d - # and in $dir.libs/$base.o.d and $dir$base.o.d. We process the - # latter, because the former will be cleaned when $dir.libs is - # erased. - tmpdepfile1="$dir.libs/$base.lo.d" - tmpdepfile2="$dir$base.o.d" - tmpdepfile3="$dir.libs/$base.d" + # With Tru64 cc, shared objects can also be used to make a + # static library. This mecanism is used in libtool 1.4 series to + # handle both shared and static libraries in a single compilation. + # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d. + # + # With libtool 1.5 this exception was removed, and libtool now + # generates 2 separate objects for the 2 libraries. These two + # compilations output dependencies in in $dir.libs/$base.o.d and + # in $dir$base.o.d. We have to check for both files, because + # one of the two compilations can be disabled. We should prefer + # $dir$base.o.d over $dir.libs/$base.o.d because the latter is + # automatically cleaned when .libs/ is deleted, while ignoring + # the former would cause a distcleancheck panic. + tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4 + tmpdepfile2=$dir$base.o.d # libtool 1.5 + tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5 + tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else - tmpdepfile1="$dir$base.o.d" - tmpdepfile2="$dir$base.d" - tmpdepfile3="$dir$base.d" + tmpdepfile1=$dir$base.o.d + tmpdepfile2=$dir$base.d + tmpdepfile3=$dir$base.d + tmpdepfile4=$dir$base.d "$@" -MD fi stat=$? 
if test $stat -eq 0; then : else - rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" exit $stat fi - if test -f "$tmpdepfile1"; then - tmpdepfile="$tmpdepfile1" - elif test -f "$tmpdepfile2"; then - tmpdepfile="$tmpdepfile2" - else - tmpdepfile="$tmpdepfile3" - fi + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + do + test -f "$tmpdepfile" && break + done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" # That's a tab and a space in the []. @@ -460,7 +467,8 @@ cpp) done "$@" -E | - sed -n '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | + sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" diff --git a/install-sh b/install-sh index dd97db7aa..4d4a9519e 100755 --- a/install-sh +++ b/install-sh @@ -1,7 +1,7 @@ #!/bin/sh # install - install a program, script, or datafile -scriptversion=2004-09-10.20 +scriptversion=2005-05-14.22 # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the @@ -109,7 +109,7 @@ while test -n "$1"; do shift continue;; - --help) echo "$usage"; exit 0;; + --help) echo "$usage"; exit $?;; -m) chmodcmd="$chmodprog $2" shift @@ -134,7 +134,7 @@ while test -n "$1"; do shift continue;; - --version) echo "$0 $scriptversion"; exit 0;; + --version) echo "$0 $scriptversion"; exit $?;; *) # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. @@ -213,7 +213,7 @@ do fi # This sed command emulates the dirname command. - dstdir=`echo "$dst" | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + dstdir=`echo "$dst" | sed -e 's,/*$,,;s,[^/]*$,,;s,/*$,,;s,^$,.,'` # Make sure that the destination directory exists. 
@@ -226,7 +226,8 @@ do oIFS=$IFS # Some sh's can't handle IFS=/ for some reason. IFS='%' - set - `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'` + set x `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'` + shift IFS=$oIFS pathcomp= @@ -295,7 +296,7 @@ do || $doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null \ || { echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2 - (exit 1); exit + (exit 1); exit 1 } else : @@ -306,12 +307,12 @@ do $doit $mvcmd "$dsttmp" "$dstdir/$dstfile" } } - fi || { (exit 1); exit; } + fi || { (exit 1); exit 1; } done # The final little trick to "correctly" pass the exit status to the exit trap. { - (exit 0); exit + (exit 0); exit 0 } # Local variables: diff --git a/m4/sasl.m4 b/m4/sasl.m4 index beb96a3ab..00b2f2afd 100644 --- a/m4/sasl.m4 +++ b/m4/sasl.m4 @@ -1,7 +1,24 @@ +# BEGIN COPYRIGHT BLOCK +# Copyright (C) 2007 Red Hat, Inc. +# All rights reserved. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# +# END COPYRIGHT BLOCK # -*- tab-width: 4; -*- # Configure paths for SASL -# Public domain - Nathan Kinder <[email protected]> 2006-06-26 -# Based upon svrcore.m4 (also PD) by Rich Megginson <[email protected]> dnl ======================================================== dnl = sasl is used to support various authentication mechanisms diff --git a/missing b/missing index 64b5f901d..894e786e1 100755 --- a/missing +++ b/missing @@ -1,9 +1,9 @@ #! /bin/sh # Common stub for a few missing GNU programs while installing. -scriptversion=2004-09-07.08 +scriptversion=2005-06-08.21 -# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004 +# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005 # Free Software Foundation, Inc. # Originally by Fran,cois Pinard <[email protected]>, 1996. @@ -19,8 +19,8 @@ scriptversion=2004-09-07.08 # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA -# 02111-1307, USA. +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a @@ -87,12 +87,12 @@ Supported PROGRAM values: yacc create \`y.tab.[ch]', if possible, from existing .[ch] Send bug reports to <[email protected]>." - exit 0 + exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" - exit 0 + exit $? ;; -*) @@ -288,11 +288,18 @@ WARNING: \`$1' is $msg. You should only need it if call might also be the consequence of using a buggy \`make' (AIX, DU, IRIX). You might want to install the \`Texinfo' package or the \`GNU make' package. Grab either from any GNU archive site." + # The file to touch is that specified with -o ... 
file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'` if test -z "$file"; then - file=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` - file=`sed -n '/^@setfilename/ { s/.* \([^ ]*\) *$/\1/; p; q; }' $file` + # ... or it is the one specified with @setfilename ... + infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` + file=`sed -n '/^@setfilename/ { s/.* \([^ ]*\) *$/\1/; p; q; }' $infile` + # ... or it is derived from the source name (dir/f.texi becomes f.info) + test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info fi + # If the file does not exist, the user really needs makeinfo; + # let's fail without touching anything. + test -f $file || exit 1 touch $file ;;
0
84243ab8178edaa89d27c46a5b647ac99b99dc08
389ds/389-ds-base
Issue 50370 - CleanAllRUV task crashing during server shutdown Description: Added test case to check if CleanAllRUV task didn't crash during server shutdown. This code is not in a mergeable state yet. I need review, if my steps are correct, because it is a timing issue to reproduce the bug. https://pagure.io/389-ds-base/issue/50370 Reviewed by: mreynolds (Thanks!)
commit 84243ab8178edaa89d27c46a5b647ac99b99dc08 Author: Barbora Smejkalová <[email protected]> Date: Fri Jun 7 14:10:13 2019 +0200 Issue 50370 - CleanAllRUV task crashing during server shutdown Description: Added test case to check if CleanAllRUV task didn't crash during server shutdown. This code is not in a mergeable state yet. I need review, if my steps are correct, because it is a timing issue to reproduce the bug. https://pagure.io/389-ds-base/issue/50370 Reviewed by: mreynolds (Thanks!) diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py index 09805d6b2..ac96d3aab 100644 --- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py +++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py @@ -13,7 +13,7 @@ import random from lib389 import DirSrv from lib389.tasks import * from lib389.utils import * -from lib389.topologies import topology_m4 +from lib389.topologies import topology_m4, topology_m2 from lib389._constants import * from lib389.idm.directorymanager import DirectoryManager @@ -21,6 +21,8 @@ from lib389.replica import ReplicationManager, Replicas from lib389.tasks import CleanAllRUVTask from lib389.idm.user import UserAccounts from lib389.config import LDBMConfig +from lib389.config import CertmapLegacy +from lib389.idm.services import ServiceAccounts pytestmark = pytest.mark.tier1 @@ -721,6 +723,104 @@ def test_multiple_tasks_with_force(topology_m4, m4rid): log.fatal('test_abort: CleanAllRUV task was not aborted') assert False + [email protected] [email protected] +def test_clean_shutdown_crash(topology_m2): + """Check that server didn't crash after shutdown when running CleanAllRUV task + + :id: c34d0b40-3c3e-4f53-8656-5e4c2a310aaf + :setup: Replication setup with two masters + :steps: + 1. Enable TLS on both masters + 2. Reconfigure both agreements to use TLS Client auth + 3. Stop master2 + 4. Run the CleanAllRUV task + 5. Restart master1 + 6. 
Check if master1 didn't crash + 7. Restart master1 again + 8. Check if master1 didn't crash + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + cm_m1 = CertmapLegacy(m1) + cm_m2 = CertmapLegacy(m2) + + certmaps = cm_m1.list() + certmaps['default']['DNComps'] = None + certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' + + cm_m1.set(certmaps) + cm_m2.set(certmaps) + + log.info('Enabling TLS') + [i.enable_tls() for i in topology_m2] + + log.info('Creating replication dns') + services = ServiceAccounts(m1, DEFAULT_SUFFIX) + repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) + repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) + + repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) + repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) + + log.info('Changing auth type') + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + agmt_m1.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m2.sslport), + ) + + agmt_m1.remove_all('nsDS5ReplicaBindDN') + + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + agmt_m2 = replica_m2.get_agreements().list()[0] + + agmt_m2.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m1.sslport), + ) + agmt_m2.remove_all('nsDS5ReplicaBindDN') + + log.info('Stopping master2') + m2.stop() + + log.info('Run the cleanAllRUV task') + cruv_task = CleanAllRUVTask(m1) + cruv_task.create(properties={ + 'replica-id': repl.get_rid(m1), + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + + m1.restart() + + log.info('Check if master1 crashed') + assert not 
m1.detectDisorderlyShutdown() + + log.info('Repeat') + m1.restart() + assert not m1.detectDisorderlyShutdown() + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode
0
43959232f792db2b79e614f6db78f7569920fdc1
389ds/389-ds-base
Ticket 47613 - Issues setting allowed mechanisms Bug Description: Adding an empty value for nsslapd-allowed-sasl-mechanisms blocks all sasl authentication. Also changing the allowed sasl mechansism does require a restart after making a change. Fix Description: Reject an empty values for nsslapd-allowed-sasl-mechanisms, and allow config changes to occur without restarting the server. https://fedorahosted.org/389/ticket/47613 Reviewed by: nhosoi(Thanks!)
commit 43959232f792db2b79e614f6db78f7569920fdc1 Author: Mark Reynolds <[email protected]> Date: Thu Dec 12 12:48:08 2013 -0500 Ticket 47613 - Issues setting allowed mechanisms Bug Description: Adding an empty value for nsslapd-allowed-sasl-mechanisms blocks all sasl authentication. Also changing the allowed sasl mechansism does require a restart after making a change. Fix Description: Reject an empty values for nsslapd-allowed-sasl-mechanisms, and allow config changes to occur without restarting the server. https://fedorahosted.org/389/ticket/47613 Reviewed by: nhosoi(Thanks!) diff --git a/ldap/servers/slapd/configdse.c b/ldap/servers/slapd/configdse.c index bd1566ef8..b54062d5b 100644 --- a/ldap/servers/slapd/configdse.c +++ b/ldap/servers/slapd/configdse.c @@ -81,7 +81,6 @@ static const char *requires_restart[] = { #endif "cn=config:" CONFIG_RETURN_EXACT_CASE_ATTRIBUTE, "cn=config:" CONFIG_SCHEMA_IGNORE_TRAILING_SPACES, - "cn=config:nsslapd-allowed-sasl-mechanisms", "cn=config,cn=ldbm:nsslapd-idlistscanlimit", "cn=config,cn=ldbm:nsslapd-parentcheck", "cn=config,cn=ldbm:nsslapd-dbcachesize", diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index f2697426d..9165e0843 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -6817,8 +6817,7 @@ config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf, { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - if(!apply || slapdFrontendConfig->allowed_sasl_mechs){ - /* we only set this at startup, if we try again just return SUCCESS */ + if(!apply){ return LDAP_SUCCESS; } @@ -6833,6 +6832,7 @@ config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf, } CFG_LOCK_WRITE(slapdFrontendConfig); + slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs); slapdFrontendConfig->allowed_sasl_mechs = slapi_ch_strdup(value); CFG_UNLOCK_WRITE(slapdFrontendConfig); @@ -7619,7 +7619,11 @@ invalid_sasl_mech(char *str) int 
i; if(str == NULL){ - return 0; + return 1; + } + if(strlen(str) < 1){ + /* ignore empty values */ + return 1; } /*
0
5685c0430e764e62a06a0486952b4f9029b4d2b9
389ds/389-ds-base
Resolves: bug 233642 Bug Description: MMR breaks with time skew errors Reviewed by: nhosoi, nkinder (Thanks!) Fix Description: CSN remote offset generation seems broken. We seem to accumulate a remote offset that keeps growing until we hit the limit of 1 day, then replication stops. The idea behind the remote offset is that servers may be seconds or minutes off. When replication starts, one of the itmes in the payload of the start extop is the latest CSN from the supplier. The CSN timestamp field is (sampled_time + local offset + remote offset). Sampled time comes from the time thread in the server that updates the time once per second. This allows the consumer, if also a master, to adjust its CSN generation so as not to generate duplicates or CSNs less than those from the supplier. However, the logic in csngen_adjust_time appears to be wrong: remote_offset = remote_time - gen->state.sampled_time; That is, remote_offset = (remote sampled_time + remote local offset + remote remote offset) - gen->state.sampled_time It should be remote_offset = remote_time - (sampled_time + local offset + remote offset) Since the sampled time is not the actual current time, it may be off by 1 second. So the new remote_offset will be at least 1 second more than it should be. Since this is the same remote_offset used to generate the CSN to send back to the other master, this offset would keep increasing and increasing over time. The script attached to the bug helps measure this effect. The new code also attempts to refresh the sampled time while adjusting to make sure we have as current a sampled_time as possible. In the old code, the remote_offset is "sent" back and forth between the masters, carried along in the CSN timestamp generation. In the new code, this can happen too, but to a far less extent, and should max out at (real offset + N seconds) where N is the number of masters. 
In the old code, you could only call csngen_adjust_time if you first made sure the remote timestamp >= local timestamp. I have removed this restriction and moved that logic into csngen_adjust_time. I also cleaned up the code in the consumer extop - I combined the checking of the CSN from the extop with the max CSN from the supplier RUV - now we only adjust the time once based on the max of all of these CSNs sent by the supplier. Finally, I cleaned up the error handling in a few places that assumed all errors were time skew errors. Follow up - I found a bug in my previous patch - _csngen_adjust_local_time must not be called when the sampled time == the current time. So I fixed that where I was calling _csngen_adjust_local_time, and I also changed _csngen_adjust_local_time so that time_diff == 0 is a no-op. Platforms tested: RHEL5, F8, F9 Flag Day: no Doc impact: no QA impact: Should test MMR and use the script to measure the offset effect.
commit 5685c0430e764e62a06a0486952b4f9029b4d2b9 Author: Rich Megginson <[email protected]> Date: Tue Jun 24 22:22:10 2008 +0000 Resolves: bug 233642 Bug Description: MMR breaks with time skew errors Reviewed by: nhosoi, nkinder (Thanks!) Fix Description: CSN remote offset generation seems broken. We seem to accumulate a remote offset that keeps growing until we hit the limit of 1 day, then replication stops. The idea behind the remote offset is that servers may be seconds or minutes off. When replication starts, one of the itmes in the payload of the start extop is the latest CSN from the supplier. The CSN timestamp field is (sampled_time + local offset + remote offset). Sampled time comes from the time thread in the server that updates the time once per second. This allows the consumer, if also a master, to adjust its CSN generation so as not to generate duplicates or CSNs less than those from the supplier. However, the logic in csngen_adjust_time appears to be wrong: remote_offset = remote_time - gen->state.sampled_time; That is, remote_offset = (remote sampled_time + remote local offset + remote remote offset) - gen->state.sampled_time It should be remote_offset = remote_time - (sampled_time + local offset + remote offset) Since the sampled time is not the actual current time, it may be off by 1 second. So the new remote_offset will be at least 1 second more than it should be. Since this is the same remote_offset used to generate the CSN to send back to the other master, this offset would keep increasing and increasing over time. The script attached to the bug helps measure this effect. The new code also attempts to refresh the sampled time while adjusting to make sure we have as current a sampled_time as possible. In the old code, the remote_offset is "sent" back and forth between the masters, carried along in the CSN timestamp generation. 
In the new code, this can happen too, but to a far less extent, and should max out at (real offset + N seconds) where N is the number of masters. In the old code, you could only call csngen_adjust_time if you first made sure the remote timestamp >= local timestamp. I have removed this restriction and moved that logic into csngen_adjust_time. I also cleaned up the code in the consumer extop - I combined the checking of the CSN from the extop with the max CSN from the supplier RUV - now we only adjust the time once based on the max of all of these CSNs sent by the supplier. Finally, I cleaned up the error handling in a few places that assumed all errors were time skew errors. Follow up - I found a bug in my previous patch - _csngen_adjust_local_time must not be called when the sampled time == the current time. So I fixed that where I was calling _csngen_adjust_local_time, and I also changed _csngen_adjust_local_time so that time_diff == 0 is a no-op. Platforms tested: RHEL5, F8, F9 Flag Day: no Doc impact: no QA impact: Should test MMR and use the script to measure the offset effect. 
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h index ed27f4e7a..4a7f0db94 100644 --- a/ldap/servers/plugins/replication/repl5.h +++ b/ldap/servers/plugins/replication/repl5.h @@ -486,6 +486,7 @@ void replica_flush(Replica *r); void replica_get_referrals(const Replica *r, char ***referrals); void replica_set_referrals(Replica *r,const Slapi_ValueSet *vs); int replica_update_csngen_state (Replica *r, const RUV *ruv); +int replica_update_csngen_state_ext (Replica *r, const RUV *ruv, const CSN *extracsn); CSN *replica_get_purge_csn(const Replica *r); int replica_log_ruv_elements (const Replica *r); void replica_enumerate_replicas (FNEnumReplica fn, void *arg); diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c index e04734383..b92904109 100644 --- a/ldap/servers/plugins/replication/repl5_inc_protocol.c +++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c @@ -1100,13 +1100,20 @@ repl5_inc_run(Private_Repl_Protocol *prp) rc = replica_update_csngen_state (replica, ruv); object_release (prp->replica_object); replica = NULL; - if (rc != 0) /* too much skew */ + if (rc == CSN_LIMIT_EXCEEDED) /* too much skew */ { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "%s: Incremental protocol: fatal error - too much time skew between replicas!\n", agmt_get_long_name(prp->agmt)); next_state = STATE_STOP_FATAL_ERROR; } + else if (rc != 0) /* internal error */ + { + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, + "%s: Incremental protocol: fatal internal error updating the CSN generator!\n", + agmt_get_long_name(prp->agmt)); + next_state = STATE_STOP_FATAL_ERROR; + } else { rc = send_updates(prp, ruv, &num_changes_sent); diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c index 702754e3e..87b8b6d3e 100644 --- a/ldap/servers/plugins/replication/repl5_replica.c +++ 
b/ldap/servers/plugins/replication/repl5_replica.c @@ -1043,7 +1043,7 @@ replica_set_referrals(Replica *r,const Slapi_ValueSet *vs) } int -replica_update_csngen_state (Replica *r, const RUV *ruv) +replica_update_csngen_state_ext (Replica *r, const RUV *ruv, const CSN *extracsn) { int rc = 0; CSNGen *gen; @@ -1057,34 +1057,42 @@ replica_update_csngen_state (Replica *r, const RUV *ruv) return -1; } - if (csn == NULL) /* ruv contains no csn - we are done */ + if ((csn == NULL) && (extracsn == NULL)) /* ruv contains no csn and no extra - we are done */ { return 0; } + if (csn_compare(extracsn, csn) > 0) /* extracsn > csn */ + { + csn_free (&csn); /* free */ + csn = (CSN*)extracsn; /* use this csn to do the update */ + } + PR_Lock(r->repl_lock); gen = (CSNGen *)object_get_data (r->repl_csngen); PR_ASSERT (gen); rc = csngen_adjust_time (gen, csn); - if (rc != CSN_SUCCESS) - { - rc = -1; - goto done; - } - - rc = 0; + /* rc will be either CSN_SUCCESS (0) or clock skew */ done: PR_Unlock(r->repl_lock); - if (csn) + if (csn != extracsn) /* do not free the given csn */ + { csn_free (&csn); + } return rc; } +int +replica_update_csngen_state (Replica *r, const RUV *ruv) +{ + return replica_update_csngen_state_ext(r, ruv, NULL); +} + /* * dumps replica state for debugging purpose */ diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c index 0abc64171..0cdba7591 100644 --- a/ldap/servers/plugins/replication/repl_extop.c +++ b/ldap/servers/plugins/replication/repl_extop.c @@ -550,7 +550,6 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) Replica *replica = NULL; void *conn; consumer_connection_extension *connext = NULL; - CSN *mycsn = NULL; char *replicacsnstr = NULL; CSN *replicacsn = NULL; int zero = 0; @@ -703,55 +702,37 @@ multimaster_extop_StartNSDS50ReplicationRequest(Slapi_PBlock *pb) gen = object_get_data(gen_obj); if (NULL != gen) { - if (csngen_new_csn(gen, &mycsn, PR_FALSE /* notify */) == 
CSN_SUCCESS) + replicacsn = csn_new_by_string(replicacsnstr); + if (NULL != replicacsn) { - replicacsn = csn_new_by_string(replicacsnstr); - if (NULL != replicacsn) + /* ONREPL - we used to manage clock skew here. However, csn generator + code already does it. The csngen also manages local skew caused by + system clock reset, so to keep it consistent, I removed code from here */ + /* update the state of the csn generator */ + rc = replica_update_csngen_state_ext (replica, supplier_ruv, replicacsn); /* too much skew */ + if (rc == CSN_LIMIT_EXCEEDED) { - /* ONREPL - we used to manage clock skew here. However, csn generator - code already does it. The csngen also manages local skew caused by - system clock reset, so to keep it consistent, I removed code from here */ - time_t diff = 0L; - diff = csn_time_difference(mycsn, replicacsn); - if (diff > 0) - { - /* update the state of the csn generator */ - rc = csngen_adjust_time (gen, replicacsn); - if (rc == CSN_LIMIT_EXCEEDED) /* too much skew */ - { - response = NSDS50_REPL_EXCESSIVE_CLOCK_SKEW; - goto send_response; - } - } - else if (diff <= 0) - { - /* Supplier's clock is behind ours */ - /* XXXggood check if CSN smaller than purge point */ - /* response = NSDS50_REPL_BELOW_PURGEPOINT; */ - /* goto send_response; */ - } + response = NSDS50_REPL_EXCESSIVE_CLOCK_SKEW; + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, + "conn=%d op=%d repl=\"%s\": " + "Excessive clock skew from supplier RUV\n", + connid, opid, repl_root); + goto send_response; } - else + else if (rc != 0) { - /* Oops, csnstr couldn't be converted */ + /* Oops, problem csn or ruv format, or memory, or .... 
*/ response = NSDS50_REPL_INTERNAL_ERROR; goto send_response; } + } else { - /* Oops, csn generator failed */ + /* Oops, csnstr couldn't be converted */ response = NSDS50_REPL_INTERNAL_ERROR; goto send_response; } - - /* update csn generator's state from the supplier's ruv */ - rc = replica_update_csngen_state (replica, supplier_ruv); /* too much skew */ - if (rc != 0) - { - response = NSDS50_REPL_EXCESSIVE_CLOCK_SKEW; - goto send_response; - } } else { @@ -988,11 +969,6 @@ send_response: { object_release(gen_obj); } - /* mycsn */ - if (NULL != mycsn) - { - csn_free(&mycsn); - } /* replicacsn */ if (NULL != replicacsn) { diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c index adcc417b3..c02e6610b 100644 --- a/ldap/servers/plugins/replication/windows_inc_protocol.c +++ b/ldap/servers/plugins/replication/windows_inc_protocol.c @@ -796,13 +796,20 @@ windows_inc_run(Private_Repl_Protocol *prp) rc = replica_update_csngen_state (replica, ruv); object_release (prp->replica_object); replica = NULL; - if (rc != 0) /* too much skew */ + if (rc == CSN_LIMIT_EXCEEDED) /* too much skew */ { - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "%s: Incremental protocol: fatal error - too much time skew between replicas!\n", agmt_get_long_name(prp->agmt)); next_state = STATE_STOP_FATAL_ERROR; } + else if (rc != 0) /* internal error */ + { + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, + "%s: Incremental protocol: fatal internal error updating the CSN generator!\n", + agmt_get_long_name(prp->agmt)); + next_state = STATE_STOP_FATAL_ERROR; + } else { rc = send_updates(prp, ruv, &num_changes_sent); diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c index 7fcd379dd..137b995c0 100644 --- a/ldap/servers/slapd/csngen.c +++ b/ldap/servers/slapd/csngen.c @@ -60,6 +60,9 @@ #define STATE_FORMAT "%8x%8x%8x%4hx%4hx" #define 
STATE_LENGTH 32 #define MAX_VAL(x,y) ((x)>(y)?(x):(y)) +#define CSN_CALC_TSTAMP(gen) ((gen)->state.sampled_time + \ + (gen)->state.local_offset + \ + (gen)->state.remote_offset) /* * ************************************************************************** @@ -273,8 +276,7 @@ csngen_new_csn (CSNGen *gen, CSN **csn, PRBool notify) gen->state.seq_num = 0; } - (*csn)->tstamp = gen->state.sampled_time + gen->state.local_offset + - gen->state.remote_offset; + (*csn)->tstamp = CSN_CALC_TSTAMP(gen); (*csn)->seqnum = gen->state.seq_num ++; (*csn)->rid = gen->state.rid; (*csn)->subseqnum = 0; @@ -308,8 +310,9 @@ void csngen_abort_csn (CSNGen *gen, const CSN *csn) of time so that it does not generate smaller csns */ int csngen_adjust_time (CSNGen *gen, const CSN* csn) { - time_t remote_time, remote_offset; + time_t remote_time, remote_offset, cur_time; PRUint16 remote_seqnum; + int rc; if (gen == NULL || csn == NULL) return CSN_INVALID_PARAMETER; @@ -319,21 +322,38 @@ int csngen_adjust_time (CSNGen *gen, const CSN* csn) PR_RWLock_Wlock (gen->lock); - if (remote_seqnum > gen->state.seq_num ) - { - if (remote_seqnum < CSN_MAX_SEQNUM) - { - gen->state.seq_num = remote_seqnum + 1; - } - else - { - remote_time++; - } - } + /* make sure we have the current time */ + csngen_update_time(); + cur_time = g_sampled_time; + + /* make sure sampled_time is current */ + /* must only call adjust_local_time if the current time is greater than + the generator state time */ + if ((cur_time > gen->state.sampled_time) && + (CSN_SUCCESS != (rc = _csngen_adjust_local_time(gen, cur_time)))) + { + /* _csngen_adjust_local_time will log error */ + PR_RWLock_Unlock (gen->lock); + csngen_dump_state(gen); + return rc; + } - if (remote_time >= gen->state.sampled_time) + cur_time = CSN_CALC_TSTAMP(gen); + if (remote_time >= cur_time) { - remote_offset = remote_time - gen->state.sampled_time; + if (remote_seqnum > gen->state.seq_num ) + { + if (remote_seqnum < CSN_MAX_SEQNUM) + { + gen->state.seq_num = 
remote_seqnum + 1; + } + else + { + remote_time++; + } + } + + remote_offset = remote_time - cur_time; if (remote_offset > gen->state.remote_offset) { if (remote_offset <= CSN_MAX_TIME_ADJUST) @@ -346,10 +366,18 @@ int csngen_adjust_time (CSNGen *gen, const CSN* csn) "adjustment limit exceeded; value - %ld, limit - %ld\n", remote_offset, (long)CSN_MAX_TIME_ADJUST); PR_RWLock_Unlock (gen->lock); + csngen_dump_state(gen); return CSN_LIMIT_EXCEEDED; } } - } + } + else if (gen->state.remote_offset > 0) + { + /* decrease remote offset? */ + /* how to decrease remote offset but ensure that we don't + generate a duplicate CSN, or a CSN smaller than one we've already + generated? */ + } PR_RWLock_Unlock (gen->lock); @@ -576,7 +604,14 @@ _csngen_adjust_local_time (CSNGen *gen, time_t cur_time) { time_t time_diff = cur_time - gen->state.sampled_time; - if (time_diff > 0) + if (time_diff == 0) { + /* This is a no op - _csngen_adjust_local_time should never be called + in this case, because there is nothing to adjust - but just return + here to protect ourselves + */ + return CSN_SUCCESS; + } + else if (time_diff > 0) { gen->state.sampled_time = cur_time; if (time_diff > gen->state.local_offset) @@ -588,7 +623,7 @@ _csngen_adjust_local_time (CSNGen *gen, time_t cur_time) return CSN_SUCCESS; } - else /* time was turend back */ + else /* time was turned back */ { if (abs (time_diff) > CSN_MAX_TIME_ADJUST) {
0
50d92a4a495d3743397b967c52444bdfd7c687e3
389ds/389-ds-base
Bug(s) fixed: 175098 Bug Description: The dsgw cookie directory needs to be writable by the admin server uid Reviewed by: Nathan (Thanks!) Fix Description: DS Gateway authentication breaks because the admin server uid cannot write to the bin/slapd/authck directory. This fix makes sure that directory is owned by the correct uid. I've also put a similar fix into the ds spec file %post section to fix this when upgrading from fds10 to fds101. Platforms tested: Fedora Core 4 Flag Day: no Doc impact: no
commit 50d92a4a495d3743397b967c52444bdfd7c687e3 Author: Rich Megginson <[email protected]> Date: Wed Dec 7 21:29:14 2005 +0000 Bug(s) fixed: 175098 Bug Description: The dsgw cookie directory needs to be writable by the admin server uid Reviewed by: Nathan (Thanks!) Fix Description: DS Gateway authentication breaks because the admin server uid cannot write to the bin/slapd/authck directory. This fix makes sure that directory is owned by the correct uid. I've also put a similar fix into the ds spec file %post section to fix this when upgrading from fds10 to fds101. Platforms tested: Fedora Core 4 Flag Day: no Doc impact: no diff --git a/ldap/admin/src/create_instance.c b/ldap/admin/src/create_instance.c index 08136ccf5..2e68f753d 100644 --- a/ldap/admin/src/create_instance.c +++ b/ldap/admin/src/create_instance.c @@ -1921,6 +1921,7 @@ char *ds_cre_subdirs(char *sroot, server_config_s *cf, char *cs_path, PR_snprintf(subdir, sizeof(subdir), "%s%cbin%cslapd%cauthck", sroot, FILE_PATHSEP, FILE_PATHSEP, FILE_PATHSEP); if( (t = create_instance_mkdir_p(subdir, NEWDIR_MODE)) ) return(t); + chownfile (pw, subdir); return (t); } diff --git a/ldap/cm/newinst/ns-update b/ldap/cm/newinst/ns-update index 18b579de2..16f92f387 100755 --- a/ldap/cm/newinst/ns-update +++ b/ldap/cm/newinst/ns-update @@ -200,4 +200,13 @@ if [ -f fix_secmod_db_64 ]; then ./fix_secmod_db_64 $sroot/alias $sroot/shared32/bin fi +# chown the cookie directory - bug 175098 +if [ "$ssuser" ] ; then + if [ "$ssgrp" ] ; then + chown $ssuser:$ssgrp $sroot/bin/slapd/authck + else + chown $ssuser $sroot/bin/slapd/authck + fi +fi + exit $rc diff --git a/ldapserver.spec.tmpl b/ldapserver.spec.tmpl index f6d040986..3ab788d13 100644 --- a/ldapserver.spec.tmpl +++ b/ldapserver.spec.tmpl @@ -44,8 +44,8 @@ %define __os_install_post %{nil} Summary: @COMPANY-PRODUCT-NAME@ Name: @LCASE-COMPANY-NAME-NOSP@-ds -Version: @GEN-VERSION@ -Release: 2.@PLATFORM@ +Version: @NOSP-VERSION@ +Release: 1.@PLATFORM@ License: GPL plus 
extensions Group: System Environment/Daemons URL: @COMPANY-URL@ @@ -78,6 +78,8 @@ Prefix: /opt/%{name} # the echo yes is for dsktune to continue # the second echo yes is for some platforms that need it (echo yes ; echo yes) | ./setup -b $RPM_BUILD_ROOT/%{prefix} +# remove the setup log files so they aren't packaged +rm -f $RPM_BUILD_ROOT/%{prefix}/setup/*/*.log %clean if [ -z "$RPM_INSTALL_PREFIX" ]; then @@ -120,7 +122,37 @@ echo "" if [ -z "$RPM_INSTALL_PREFIX" ]; then RPM_INSTALL_PREFIX=%{prefix} fi -echo "Install finished. Please run $RPM_INSTALL_PREFIX/setup/setup to set up the servers." +if [ "$1" -ge 1 ] ; then +# patch file to upgrade admin server from 1.0 to 1.0.1 + if [ -f $RPM_INSTALL_PREFIX/setup/adminserver10to101.patch ] ; then + patch -d $RPM_INSTALL_PREFIX -p0 < $RPM_INSTALL_PREFIX/setup/adminserver10to101.patch + fi +# patch file to fix start-admin on Fedora Core 2 + if [ -f $RPM_INSTALL_PREFIX/setup/adminserver-start-admin.patch ] ; then + patch -d $RPM_INSTALL_PREFIX -p0 < $RPM_INSTALL_PREFIX/setup/adminserver-start-admin.patch + fi +# fix up file permissions + testfile=$RPM_INSTALL_PREFIX/admin-serv/config/nss.conf + if [ ! -f $testfile ] ; then + testfile=$RPM_INSTALL_PREFIX/admin-serv/config/adm.conf + fi + if [ -f $testfile ] ; then + usergroup=`ls -l $testfile | awk '{print $3":"$4}'` + if [ -d $RPM_INSTALL_PREFIX/admin-serv/config ] ; then + chown $usergroup $RPM_INSTALL_PREFIX/admin-serv/config + fi + if [ -d $RPM_INSTALL_PREFIX/bin/slapd/authck ] ; then + chown $usergroup $RPM_INSTALL_PREFIX/bin/slapd/authck + fi + if [ -d $RPM_INSTALL_PREFIX/alias ] ; then + chown $usergroup $RPM_INSTALL_PREFIX/alias + fi + fi + echo "Install finished. Please restart your directory servers first," + echo "then the admin server. Do not run setup." +else + echo "Install finished. Please run $RPM_INSTALL_PREFIX/setup/setup to set up the servers." 
+fi %preun # only run uninstall if this is the last version of the package @@ -133,6 +165,11 @@ if [ "$1" = 0 ] ; then fi %changelog +* Tue Dec 6 2005 Rich Megginson <[email protected]> - 1.0.1-1 +- Use nosp version instead of gen version to get patch version numbers +- Patch the admin server in the post install section +- Remove the unnecessary log files after setup so they aren't packaged + * Wed Nov 09 2005 Nathan Kinder <[email protected]> 7.1-2 - Changed cyrus-sasl dependency to >= 2.1.15 for RHEL3 compatibility
0
4b53c3122bf2720941cf49c9be2a31808405399a
389ds/389-ds-base
Issue: 50082 - Port state test suite Description: Port state test suite issue: https://pagure.io/389-ds-base/issue/50082 Reviewed by: Mark Reynolds
commit 4b53c3122bf2720941cf49c9be2a31808405399a Author: Anuj Borah <[email protected]> Date: Wed Dec 12 11:11:46 2018 +0530 Issue: 50082 - Port state test suite Description: Port state test suite issue: https://pagure.io/389-ds-base/issue/50082 Reviewed by: Mark Reynolds diff --git a/dirsrvtests/tests/suites/stat/__init__.py b/dirsrvtests/tests/suites/stat/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/dirsrvtests/tests/suites/stat/mmt_state_test.py b/dirsrvtests/tests/suites/stat/mmt_state_test.py new file mode 100644 index 000000000..22273891c --- /dev/null +++ b/dirsrvtests/tests/suites/stat/mmt_state_test.py @@ -0,0 +1,354 @@ +import os +import logging +import ldap +import pytest +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_m2 as topo +from lib389._constants import * + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BINVALUE1 = 'thedeadbeef1' +BINVALUE2 = 'thedeadbeef2' +BINVALUE3 = 'thedeadbeef3' + +USER_PROPERTIES = { + 'uid': 'state1usr', + 'cn': 'state1usr', + 'sn': 'state1usr', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': PASSWORD, + 'homeDirectory': '/home/testuser' +} + + +def _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Check if list of operational attributes present for a given entry""" + + log.info('Checking if operational attrs vucsn, adcsn and vdcsn present for: {}'.format(tuser)) + entry = topo.ms["master1"].search_s(tuser.dn, ldap.SCOPE_BASE, 'objectclass=*',['nscpentrywsi']) + if oper_attr: + for line in str(entry).split('\n'): + if attr_name + ';' in line: + if not 'DELETE' in oper_type: + assert any(attr in line for attr in exp_values) and oper_attr in line + else: + assert 'deleted' in line and oper_attr in line and attr_value in line + + 
[email protected]("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('description', 'Test1usr1', 'ldap.MOD_ADD', ['Test1usr1'], 'vucsn'), + ('description', 'Test1usr2', 'ldap.MOD_ADD', ['Test1usr1', + 'Test1usr2'], 'vucsn'), + ('description', 'Test1usr3', 'ldap.MOD_ADD', + ['Test1usr1', 'Test1usr2', 'Test1usr3'], 'vucsn'), + ('description', 'Test1usr4', 'ldap.MOD_REPLACE', ['Test1usr4'], + 'adcsn'), + ('description', 'Test1usr4', 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_desc_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's description attribute and check if description attribute is + added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: f0830538-02cf-11e9-8be0-8c16451d917b + :setup: Replication with two masters. + :steps: 1. Add user to Master1 without description attribute. + 2. Add description attribute to user. + 3. Check if only one description attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second description attribute to user. + 6. Check if two description attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third description attribute to user. + 9. Check if three description attributes exist. + 10. Check if operational attribute vucsn exist. + 11. Replace description attribute for the user. + 12. Check if only one description attribute exist. + 13. Check if operational attribute adcsn exist. + 14. Delete description attribute for the user. + 15. Check if no description attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding description attribute should PASS + 3. Only one description attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new description attribute should PASS + 6. Two description attribute should be present. + 7. Vucsn attribute should be present. + 8. 
Adding a new description attribute should PASS + 9. Three description attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new description attribute should PASS + 12. Only one description attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting description attribute should PASS + 15. No description attribute should be present. + 16. Vdcsn attribute should be present. + """ + + test_entry = 'state1test' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of description attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + [email protected]("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('cn', 'TestCN1', 'ldap.MOD_ADD', ['TestCN1', 'TestCNusr1'], 'vucsn'), + ('cn', 'TestCN2', 'ldap.MOD_ADD', ['TestCN1', + 'TestCN2', 'TestCNusr1'], 'vucsn'), + ('cn', 'TestnewCN3', 'ldap.MOD_REPLACE', ['TestnewCN3'], 'adcsn'), + ('cn', 'TestnewCN3', 'ldap.MOD_DELETE', None, None)]) +def test_check_cn_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's cn attribute and check if cn attribute is added/modified/deleted and + operational attributes vucsn, adcsn and vdcsn are present. + + :id: 19614bae-02d0-11e9-a295-8c16451d917b + :setup: Replication with two masters. + :steps: 1. Add user to Master1 with cn attribute. + 2. Add a new cn attribute to user. + 3. Check if two cn attributes exist. + 4. 
Check if operational attribute vucsn exist for each cn attribute. + 5. Add a new cn attribute to user. + 6. Check if three cn attributes exist. + 7. Check if operational attribute vucsn exist for each cn attribute. + 8. Replace cn attribute for the user. + 9. Check if only one cn attribute exist. + 10. Check if operational attribute adcsn exist. + 11. Delete cn attribute from user and check if it fails. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding a new cn attribute should PASS + 3. Two cn attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new cn attribute should PASS + 6. Three cn attribute should be present. + 7. Vucsn attribute should be present. + 8. Replacing new cn attribute should PASS + 9. Only one cn attribute should be present. + 10. Operational attribute adcsn should be present. + 11. Deleting cn attribute should fail with ObjectClass violation error. + """ + + test_entry = 'TestCNusr1' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + if 'MOD_DELETE' in oper_type: + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + tuser.set(attr_name, attr_value, eval(oper_type)) + else: + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of cn attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + [email protected]("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('preferredlanguage', 'Chinese', 'ldap.MOD_REPLACE', ['Chinese'], + 'vucsn'), + ('preferredlanguage', 'French', 'ldap.MOD_ADD', None, None), + 
('preferredlanguage', 'German', 'ldap.MOD_REPLACE', ['German'], 'adcsn'), + ('preferredlanguage', 'German', 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_single_value_attr_state(topo, attr_name, attr_value, oper_type, + exp_values, oper_attr): + """Modify user's preferredlanguage attribute and check if preferredlanguage attribute is + added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 22fd645e-02d0-11e9-a9e4-8c16451d917b + :setup: Replication with two masters. + :steps: 1. Add user to Master1 without preferredlanguage attribute. + 2. Add a new preferredlanguage attribute to user. + 3. Check if one preferredlanguage attributes exist. + 4. Check if operational attribute vucsn exist. + 5. Add a new preferredlanguage attribute for the user and check if its rejected. + 6. Replace preferredlanguage attribute for the user. + 7. Check if only one preferredlanguage attribute exist. + 8. Check if operational attribute adcsn exist with preferredlanguage. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding a new preferredlanguage attribute should PASS + 3. Only one preferredlanguage attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new preferredlanguage should fail with ObjectClass violation error. + 6. Replace preferredlanguage should PASS. + 7. Only one preferredlanguage attribute should be present. + 8. Operational attribute adcsn should be present with preferredlanguage. 
+ """ + + test_entry = 'Langusr1' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + if 'MOD_ADD' in oper_type: + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + tuser.set(attr_name, attr_value, eval(oper_type)) + else: + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of cn attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + [email protected]("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('roomnumber;office', 'Tower1', 'ldap.MOD_ADD', ['Tower1'], 'vucsn'), + ('roomnumber;office', 'Tower2', 'ldap.MOD_ADD', ['Tower1', 'Tower2'], + 'vucsn'), + ('roomnumber;office', 'Tower3', 'ldap.MOD_ADD', ['Tower1', 'Tower2', + 'Tower3'], 'vucsn'), + ('roomnumber;office', 'Tower4', 'ldap.MOD_REPLACE', ['Tower4'], 'adcsn'), + ('roomnumber;office', 'Tower4', 'ldap.MOD_DELETE', [], 'vucsn')]) +def test_check_subtype_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's roomnumber;office attribute subtype and check if roomnumber;office attribute + is added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 29ab87a4-02d0-11e9-b104-8c16451d917b + :setup: Replication with two masters. + :steps: 1. Add user to Master1 without roomnumber;office attribute. + 2. Add roomnumber;office attribute to user. + 3. Check if only one roomnumber;office attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second roomnumber;office attribute to user. + 6. 
Check if two roomnumber;office attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third roomnumber;office attribute to user. + 9. Check if three roomnumber;office attributes exist. + 10. Check if operational attribute vucsn exist. + 11. Replace roomnumber;office attribute for the user. + 12. Check if only one roomnumber;office attribute exist. + 13. Check if operational attribute adcsn exist. + 14. Delete roomnumber;office attribute for the user. + 15. Check if no roomnumber;office attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding roomnumber;office attribute should PASS + 3. Only one roomnumber;office attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new roomnumber;office attribute should PASS + 6. Two roomnumber;office attribute should be present. + 7. Vucsn attribute should be present. + 8. Adding a new roomnumber;office attribute should PASS + 9. Three roomnumber;office attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new roomnumber;office attribute should PASS + 12. Only one roomnumber;office attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting roomnumber;office attribute should PASS + 15. No roomnumber;office attribute should be present. + 16. Vdcsn attribute should be present. 
+ """ + + test_entry = 'roomoffice1usr' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of roomnumber;office attributes are present for a given entry') + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking if operational attributes are present for cn') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + [email protected]("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('jpegphoto', BINVALUE1, 'ldap.MOD_ADD', [BINVALUE1], 'vucsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_ADD', [BINVALUE1, BINVALUE2], + 'vucsn'), + ('jpegphoto', BINVALUE3, 'ldap.MOD_ADD', [BINVALUE1, BINVALUE2, + BINVALUE3], 'vucsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_REPLACE', [BINVALUE2], 'adcsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_jpeg_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's jpegphoto attribute and check if jpegphoto attribute is added/modified/deleted + and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 312ac0d0-02d0-11e9-9d34-8c16451d917b + :setup: Replication with two masters. + :steps: 1. Add user to Master1 without jpegphoto attribute. + 2. Add jpegphoto attribute to user. + 3. Check if only one jpegphoto attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second jpegphoto attribute to user. + 6. Check if two jpegphoto attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third jpegphoto attribute to user. + 9. Check if three jpegphoto attributes exist. + 10. 
Check if operational attribute vucsn exist. + 11. Replace jpegphoto attribute for the user. + 12. Check if only one jpegphoto attribute exist. + 13. Check if operational attribute adcsn exist. + 14. Delete jpegphoto attribute for the user. + 15. Check if no jpegphoto attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding jpegphoto attribute should PASS + 3. Only one jpegphoto attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new jpegphoto attribute should PASS + 6. Two jpegphoto attribute should be present. + 7. Vucsn attribute should be present. + 8. Adding a new jpegphoto attribute should PASS + 9. Three jpegphoto attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new jpegphoto attribute should PASS + 12. Only one jpegphoto attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting jpegphoto attribute should PASS + 15. No jpegphoto attribute should be present. + 16. Vdcsn attribute should be present. + """ + + test_entry = 'testJpeg1usr' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of jpeg attributes are present for a given entry') + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking if operational attributes are present for cn') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) \ No newline at end of file
0
ef4052a32116494d3ce3775da8401adb799674a5
389ds/389-ds-base
Issue 4815 - RFE - Add Replication Log Analysis Tool with CLI Support (#6466) Description: Add a new ReplicationLogAnalyzer class and supporting infrastructure to analyze replication performance across multiple servers. The tool parses replication logs, tracks CSNs, calculates lag times, and generates comprehensive reports in interactive HTML, static PNG and CSV formats. Based on: https://github.com/droideck/ansible-ds389-repl-monitoring Fixes: https://github.com/389ds/389-ds-base/issues/6465 Reviewed by: @progier389, @tbordaz, @vashirov (Thanks!!!)
commit ef4052a32116494d3ce3775da8401adb799674a5 Author: Simon Pichugin <[email protected]> Date: Tue Jan 28 19:40:00 2025 -0500 Issue 4815 - RFE - Add Replication Log Analysis Tool with CLI Support (#6466) Description: Add a new ReplicationLogAnalyzer class and supporting infrastructure to analyze replication performance across multiple servers. The tool parses replication logs, tracks CSNs, calculates lag times, and generates comprehensive reports in interactive HTML, static PNG and CSV formats. Based on: https://github.com/droideck/ansible-ds389-repl-monitoring Fixes: https://github.com/389ds/389-ds-base/issues/6465 Reviewed by: @progier389, @tbordaz, @vashirov (Thanks!!!) diff --git a/dirsrvtests/tests/suites/replication/repl_log_monitoring_test.py b/dirsrvtests/tests/suites/replication/repl_log_monitoring_test.py new file mode 100644 index 000000000..b9d151540 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/repl_log_monitoring_test.py @@ -0,0 +1,519 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2025 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import time +import shutil +import json +import pytest +import logging +import tempfile +from datetime import datetime, timezone + +from lib389.tasks import * +from lib389.utils import * +from lib389.backend import Backends +from lib389.topologies import topology_m4 as topo_m4 +from lib389.idm.user import UserAccount +from lib389.replica import ReplicationManager +from lib389.repltools import ReplicationLogAnalyzer +from lib389._constants import * + +pytestmark = pytest.mark.tier0 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _generate_test_data(supplier, suffix, count, user_prefix="test_user"): + """Generate test users and modifications""" + test_users = [] + for i in range(count): + user_dn = f'uid={user_prefix}_{i},{suffix}' + test_user = UserAccount(supplier, user_dn) + test_user.create(properties={ + 'uid': f'{user_prefix}_{i}', + 'cn': f'Test User {i}', + 'sn': f'User{i}', + 'userPassword': 'password', + 'uidNumber': str(1000 + i), + 'gidNumber': '2000', + 'homeDirectory': f'/home/{user_prefix}_{i}' + }) + + # Generate modifications + for j in range(3): + test_user.add('description', f'Description {j}') + test_user.replace('cn', f'Modified User {test_user.get_attr_val("uid")}') + for j in range(3): + try: + test_user.remove('description', f'Description {j}') + except Exception: + pass + + test_users.append(test_user) + + return test_users + + +def _cleanup_test_data(test_users, tmp_dir): + """Clean up test users and temporary directory""" + for user in test_users: + try: + if user.exists(): + user.delete() + except Exception as e: + log.warning(f"Error cleaning up test user: {e}") + + try: + shutil.rmtree(tmp_dir, ignore_errors=True) + except Exception as e: + log.error(f"Error cleaning up temporary directory: {e}") + + +def 
_cleanup_multi_suffix_test(test_users_by_suffix, tmp_dir, suppliers, extra_suffixes): + """Clean up multi-suffix test data""" + for users in test_users_by_suffix.values(): + for user in users: + try: + if user.exists(): + user.delete() + except Exception as e: + log.warning(f"Error cleaning up test user: {e}") + + # Remove extra backends + for suffix in extra_suffixes: + for supplier in suppliers: + try: + backends = Backends(supplier) + backends.get(suffix).delete() + except Exception as e: + log.warning(f"Error removing backend for {suffix}: {e}") + + try: + shutil.rmtree(tmp_dir, ignore_errors=True) + except Exception as e: + log.error(f"Error cleaning up temporary directory: {e}") + + +def test_replication_log_monitoring_basic(topo_m4): + """Test basic replication log monitoring functionality + + :id: e62ed58b-1acd-4e7d-9cfd-948ded4cede8 + :setup: Four suppliers replication setup + :steps: + 1. Create test data with known replication patterns + 2. Configure log monitoring with basic options + 3. Generate and verify reports + 4. Validate report contents + :expectedresults: + 1. Test data should be properly replicated + 2. Reports should be generated successfully + 3. Report contents should match expected patterns + 4. 
Reports should contain expected data and statistics + """ + tmp_dir = tempfile.mkdtemp(prefix='repl_analysis_') + test_users = [] + suppliers = [topo_m4.ms[f"supplier{i}"] for i in range(1, 5)] + + try: + # Clear logs and restart servers + for supplier in suppliers: + supplier.deleteAccessLogs(restart=True) + + # Generate test data with known patterns + log.info('Creating test data...') + test_users = _generate_test_data(suppliers[0], DEFAULT_SUFFIX, 10) + + # Wait for replication + repl = ReplicationManager(DEFAULT_SUFFIX) + for s1 in suppliers: + for s2 in suppliers: + if s1 != s2: + repl.wait_for_replication(s1, s2) + + # Restart to flush logs + for supplier in suppliers: + supplier.restart() + + # Configure monitoring + log_dirs = [s.ds_paths.log_dir for s in suppliers] + repl_monitor = ReplicationLogAnalyzer( + log_dirs=log_dirs, + suffixes=[DEFAULT_SUFFIX], + anonymous=False, + only_fully_replicated=True + ) + + # Parse logs and generate reports + repl_monitor.parse_logs() + generated_files = repl_monitor.generate_report( + output_dir=tmp_dir, + formats=['csv', 'html', 'json'], + report_name='basic_test' + ) + + # Verify report files exist and have content + for fmt in ['csv', 'html', 'summary']: + assert os.path.exists(generated_files[fmt]) + assert os.path.getsize(generated_files[fmt]) > 0 + + # Verify CSV content + with open(generated_files['csv'], 'r') as f: + csv_content = f.read() + # Verify headers + assert 'Timestamp,Server,CSN,Suffix' in csv_content + # Verify all servers present + for supplier in suppliers: + assert supplier.serverid in csv_content + # Verify suffix + assert DEFAULT_SUFFIX in csv_content + + # Verify JSON summary + with open(generated_files['summary'], 'r') as f: + summary = json.load(f) + assert 'analysis_summary' in summary + stats = summary['analysis_summary'] + + # Verify basic stats + assert stats['total_servers'] == len(suppliers) + assert stats['total_updates'] > 0 + assert stats['updates_by_suffix'][DEFAULT_SUFFIX] > 0 + 
assert 'average_lag' in stats + assert 'maximum_lag' in stats + + finally: + _cleanup_test_data(test_users, tmp_dir) + + +def test_replication_log_monitoring_advanced(topo_m4): + """Test advanced replication monitoring features + + :id: 5bb8fd9f-c3ed-4118-a2f9-fd5d733230c7 + :setup: Four suppliers replication setup + :steps: + 1. Test filtering options + 2. Test time range filtering + 3. Test anonymization + 4. Verify lag calculations + :expectedresults: + 1. Filtering should work as expected + 2. Time range filtering should limit results + 3. Anonymization should hide server names + 4. Lag calculations should be accurate + """ + tmp_dir = tempfile.mkdtemp(prefix='repl_analysis_') + test_users = [] + suppliers = [topo_m4.ms[f"supplier{i}"] for i in range(1, 5)] + + try: + # Clear logs and restart servers + for supplier in suppliers: + supplier.deleteAccessLogs(restart=True) + + # Generate test data + start_time = datetime.now(timezone.utc) + test_users = _generate_test_data(suppliers[0], DEFAULT_SUFFIX, 20) + + # Force some lag by delaying operations + time.sleep(2) + for user in test_users[10:]: + user.replace('description', 'Modified after delay') + + # Wait for replication + repl = ReplicationManager(DEFAULT_SUFFIX) + for s1 in suppliers: + for s2 in suppliers: + if s1 != s2: + repl.wait_for_replication(s1, s2) + + end_time = datetime.now(timezone.utc) + + # Restart to flush logs + for supplier in suppliers: + supplier.restart() + + log_dirs = [s.ds_paths.log_dir for s in suppliers] + + # Test 1: Lag time filtering + repl_monitor = ReplicationLogAnalyzer( + log_dirs=log_dirs, + suffixes=[DEFAULT_SUFFIX], + lag_time_lowest=1.0 + ) + repl_monitor.parse_logs() + results1 = repl_monitor.build_result() + + # Verify lag filtering: + # Only consider dict values, skip the special "__hop_lags__" (if present) + for csn, server_map in results1['lag'].items(): + t_list = [ + record['logtime'] + for key, record in server_map.items() + if isinstance(record, dict) and key != 
'__hop_lags__' + ] + if not t_list: + # If no normal records exist, just skip + continue + + lag_time = max(t_list) - min(t_list) + # Must be strictly > 1.0 + assert lag_time > 1.0, f"Expected lag_time > 1.0, got {lag_time}" + + # Test 2: Time range filtering + repl_monitor = ReplicationLogAnalyzer( + log_dirs=log_dirs, + suffixes=[DEFAULT_SUFFIX], + time_range={'start': start_time, 'end': end_time} + ) + repl_monitor.parse_logs() + results2 = repl_monitor.build_result() + + # Verify the 'start-time' in results is within or after our start_time + utc_start_time = datetime.fromtimestamp(results2['utc-start-time'], timezone.utc) + assert utc_start_time >= start_time, ( + f"Expected start time >= {start_time}, got {utc_start_time}" + ) + + # Test 3: Anonymization + repl_monitor = ReplicationLogAnalyzer( + log_dirs=log_dirs, + suffixes=[DEFAULT_SUFFIX], + anonymous=True + ) + repl_monitor.parse_logs() + generated_files = repl_monitor.generate_report( + output_dir=tmp_dir, + formats=['csv'], + report_name='anon_test' + ) + + # Verify anonymization + with open(generated_files['csv'], 'r') as f: + content = f.read() + for supplier in suppliers: + # Original supplier.serverid should NOT appear + assert supplier.serverid not in content, ( + f"Found real server name {supplier.serverid} in CSV" + ) + # Instead, placeholders like 'server_0' should exist + assert 'server_0' in content, "Expected 'server_0' placeholder not found in CSV" + + finally: + _cleanup_test_data(test_users, tmp_dir) + + +def test_replication_log_monitoring_multi_suffix(topo_m4): + """Test multi-suffix replication monitoring + + :id: 6ef38c42-4961-476f-9e72-488d99211b8b + :setup: Four suppliers replication setup + :steps: + 1. Create multiple suffixes with different replication patterns + 2. Generate reports for all suffixes + 3. Verify suffix-specific statistics + :expectedresults: + 1. All suffixes should be monitored + 2. Reports should show correct per-suffix data + 3. 
Statistics should be accurate for each suffix + """ + tmp_dir = tempfile.mkdtemp(prefix='multi_suffix_repl_') + SUFFIX_2 = "dc=test2" + SUFFIX_3 = "dc=test3" + all_suffixes = [DEFAULT_SUFFIX, SUFFIX_2, SUFFIX_3] + test_users_by_suffix = {suffix: [] for suffix in all_suffixes} + suppliers = [topo_m4.ms[f"supplier{i}"] for i in range(1, 5)] + + try: + # Clear logs and restart servers + for supplier in suppliers: + supplier.deleteAccessLogs(restart=True) + + # Setup additional suffixes + for suffix in [SUFFIX_2, SUFFIX_3]: + repl = ReplicationManager(suffix) + for supplier in suppliers: + props = { + 'cn': f'userRoot_{suffix.split(",")[0][3:]}', + 'nsslapd-suffix': suffix + } + backends = Backends(supplier) + be = backends.create(properties=props) + be.create_sample_entries('001004002') + + if supplier == suppliers[0]: + repl.create_first_supplier(supplier) + else: + repl.join_supplier(suppliers[0], supplier) + + # Create full mesh + for suffix in all_suffixes: + repl = ReplicationManager(suffix) + for i, s1 in enumerate(suppliers): + for s2 in suppliers[i+1:]: + repl.ensure_agreement(s1, s2) + repl.ensure_agreement(s2, s1) + + # Generate different amounts of test data per suffix + test_users_by_suffix[DEFAULT_SUFFIX] = _generate_test_data( + suppliers[0], DEFAULT_SUFFIX, 10 + ) + test_users_by_suffix[SUFFIX_2] = _generate_test_data( + suppliers[0], SUFFIX_2, 5, user_prefix="test2_user" + ) + test_users_by_suffix[SUFFIX_3] = _generate_test_data( + suppliers[0], SUFFIX_3, 15, user_prefix="test3_user" + ) + + # Wait for replication + for suffix in all_suffixes: + repl = ReplicationManager(suffix) + for s1 in suppliers: + for s2 in suppliers: + if s1 != s2: + repl.wait_for_replication(s1, s2) + + # Restart to flush logs + for supplier in suppliers: + supplier.restart() + + # Monitor all suffixes + log_dirs = [s.ds_paths.log_dir for s in suppliers] + repl_monitor = ReplicationLogAnalyzer( + log_dirs=log_dirs, + suffixes=all_suffixes + ) + + repl_monitor.parse_logs() + 
generated_files = repl_monitor.generate_report( + output_dir=tmp_dir, + formats=['csv', 'html'], + report_name='multi_suffix_test' + ) + + # Verify summary statistics + with open(generated_files['summary'], 'r') as f: + summary = json.load(f) + stats = summary['analysis_summary'] + + # Verify updates by suffix + updates = stats['updates_by_suffix'] + assert len(updates) == len(all_suffixes) + for suffix in all_suffixes: + assert suffix in updates + assert updates[suffix] > 0 + + # Verify relative amounts + assert updates[SUFFIX_3] > updates[DEFAULT_SUFFIX] + assert updates[DEFAULT_SUFFIX] > updates[SUFFIX_2] + + finally: + _cleanup_multi_suffix_test( + test_users_by_suffix, + tmp_dir, + suppliers, + [SUFFIX_2, SUFFIX_3] + ) + + +def test_replication_log_monitoring_filter_combinations(topo_m4): + """Test complex combinations of filtering options and interactions + + :id: 103fc0ac-f0b8-48f1-8cdf-1f6ff57f9672 + :setup: Four suppliers replication setup + :steps: + 1. Test multiple concurrent filters + 2. Test filter interactions + 3. Verify filter precedence + :expectedresults: + 1. Multiple filters should work together correctly + 2. Filter interactions should be predictable + 3. 
Results should respect all applied filters + """ + tmp_dir = tempfile.mkdtemp(prefix='repl_filter_test_') + test_users = [] + suppliers = [topo_m4.ms[f"supplier{i}"] for i in range(1, 5)] + + try: + # Clear logs and restart servers + for supplier in suppliers: + supplier.deleteAccessLogs(restart=True) + + # Generate varied test data + start_time = datetime.now(timezone.utc) + test_users = _generate_test_data(suppliers[0], DEFAULT_SUFFIX, 30) + + # Create different lag patterns + for i, user in enumerate(test_users): + if i % 3 == 0: + time.sleep(0.5) # Short lag + elif i % 3 == 1: + time.sleep(1.5) # Medium lag + user.replace('description', f'Modified with lag pattern {i}') + + # Wait for replication + repl = ReplicationManager(DEFAULT_SUFFIX) + for s1 in suppliers: + for s2 in suppliers: + if s1 != s2: + repl.wait_for_replication(s1, s2) + + end_time = datetime.now(timezone.utc) + + # Restart to flush logs + for supplier in suppliers: + supplier.restart() + + log_dirs = [s.ds_paths.log_dir for s in suppliers] + + # Test combined filters + repl_monitor = ReplicationLogAnalyzer( + log_dirs=log_dirs, + suffixes=[DEFAULT_SUFFIX], + lag_time_lowest=1.0, + etime_lowest=0.1, + only_fully_replicated=True, + time_range={'start': start_time, 'end': end_time} + ) + + repl_monitor.parse_logs() + results = repl_monitor.build_result() + + # Verify filter combinations + for csn, server_map in results['lag'].items(): + t_list = [ + record['logtime'] + for key, record in server_map.items() + if isinstance(record, dict) and key != '__hop_lags__' + ] + if not t_list: + continue + + lag_time = max(t_list) - min(t_list) + + # Verify all filters were applied + assert lag_time > 1.0, "Lag time filter not applied" + assert len(t_list) == len(suppliers), "Not fully replicated" + + # Verify time range + for t in t_list: + dt = datetime.fromtimestamp(t, timezone.utc) + assert start_time <= dt <= end_time, "Time range filter violated" + finally: + _cleanup_test_data(test_users, tmp_dir) + + 
+if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 74bcb4576..7e79508fd 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -146,6 +146,9 @@ BuildRequires: python%{python3_pkgversion}-argparse-manpage BuildRequires: python%{python3_pkgversion}-policycoreutils BuildRequires: python%{python3_pkgversion}-libselinux BuildRequires: python%{python3_pkgversion}-cryptography +BuildRequires: python%{python3_pkgversion}-numpy +BuildRequires: python%{python3_pkgversion}-plotly +BuildRequires: python%{python3_pkgversion}-matplotlib # For cockpit %if %{with cockpit} @@ -321,6 +324,9 @@ Requires: python%{python3_pkgversion}-argcomplete Requires: python%{python3_pkgversion}-libselinux Requires: python%{python3_pkgversion}-setuptools Requires: python%{python3_pkgversion}-cryptography +Recommends: python%{python3_pkgversion}-numpy +Recommends: python%{python3_pkgversion}-plotly +Recommends: python%{python3_pkgversion}-matplotlib Recommends: bash-completion %{?python_provide:%python_provide python%{python3_pkgversion}-lib389} diff --git a/src/lib389/lib389/cli_base/__init__.py b/src/lib389/lib389/cli_base/__init__.py index 31ea63bbc..5892290d4 100644 --- a/src/lib389/lib389/cli_base/__init__.py +++ b/src/lib389/lib389/cli_base/__init__.py @@ -385,8 +385,17 @@ class CustomHelpFormatter(argparse.HelpFormatter): description to the full help output """ def add_arguments(self, actions): - if len(actions) > 0 and actions[0].option_strings: - actions = parent_arguments + actions + if len(actions) > 0: + # Check if this is the main options section by looking for the help action + is_main_section = any( + isinstance(action, argparse._HelpAction) + for action in actions + ) + + # Only add parent arguments to the main options section + if is_main_section: + actions = parent_arguments + actions + 
super(CustomHelpFormatter, self).add_arguments(actions) def _format_usage(self, usage, actions, groups, prefix): diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py index 53a18ced6..f86569645 100644 --- a/src/lib389/lib389/cli_conf/replication.py +++ b/src/lib389/lib389/cli_conf/replication.py @@ -11,12 +11,14 @@ import os import json import ldap import stat +from datetime import datetime from shutil import copyfile from getpass import getpass from lib389._constants import ReplicaRole, DSRC_HOME from lib389.cli_base.dsrc import dsrc_to_repl_monitor from lib389.cli_base import _get_arg, CustomHelpFormatter from lib389.utils import is_a_dn, copy_with_permissions, ds_supports_new_changelog, get_passwd_from_file +from lib389.repltools import ReplicationLogAnalyzer from lib389.replica import Replicas, ReplicationMonitor, BootstrapReplicationManager, Changelog5, ChangelogLDIF, Changelog from lib389.tasks import CleanAllRUVTask, AbortCleanAllRUVTask from lib389._mapped_object import DSLdapObjects @@ -506,6 +508,88 @@ def get_repl_monitor_info(inst, basedn, log, args): log.info(json.dumps({"type": "list", "items": report_items}, indent=4)) +def generate_lag_report(inst, basedn, log, args): + """Generate detailed replication lag analysis report from server logs.""" + + # Validate input parameters + if not args.log_dirs: + raise ValueError("No log directories specified") + + # Validate log directories + for log_dir in args.log_dirs: + if not os.path.isdir(log_dir): + raise ValueError(f"Log directory not found or not accessible: {log_dir}") + + # Validate output directory + if not os.path.exists(args.output_dir): + try: + os.makedirs(args.output_dir) + except OSError as e: + raise ValueError(f"Cannot create output directory: {e}") + elif not os.access(args.output_dir, os.W_OK): + raise ValueError(f"Output directory not writable: {args.output_dir}") + + # Determine output formats + formats = [] + if args.csv: + 
formats.append('csv') + if args.html: + formats.append('html') + if args.png: + formats.append('png') + if not formats: # Default to PNG if no format specified + formats.append('png') + + # Parse time range if specified + time_range = {} + try: + if args.start_time: + time_range['start'] = datetime.strptime(args.start_time, '%Y-%m-%d %H:%M:%S') + if args.end_time: + time_range['end'] = datetime.strptime(args.end_time, '%Y-%m-%d %H:%M:%S') + except ValueError as e: + raise ValueError(f"Invalid time format. Use YYYY-MM-DD HH:MM:SS: {e}") + + try: + # Initialize ReplicationLogAnalyzer with enhanced options + log.info("Initializing replication log analysis...") + repl_analyzer = ReplicationLogAnalyzer( + log_dirs=args.log_dirs, + suffixes=args.suffixes, + anonymous=args.anonymous, + only_fully_replicated=args.only_fully_replicated, + only_not_replicated=args.only_not_replicated, + lag_time_lowest=args.lag_time_lowest, + etime_lowest=args.etime_lowest, + repl_lag_threshold=args.repl_lag_threshold, + utc_offset=args.utc_offset, + time_range=time_range + ) + + # Parse logs + log.info("Analyzing replication logs...") + repl_analyzer.parse_logs() + + # Generate reports + log.info("Generating analysis reports...") + generated_files = repl_analyzer.generate_report( + output_dir=args.output_dir, + formats=formats, + report_name="replication_analysis" + ) + + # Report output locations + if args.json: + log.info(json.dumps({"type": "list", "items": generated_files}, indent=4)) + else: + log.info("Generated report files:") + for fmt, path in generated_files.items(): + log.info(f" {fmt}: {path}") + + except Exception as e: + raise ValueError(f"Failed to generate replication lag report: {e}") + + # This subcommand is available when 'not ds_supports_new_changelog' def create_cl(inst, basedn, log, args): cl = Changelog5(inst) @@ -1453,6 +1537,63 @@ def create_parser(subparsers): help="Enables displaying an alias instead of host:port, if an alias is " "assigned to a host:port 
combination. The format: alias=host:port") + repl_lag_report_parser = repl_subcommands.add_parser('lag-report', + help='Generate detailed replication lag monitoring report', + formatter_class=CustomHelpFormatter) + repl_lag_report_parser.set_defaults(func=generate_lag_report) + + # Input options group + input_group = repl_lag_report_parser.add_argument_group('Input options') + input_group.add_argument('--log-dirs', nargs='+', required=True, + help='List of log directories to analyze') + input_group.add_argument('--suffixes', nargs='+', required=True, + help='List of suffixes to analyze') + + # Output options group + output_group = repl_lag_report_parser.add_argument_group('Output options') + output_group.add_argument('--output-dir', required=True, + help='Directory for report output files') + output_group.add_argument('--html', action='store_true', + help='Generate HTML report') + output_group.add_argument('--csv', action='store_true', + help='Generate CSV report') + output_group.add_argument('--png', action='store_true', + help='Generate PNG report (default if no format specified)') + + # Filtering options group + filter_group = repl_lag_report_parser.add_argument_group('Filtering options') + + # Create mutually exclusive group for replication filters + repl_filter_group = filter_group.add_mutually_exclusive_group() + repl_filter_group.add_argument('--only-fully-replicated', action='store_true', + help='Show only fully replicated entries') + repl_filter_group.add_argument('--only-not-replicated', action='store_true', + help='Show only entries that failed to replicate') + + # Other filtering options + filter_group.add_argument('--lag-time-lowest', type=float, + help='Filter entries with lag time above this threshold (seconds)') + filter_group.add_argument('--etime-lowest', type=float, + help='Filter entries with etime above this threshold (seconds)') + filter_group.add_argument('--repl-lag-threshold', type=float, + help='Replication lag threshold for highlighting 
(seconds)') + + # Time range options subgroup + time_group = repl_lag_report_parser.add_argument_group('Time range options') + time_group.add_argument('--start-time', + default='1970-01-01 00:00:00', + help='Start time for analysis (YYYY-MM-DD HH:MM:SS)') + time_group.add_argument('--end-time', + default='9999-12-31 23:59:59', + help='End time for analysis (YYYY-MM-DD HH:MM:SS)') + + # Additional options group + additional_group = repl_lag_report_parser.add_argument_group('Additional options') + additional_group.add_argument('--utc-offset', + help='UTC offset in ±HHMM format (e.g., -0400, +0530)') + additional_group.add_argument('--anonymous', action='store_true', + help='Anonymize server names in the report') + ############################################ # Replication Agmts ############################################ diff --git a/src/lib389/lib389/repltools.py b/src/lib389/lib389/repltools.py index 6f83e40a2..33c111e4e 100644 --- a/src/lib389/lib389/repltools.py +++ b/src/lib389/lib389/repltools.py @@ -1,19 +1,40 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2021 Red Hat, Inc. +# Copyright (C) 2025 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- +from collections import defaultdict +from datetime import datetime, timezone, tzinfo, timedelta +import numpy as np +from typing import Dict, List, Optional, Tuple, Generator, Any, NamedTuple, Union import os -import os.path import re -import subprocess -import ldap +import json +import csv import logging +import ldap +import subprocess from lib389._constants import * from lib389.properties import * +from lib389.utils import normalizeDN + +try: + import plotly.graph_objs as go + import plotly.io as pio + from plotly.subplots import make_subplots + PLOTLY_AVAILABLE = True +except ImportError: + PLOTLY_AVAILABLE = False + +try: + import matplotlib.pyplot as plt + import matplotlib.dates as mdates + MATPLOTLIB_AVAILABLE = True +except ImportError: + MATPLOTLIB_AVAILABLE = False logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @@ -304,3 +325,1137 @@ class ReplTools(object): 'passwordExpirationTime': '20381010000000Z'} server.setupBindDN(repl_manager_dn, repl_manager_pw, attrs) + +class DSLogParser: + """Base parser for Directory Server logs, focusing on replication events.""" + + REGEX_TIMESTAMP = re.compile( + r'\[(?P<day>\d*)\/(?P<month>\w*)\/(?P<year>\d*):(?P<hour>\d*):(?P<minute>\d*):(?P<second>\d*)(\.(?P<nanosecond>\d*))+\s(?P<tz>[\+\-]\d{2})(?P<tz_minute>\d{2})' + ) + REGEX_LINE = re.compile( + r'\s(?P<quoted>[^= ]+="[^"]*")|(?P<var>[^= ]+=[^\s]+)|(?P<keyword>[^\s]+)' + ) + MONTH_LOOKUP = { + 'Jan': "01", 'Feb': "02", 'Mar': "03", 'Apr': "04", + 'May': "05", 'Jun': "06", 'Jul': "07", 'Aug': "08", + 'Sep': "09", 'Oct': "10", 'Nov': "11", 'Dec': "12" + } + + class ParserResult: + """Container for parsed log line results.""" + def __init__(self): + self.keywords: List[str] = [] + self.vars: Dict[str, str] = {} + self.raw: Any = None + self.timestamp: Optional[str] = None + self.line: Optional[str] = None + + def __init__(self, logname: str, suffixes: List[str], + tz: tzinfo = timezone.utc, + 
start_time: Optional[datetime] = None, + end_time: Optional[datetime] = None, + batch_size: int = 1000): + """Initialize the parser with time range filtering. + + :param logname: Path to the log file + :param suffixes: Suffixes that should be tracked + :param tz: Timezone to interpret log timestamps + :param start_time: Optional start time filter + :param end_time: Optional end time filter + :param batch_size: Batch size for memory-efficient processing + """ + self.logname = logname + self.lineno = 0 + self.line: Optional[str] = None + self.tz = tz + self._suffixes = self._normalize_suffixes(suffixes) + + # Ensure start_time and end_time are timezone-aware + self.start_time = self._ensure_timezone_aware(start_time) if start_time else None + self.end_time = self._ensure_timezone_aware(end_time) if end_time else None + + self.batch_size = batch_size + self.pending_ops: Dict[Tuple[str, str], Dict[str, Any]] = {} + self._logger = logging.getLogger(__name__) + self._current_batch: List[Dict[str, Any]] = [] + + def _ensure_timezone_aware(self, dt: datetime) -> datetime: + """Ensure datetime is timezone-aware using configured timezone.""" + if dt.tzinfo is None: + return dt.replace(tzinfo=self.tz) + return dt.astimezone(self.tz) + + @staticmethod + def parse_timestamp(ts: Union[str, datetime]) -> datetime: + """Parse a timestamp into a datetime object.""" + if isinstance(ts, datetime): + return ts + + match = DSLogParser.REGEX_TIMESTAMP.match(ts) + if not match: + raise ValueError(f"Invalid timestamp format: {ts}") + + parsed = match.groupdict() + iso_ts = '{YEAR}-{MONTH}-{DAY}T{HOUR}:{MINUTE}:{SECOND}{TZH}:{TZM}'.format( + YEAR=parsed['year'], + MONTH=DSLogParser.MONTH_LOOKUP[parsed['month']], + DAY=parsed['day'], + HOUR=parsed['hour'], + MINUTE=parsed['minute'], + SECOND=parsed['second'], + TZH=parsed['tz'], + TZM=parsed['tz_minute'] + ) + + # Create timezone-aware datetime + dt = datetime.fromisoformat(iso_ts) + + # Handle nanoseconds if present + if 
parsed['nanosecond']: + dt = dt.replace(microsecond=int(parsed['nanosecond']) // 1000) + + return dt + + def _is_in_time_range(self, timestamp: datetime) -> bool: + """Check if timestamp is within configured time range.""" + # Ensure timestamp is timezone-aware and in the same timezone + aware_timestamp = self._ensure_timezone_aware(timestamp) + + if self.start_time and aware_timestamp < self.start_time: + return False + if self.end_time and aware_timestamp > self.end_time: + return False + return True + + def _cleanup_resources(self): + """Clean up any remaining resources.""" + self.pending_ops.clear() + self._current_batch.clear() + + def _process_operation(self, result: 'DSLogParser.ParserResult') -> Optional[Dict[str, Any]]: + """Process operation with memory optimization.""" + conn = result.vars.get('conn') + op = result.vars.get('op') + + if not conn or not op: + return None + + conn_op = (conn, op) + + # Handle completion keywords + if any(kw in result.keywords for kw in ['RESULT', 'ABANDON', 'DISCONNECT']): + if conn_op in self.pending_ops: + op_data = self.pending_ops.pop(conn_op) + return self._create_record(result, op_data) + return None + + # Manage pending operations + if conn_op not in self.pending_ops: + self.pending_ops[conn_op] = { + 'start_time': result.timestamp, + 'last_time': result.timestamp, + 'conn': conn, + 'op': op, + 'suffix': None, + 'target_dn': None + } + else: + # Update last seen time + self.pending_ops[conn_op]['last_time'] = result.timestamp + + # Check for DN and suffix + if 'dn' in result.vars: + matched_suffix = self._match_suffix(result.vars['dn']) + if matched_suffix: + self.pending_ops[conn_op]['suffix'] = matched_suffix + self.pending_ops[conn_op]['target_dn'] = result.vars['dn'] + + # Check for CSN + if 'csn' in result.vars: + self.pending_ops[conn_op]['csn'] = result.vars['csn'] + + return None + + def parse_file(self) -> Generator[Dict[str, Any], None, None]: + """Parse log file with memory-efficient batch processing.""" 
+ try: + with open(self.logname, 'r', encoding='utf-8') as f: + for self.line in f: + self.lineno += 1 + try: + result = self.parse_line() + if result: + # Record is returned if operation is complete + record = self._process_operation(result) + if record: + self._current_batch.append(record) + + # Yield batch if full + if len(self._current_batch) >= self.batch_size: + yield from self._process_batch() + + except Exception as e: + self._logger.warning( + f"Error parsing line {self.lineno} in {self.logname}: {e}" + ) + continue + + # Process any remaining operations in the final batch + if self._current_batch: + yield from self._process_batch() + + # Handle any remaining pending operations + yield from self._process_remaining_ops() + + except (OSError, IOError) as e: + raise IOError(f"Failed to open or read log file {self.logname}: {e}") + finally: + self._cleanup_resources() + + def parse_line(self) -> Optional['DSLogParser.ParserResult']: + """Parse a single line, returning a ParserResult object if recognized.""" + line = self.line + if not line: + return None + + # Extract timestamp + timestamp_match = self.REGEX_TIMESTAMP.match(line) + if not timestamp_match: + return None + + result = DSLogParser.ParserResult() + result.raw = line + result.timestamp = timestamp_match.group(0) + + # Remove the timestamp portion from the line for parsing + after_ts = line[timestamp_match.end():].strip() + # Use REGEX_LINE to parse remaining content + for match in self.REGEX_LINE.finditer(after_ts): + if match.group('keyword'): + # Something that is not in key=value format + result.keywords.append(match.group('keyword')) + elif match.group('var'): + # key=value + var = match.group('var') + k, v = var.split('=', 1) + result.vars[k] = v.strip() + elif match.group('quoted'): + # key="value" + kv = match.group('quoted') + k, v = kv.split('=', 1) + result.vars[k] = v.strip('"') + + return result + + def _normalize_suffixes(self, suffixes: List[str]) -> List[str]: + """Normalize suffixes 
for matching (lowercase, remove spaces).""" + normalized = [normalizeDN(s) for s in suffixes if s] + # Sort by length descending so we match the longest suffix first + return sorted(normalized, key=len, reverse=True) + + def _match_suffix(self, dn: str) -> Optional[str]: + """Return a matched suffix if dn ends with one of our suffixes.""" + if not dn: + return None + dn_clean = normalizeDN(dn) + for sfx in self._suffixes: + if dn_clean.endswith(sfx): + return sfx + return None + + def _process_batch(self) -> Generator[Dict[str, Any], None, None]: + """Process and yield a batch of operations.""" + for record in self._current_batch: + try: + # Handle timestamp regardless of type + if isinstance(record['timestamp'], str): + timestamp = self.parse_timestamp(record['timestamp']) + else: + timestamp = record['timestamp'] + + if not self._is_in_time_range(timestamp): + continue + + record['timestamp'] = timestamp + yield record + + except ValueError as e: + self._logger.warning( + f"Error processing timestamp in batch: {e}" + ) + self._current_batch.clear() + + def _create_record(self, result: Optional['DSLogParser.ParserResult'] = None, + op_data: Dict[str, Any] = None) -> Optional[Dict[str, Any]]: + """Create a standardized record from either a parser result or operation data.""" + try: + # Determine source of data + if result and op_data: + # Active operation + timestamp = self.parse_timestamp(result.timestamp) + conn = result.vars.get('conn') + op = result.vars.get('op') + csn = result.vars.get('csn') + etime = result.vars.get('etime') + duration = self._calculate_duration(op_data['start_time'], result.timestamp) + elif op_data: + # Remaining operation + timestamp = op_data.get('last_time', op_data['start_time']) + if isinstance(timestamp, str): + timestamp = self.parse_timestamp(timestamp) + conn = op_data.get('conn') + op = op_data.get('op') + csn = op_data.get('csn') + etime = None + duration = self._calculate_duration( + op_data['start_time'], + timestamp + ) + 
else: + self._logger.warning("Invalid record creation attempt: no data provided") + return None + + # Validate required fields + if not all([timestamp, conn, op]): + self._logger.debug( + f"Missing required fields: timestamp={timestamp}, conn={conn}, op={op}" + ) + return None + + # Create standardized record + record = { + 'timestamp': timestamp, + 'conn': conn, + 'op': op, + 'csn': csn, + 'suffix': op_data.get('suffix'), + 'target_dn': op_data.get('target_dn'), + 'duration': duration, + 'etime': etime + } + + # Verify time range + if not self._is_in_time_range(timestamp): + return None + + return record + + except Exception as e: + self._logger.warning(f"Error creating record: {e}") + return None + + def _process_remaining_ops(self) -> Generator[Dict[str, Any], None, None]: + """Process any remaining pending operations.""" + for (conn, op), op_data in list(self.pending_ops.items()): + try: + if 'csn' in op_data and 'suffix' in op_data: + record = self._create_record(op_data=op_data) + if record: + yield record + except Exception as e: + self._logger.warning( + f"Error processing remaining operation {conn}-{op}: {e}" + ) + finally: + self.pending_ops.pop((conn, op), None) + + def _calculate_duration(self, start: Union[str, datetime], + end: Union[str, datetime]) -> float: + """Compute duration between two timestamps. """ + try: + if isinstance(start, str): + st = self.parse_timestamp(start) + else: + st = start + + if isinstance(end, str): + et = self.parse_timestamp(end) + else: + et = end + + return (et - st).total_seconds() + except (ValueError, TypeError): + return 0.0 + + +class ChartData(NamedTuple): + """Container for chart data series.""" + times: List[datetime] + lags: List[float] + durations: List[float] + hover: List[str] + +class VisualizationHelper: + """Helper class for visualization-related functionality.""" + + @staticmethod + def generate_color_palette(num_colors: int) -> List[str]: + """Generate a visually pleasing color palette. 
+ + :param num_colors: Number of colors needed + :returns: List of rgba color strings + """ + colors = [] + for i in range(num_colors): + hue = i / num_colors + saturation = 0.7 + value = 0.9 + + # Convert HSV to RGB + c = value * saturation + x = c * (1 - abs((hue * 6) % 2 - 1)) + m = value - c + + h_sector = int(hue * 6) + if h_sector == 0: + r, g, b = c, x, 0 + elif h_sector == 1: + r, g, b = x, c, 0 + elif h_sector == 2: + r, g, b = 0, c, x + elif h_sector == 3: + r, g, b = 0, x, c + elif h_sector == 4: + r, g, b = x, 0, c + else: + r, g, b = c, 0, x + + # Convert to RGB values + rgb = [int((val + m) * 255) for val in (r, g, b)] + colors.append(f'rgba({rgb[0]},{rgb[1]},{rgb[2]},0.8)') + + return colors + + @staticmethod + def prepare_chart_data(csns: Dict[str, Dict[Union[int, str], Dict[str, Any]]]) -> Dict[Tuple[str, str], ChartData]: + """Prepare data for visualization.""" + chart_data = defaultdict(lambda: { + 'times': [], 'lags': [], 'durations': [], 'hover': [] + }) + + for csn, server_map in csns.items(): + # Gather only valid records (dict, not '__hop_lags__', must have 'logtime') + valid_records = [ + rec for key, rec in server_map.items() + if isinstance(rec, dict) + and key != '__hop_lags__' + and 'logtime' in rec + ] + if not valid_records: + continue + + # Compute global lag for this CSN (earliest vs. 
latest among valid records) + t_list = [rec['logtime'] for rec in valid_records] + earliest = min(t_list) + latest = max(t_list) + lag_val = latest - earliest + + # Populate chart data for each server record + for rec in valid_records: + suffix_val = rec.get('suffix', 'unknown') + server_val = rec.get('server_name', 'unknown') + + # Convert numeric UTC to a datetime + ts_dt = datetime.fromtimestamp(rec['logtime']) + + # Operation duration, defaulting to 0.0 if missing + duration_val = float(rec.get('duration', 0.0)) + + # Build the ChartData slot + data_slot = chart_data[(suffix_val, server_val)] + data_slot['times'].append(ts_dt) + data_slot['lags'].append(lag_val) # The same global-lag for all servers + data_slot['durations'].append(duration_val) + data_slot['hover'].append( + f"CSN: {csn}<br>" + f"Server: {server_val}<br>" + f"Suffix: {suffix_val}<br>" + f"Target DN: {rec.get('target_dn', '')}<br>" + f"Lag Time: {lag_val:.3f}s<br>" + f"Duration: {duration_val:.3f}s" + ) + + # Convert the dict-of-lists into your namedtuple-based ChartData + return { + key: ChartData( + times=value['times'], + lags=value['lags'], + durations=value['durations'], + hover=value['hover'] + ) + for key, value in chart_data.items() + } + + +class ReplicationLogAnalyzer: + """This class handles: + - Collecting log files from multiple directories. + - Parsing them for replication events (CSN). + - Filtering by suffix. + - Storing earliest and latest timestamps for each CSN to compute lag. + - Generating final dictionaries to be used for CSV, HTML, or JSON reporting. 
+ """ + + def __init__(self, log_dirs: List[str], suffixes: Optional[List[str]] = None, + anonymous: bool = False, only_fully_replicated: bool = False, + only_not_replicated: bool = False, lag_time_lowest: Optional[float] = None, + etime_lowest: Optional[float] = None, repl_lag_threshold: Optional[float] = None, + utc_offset: Optional[int] = None, time_range: Optional[Dict[str, datetime]] = None): + if not log_dirs: + raise ValueError("No log directories provided for analysis.") + + self.log_dirs = log_dirs + self.suffixes = suffixes or [] + self.anonymous = anonymous + self.only_fully_replicated = only_fully_replicated + self.only_not_replicated = only_not_replicated + self.lag_time_lowest = lag_time_lowest + self.etime_lowest = etime_lowest + self.repl_lag_threshold = repl_lag_threshold + + # Set timezone + if utc_offset is not None: + try: + self.tz = self._parse_timezone_offset(utc_offset) + except ValueError as e: + raise ValueError(f"Invalid UTC offset: {e}") + else: + self.tz = timezone.utc + + self.time_range = time_range or {} + self.csns: Dict[str, Dict[Union[int, str], Dict[str, Any]]] = {} + + # Track earliest global timestamp + self.start_dt: Optional[datetime] = None + self.start_udt: Optional[float] = None + self._logger = logging.getLogger(__name__) + + def _should_include_record(self, csn: str, server_map: Dict[Union[int, str], Dict[str, Any]]) -> bool: + """Determine if a record should be included based on filtering criteria.""" + if self.only_fully_replicated and len(server_map) != len(self.log_dirs): + return False + if self.only_not_replicated and len(server_map) == len(self.log_dirs): + return False + + # Check lag time threshold + if self.lag_time_lowest is not None: + # Only consider dict items, skipping the '__hop_lags__' entry + t_list = [ + d['logtime'] + for key, d in server_map.items() + if isinstance(d, dict) and key != '__hop_lags__' + ] + if not t_list: + return False + lag_time = max(t_list) - min(t_list) + if lag_time <= 
self.lag_time_lowest: + return False + + # Check etime threshold + if self.etime_lowest is not None: + for key, record in server_map.items(): + if not isinstance(record, dict) or key == '__hop_lags__': + continue + if float(record.get('etime', 0)) <= self.etime_lowest: + return False + + return True + + def _collect_logs(self) -> List[Tuple[str, List[str]]]: + """For each directory in self.log_dirs, return a tuple (server_name, [logfiles]).""" + data = [] + for dpath in self.log_dirs: + if not os.path.isdir(dpath): + self._logger.warning(f"{dpath} is not a directory or not accessible.") + continue + + server_name = os.path.basename(dpath.rstrip('/')) + logfiles = [] + for fname in os.listdir(dpath): + if fname.startswith('access'): # Only parse access logs + full_path = os.path.join(dpath, fname) + if os.path.isfile(full_path) and os.access(full_path, os.R_OK): + logfiles.append(full_path) + else: + self._logger.warning(f"Cannot read file: {full_path}") + + logfiles.sort() + if logfiles: + data.append((server_name, logfiles)) + else: + self._logger.warning(f"No accessible 'access' logs found in {dpath}") + return data + + @staticmethod + def _parse_timezone_offset(offset_str: str) -> timezone: + """Parse timezone offset string in ±HHMM format.""" + if not isinstance(offset_str, str): + raise ValueError("Timezone offset must be a string in ±HHMM format") + + match = re.match(r'^([+-])(\d{2})(\d{2})$', offset_str) + if not match: + raise ValueError("Invalid timezone offset format. Use ±HHMM (e.g., -0400, +0530)") + + sign, hours, minutes = match.groups() + hours = int(hours) + minutes = int(minutes) + + if hours > 12 or minutes >= 60: + raise ValueError("Invalid timezone offset. 
Hours must be ≤12, minutes <60") + + total_minutes = hours * 60 + minutes + if sign == '-': + total_minutes = -total_minutes + + return timezone(timedelta(minutes=total_minutes)) + + def _compute_hop_lags(self, server_map: Dict[Union[int, str], Dict[str, Any]]) -> List[Dict[str, Any]]: + """Compute per-hop replication lags for one CSN across multiple servers.""" + arrivals = [] + for key, data in server_map.items(): + # Skip the special '__hop_lags__' and any non-dict + if not isinstance(data, dict) or key == '__hop_lags__': + continue + arrivals.append({ + 'server_name': data.get('server_name', 'unknown'), + 'logtime': data.get('logtime', 0.0), # numeric UTC timestamp + 'suffix': data.get('suffix'), + 'target_dn': data.get('target_dn'), + }) + + # Sort by ascending logtime + arrivals.sort(key=lambda x: x['logtime']) + + # Iterate pairs (supplier -> consumer) + hops = [] + for i in range(1, len(arrivals)): + supplier = arrivals[i - 1] + consumer = arrivals[i] + hop_lag = consumer['logtime'] - supplier['logtime'] # in seconds + hops.append({ + 'supplier': supplier['server_name'], + 'consumer': consumer['server_name'], + 'hop_lag': hop_lag, + 'arrival_consumer': consumer['logtime'], + 'suffix': consumer.get('suffix'), + 'target_dn': consumer.get('target_dn'), + }) + + return hops + + def parse_logs(self) -> None: + """Parse logs from all directories. Each directory is treated as one server + unless anonymized, in which case we use 'server_{index}'. 
+ """ + server_data = self._collect_logs() + if not server_data: + raise ValueError("No valid log directories with accessible logs found.") + + for idx, (server_name, logfiles) in enumerate(server_data): + displayed_name = f"server_{idx}" if self.anonymous else server_name + + # For each log file, parse line by line + for logfile in logfiles: + parser = DSLogParser( + logname=logfile, + suffixes=self.suffixes, + tz=self.tz, + start_time=self.time_range.get('start'), + end_time=self.time_range.get('end') + ) + + for record in parser.parse_file(): + # If there's no CSN or no suffix, skip + if not record.get('csn') or not record.get('suffix'): + continue + + csn = record['csn'] + ts = record['timestamp'] + # Convert timestamp to numeric UTC + udt = ts.astimezone(timezone.utc).timestamp() + + # Track earliest global timestamp + if self.start_udt is None or udt < self.start_udt: + self.start_udt = udt + self.start_dt = ts + + if csn not in self.csns: + self.csns[csn] = {} + + # Build record for this server + self.csns[csn][idx] = { + 'logtime': udt, + 'etime': record.get('etime'), + 'server_name': displayed_name, + 'suffix': record.get('suffix'), + 'target_dn': record.get('target_dn'), + 'duration': record.get('duration', 0.0), + } + + # Apply filters after collecting all data + filtered_csns = {} + for csn, server_map in self.csns.items(): + if self._should_include_record(csn, server_map): + filtered_csns[csn] = server_map + # Compute hop-lags and store + hop_list = self._compute_hop_lags(server_map) + filtered_csns[csn]['__hop_lags__'] = hop_list + + self.csns = filtered_csns + + def build_result(self) -> Dict[str, Any]: + """Build the final dictionary object with earliest timestamp, UTC offset, and replication data.""" + if not self.start_dt: + raise ValueError("No valid replication data collected.") + + obj = { + "start-time": str(self.start_dt), + "utc-start-time": self.start_udt, + "utc-offset": self.start_dt.utcoffset().total_seconds() if 
self.start_dt.utcoffset() else 0, + "lag": self.csns + } + # Also record the log-files (anonymous or not) + if self.anonymous: + obj['log-files'] = list(range(len(self.log_dirs))) + else: + obj['log-files'] = self.log_dirs + return obj + + def generate_report(self, output_dir: str, + formats: List[str], + report_name: str = "replication_analysis") -> Dict[str, str]: + """Generate reports in specified formats.""" + if not os.path.exists(output_dir): + try: + os.makedirs(output_dir) + except OSError as e: + raise OSError(f"Could not create directory {output_dir}: {e}") + + if not self.csns: + raise ValueError("No CSN data available for reporting. Did you call parse_logs()?") + + results = self.build_result() + generated_files = {} + + # Always produce JSON summary + summary_path = os.path.join(output_dir, f"{report_name}_summary.json") + self._generate_summary_json(results, summary_path) + generated_files["summary"] = summary_path + + # Generate requested formats + for fmt in formats: + fmt = fmt.lower() + outfile = os.path.join(output_dir, f"{report_name}.{fmt}") + + if fmt == 'csv': + self._generate_csv(results, outfile) + generated_files["csv"] = outfile + + elif fmt == 'html': + if not PLOTLY_AVAILABLE: + self._logger.warning("Plotly not installed. Skipping HTML report.") + continue + fig = self._create_plotly_figure(results) + self._generate_html(fig, outfile) + generated_files["html"] = outfile + + elif fmt == 'png': + if not MATPLOTLIB_AVAILABLE: + self._logger.warning("Matplotlib not installed. 
Skipping PNG report.") + continue + fig = self._create_plotly_figure(results) + self._generate_png(fig, outfile) + generated_files["png"] = outfile + + else: + self._logger.warning(f"Unknown report format requested: {fmt}") + + return generated_files + + def _create_plotly_figure(self, results: Dict[str, Any]) -> go.Figure: + """Create a plotly figure for visualization.""" + if not PLOTLY_AVAILABLE: + raise ImportError("Plotly is required for figure creation") + + # Create figure with 3 subplots: we still generate all 3 for HTML usage + fig = make_subplots( + rows=3, cols=1, + subplot_titles=( + "Global Replication Lag Over Time", + "Operation Duration Over Time", + "Per-Hop Replication Lags" + ), + vertical_spacing=0.10, # spacing between subplots + shared_xaxes=True + ) + + # Collect all (suffix, server_name) pairs to color consistently + server_suffix_pairs = set() + for csn, server_map in self.csns.items(): + for key, rec in server_map.items(): + if not isinstance(rec, dict) or key == '__hop_lags__': + continue + + suffix_val = rec.get('suffix', 'unknown') + srv_val = rec.get('server_name', 'unknown') + server_suffix_pairs.add((suffix_val, srv_val)) + + # Generate colors + colors = VisualizationHelper.generate_color_palette(len(server_suffix_pairs)) + + # Prepare chart data for the first two subplots + chart_data = VisualizationHelper.prepare_chart_data(self.csns) + + # Plot Per-Hop Lags in row=3 (for HTML usage) + for csn, server_map in self.csns.items(): + hop_list = server_map.get('__hop_lags__', []) + for hop in hop_list: + consumer_ts = hop.get("arrival_consumer", 0.0) + consumer_dt = datetime.fromtimestamp(consumer_ts) + hop_lag = hop.get("hop_lag", 0.0) + + hover_text = ( + f"Supplier: {hop.get('supplier','unknown')}<br>" + f"Consumer: {hop.get('consumer','unknown')}<br>" + f"Hop Lag: {hop_lag:.3f}s<br>" + f"Arrival Time: {consumer_dt}" + ) + + # showlegend=False means these hop-lag traces won't crowd the legend + fig.add_trace( + go.Scatter( + 
x=[consumer_dt], + y=[hop_lag], + mode='markers', + marker=dict(size=7, symbol='circle'), + name=f"{hop.get('supplier','?')}→{hop.get('consumer','?')}", + text=[hover_text], + hoverinfo='text+x+y', + showlegend=False + ), + row=3, col=1 + ) + + # Plot Global Lag (row=1) and Durations (row=2) + for idx, ((sfx, srv), data) in enumerate(sorted(chart_data.items())): + color = colors[idx % len(colors)] + + # Row=1: Global Replication Lag + fig.add_trace( + go.Scatter( + x=data.times, + y=data.lags, + mode='lines+markers', + name=f"{sfx} - {srv}", + text=data.hover, + hoverinfo='text+x+y', + line=dict(color=color, width=2), + marker=dict(size=6), + showlegend=True + ), + row=1, col=1 + ) + + # Row=2: Operation Durations + fig.add_trace( + go.Scatter( + x=data.times, + y=data.durations, + mode='lines+markers', + name=f"{sfx} - {srv}", + text=data.hover, + hoverinfo='text+x+y', + line=dict(color=color, width=2, dash='solid'), + marker=dict(size=6), + showlegend=False + ), + row=2, col=1 + ) + + # Add a horizontal threshold line to the Replication Lag subplot + if self.repl_lag_threshold is not None: + fig.add_hline( + y=self.repl_lag_threshold, + line=dict(color='red', width=2, dash='dash'), + annotation=dict( + text=f"Lag Threshold = {self.repl_lag_threshold}s", + font=dict(color='red'), + showarrow=False, + x=1, + xanchor='left', + y=self.repl_lag_threshold + ), + row=1, col=1 + ) + + # Figure layout settings + fig.update_layout( + title={ + 'text': 'Replication Analysis Report', + 'y': 0.96, + 'x': 0.5, + 'xanchor': 'center', + 'yanchor': 'top' + }, + template='plotly_white', + hovermode='closest', + showlegend=True, + legend=dict( + title="Suffix / Server", + yanchor="top", + y=0.99, + xanchor="right", + x=1.15, + bgcolor='rgba(255, 255, 255, 0.8)' + ), + height=900, + margin=dict(t=100, r=200, l=80) + ) + + # X-axis styling + fig.update_xaxes(title_text="Time", gridcolor='lightgray', row=1, col=1) + fig.update_xaxes( + title_text="Time", + gridcolor='lightgray', + 
rangeslider_visible=True, + rangeselector=dict( + buttons=list([ + dict(count=1, label="1h", step="hour", stepmode="backward"), + dict(count=6, label="6h", step="hour", stepmode="backward"), + dict(count=1, label="1d", step="day", stepmode="backward"), + dict(count=7, label="1w", step="day", stepmode="backward"), + dict(step="all") + ]), + bgcolor='rgba(255, 255, 255, 0.8)' + ), + row=2, col=1 + ) + fig.update_xaxes(title_text="Time", gridcolor='lightgray', row=3, col=1) + + # Y-axis styling + fig.update_yaxes(title_text="Lag Time (seconds)", gridcolor='lightgray', row=1, col=1) + fig.update_yaxes(title_text="Duration (seconds)", gridcolor='lightgray', row=2, col=1) + fig.update_yaxes(title_text="Hop Lag (seconds)", gridcolor='lightgray', row=3, col=1) + + return fig + + def _generate_png(self, fig: go.Figure, outfile: str) -> None: + """Generate PNG snapshot of the plotly figure using matplotlib. + For PNG, we deliberately omit the hop-lag (3rd subplot) data. + """ + try: + # Create a matplotlib figure with 2 subplots + plt.figure(figsize=(12, 8)) + + # Extract data from the Plotly figure. + # We'll plot only the first two subplots (y-axis = 'y' or 'y2'). + for trace in fig.data: + # Check which y-axis the trace belongs to. 
+ # 'y' => subplot row=1 + # 'y2' => subplot row=2 + # 'y3' => subplot row=3 (hop-lags) - skip those + if trace.yaxis == 'y': # Global Lag subplot + plt.subplot(2, 1, 1) + plt.plot(trace.x, trace.y, label=trace.name) + elif trace.yaxis == 'y2': # Duration subplot + plt.subplot(2, 1, 2) + plt.plot(trace.x, trace.y, label=trace.name) + else: + # This is likely the hop-lag data on subplot row=3, so skip it + continue + + # Format each subplot + for idx, title in enumerate(['Replication Lag Times', 'Operation Durations']): + plt.subplot(2, 1, idx + 1) + plt.title(title) + plt.xlabel('Time') + plt.ylabel('Seconds') + plt.grid(True) + plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') + # Format x-axis as date/time + plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d %H:%M')) + plt.gcf().autofmt_xdate() + + plt.tight_layout() + plt.savefig(outfile, dpi=300, bbox_inches='tight') + plt.close() + + except Exception as e: + raise IOError(f"Failed to generate PNG report: {e}") + + def _generate_html(self, fig: go.Figure, outfile: str) -> None: + """Generate HTML report from the plotly figure.""" + try: + pio.write_html( + fig, + outfile, + include_plotlyjs='cdn', + full_html=True, + include_mathjax='cdn', + config={ + 'responsive': True, + 'scrollZoom': True, + 'modeBarButtonsToAdd': ['drawline', 'drawopenpath', 'eraseshape'], + 'toImageButtonOptions': { + 'format': 'png', + 'filename': 'replication_analysis', + 'height': 1000, + 'width': 1500, + 'scale': 2 + } + } + ) + except Exception as e: + raise IOError(f"Failed to write HTML report: {e}") + + def _generate_csv(self, results: Dict[str, Any], outfile: str) -> None: + """Generate a CSV report listing each replication event and its hop-lags.""" + try: + with open(outfile, 'w', newline='', encoding='utf-8') as csvfile: + writer = csv.writer(csvfile) + + # Global-lag rows + writer.writerow([ + 'Timestamp', 'Server', 'CSN', 'Suffix', 'Target DN', + 'Global Lag (s)', 'Duration (s)', 'Operation Etime' + ]) + 
for csn, server_map in self.csns.items(): + # Compute global-lag for normal dict entries + t_list = [ + d['logtime'] + for key, d in server_map.items() + if isinstance(d, dict) and key != '__hop_lags__' + ] + if not t_list: + continue + earliest = min(t_list) + latest = max(t_list) + global_lag = latest - earliest + + # Write lines for each normal server record + for key, data_map in server_map.items(): + if not isinstance(data_map, dict) or key == '__hop_lags__': + continue + ts_str = datetime.fromtimestamp(data_map['logtime']).strftime('%Y-%m-%d %H:%M:%S') + writer.writerow([ + ts_str, + data_map['server_name'], + csn, + data_map.get('suffix', 'unknown'), + data_map.get('target_dn', ''), + f"{global_lag:.3f}", + f"{float(data_map.get('duration', 0.0)):.3f}", + data_map.get('etime', 'N/A') + ]) + + # Hop-lag rows + writer.writerow([]) # blank line + writer.writerow(["-- Hop-Lag Data --"]) + writer.writerow([ + 'CSN', 'Supplier', 'Consumer', 'Hop Lag (s)', 'Arrival (Consumer)', 'Suffix', 'Target DN' + ]) + for csn, server_map in self.csns.items(): + hop_list = server_map.get('__hop_lags__', []) + for hop_info in hop_list: + hop_lag_str = f"{hop_info['hop_lag']:.3f}" + arrival_ts = datetime.fromtimestamp(hop_info['arrival_consumer']).strftime('%Y-%m-%d %H:%M:%S') + writer.writerow([ + csn, + hop_info['supplier'], + hop_info['consumer'], + hop_lag_str, + arrival_ts, + hop_info.get('suffix', 'unknown'), + hop_info.get('target_dn', '') + ]) + + except Exception as e: + raise IOError(f"Failed to write CSV report {outfile}: {e}") + + def _generate_summary_json(self, results: Dict[str, Any], outfile: str) -> None: + """Create a JSON summary from the final dictionary.""" + global_lag_times = [] + hop_lag_times = [] + suffix_updates = {} + + for csn, server_map in self.csns.items(): + t_list = [ + rec['logtime'] + for key, rec in server_map.items() + if isinstance(rec, dict) and key != '__hop_lags__' and 'logtime' in rec + ] + if not t_list: + continue + + # Global earliest 
vs. latest (for "global lag") + earliest = min(t_list) + latest = max(t_list) + global_lag = latest - earliest + global_lag_times.append(global_lag) + + # Suffix counts + for key, record in server_map.items(): + # Only process normal server records, skip the special '__hop_lags__' + if not isinstance(record, dict) or key == '__hop_lags__': + continue + + sfx = record.get('suffix', 'unknown') + suffix_updates[sfx] = suffix_updates.get(sfx, 0) + 1 + + # Hop-lag data + hop_list = server_map.get('__hop_lags__', []) + for hop_info in hop_list: + hop_lag_times.append(hop_info['hop_lag']) + + # Compute global-lag stats + if global_lag_times: + min_lag = min(global_lag_times) + max_lag = max(global_lag_times) + avg_lag = sum(global_lag_times) / len(global_lag_times) + else: + min_lag = 0.0 + max_lag = 0.0 + avg_lag = 0.0 + + # Compute hop-lag stats + if hop_lag_times: + min_hop_lag = min(hop_lag_times) + max_hop_lag = max(hop_lag_times) + avg_hop_lag = sum(hop_lag_times) / len(hop_lag_times) + total_hops = len(hop_lag_times) + else: + min_hop_lag = 0.0 + max_hop_lag = 0.0 + avg_hop_lag = 0.0 + total_hops = 0 + + # Build analysis summary + analysis_summary = { + 'total_servers': len(self.log_dirs), + 'analyzed_logs': len(self.csns), + 'total_updates': sum(suffix_updates.values()), + 'minimum_lag': min_lag, + 'maximum_lag': max_lag, + 'average_lag': avg_lag, + 'minimum_hop_lag': min_hop_lag, + 'maximum_hop_lag': max_hop_lag, + 'average_hop_lag': avg_hop_lag, + 'total_hops': total_hops, + 'updates_by_suffix': suffix_updates, + 'time_range': { + 'start': results['start-time'], + 'end': 'current' + } + } + + # Wrap it up for writing + summary = { + 'analysis_summary': analysis_summary + } + + # Write to JSON + try: + with open(outfile, 'w', encoding='utf-8') as f: + json.dump(summary, f, indent=2) + except Exception as e: + raise IOError(f"Failed to write summary JSON: {e}") diff --git a/src/lib389/requirements.txt b/src/lib389/requirements.txt index 0a95185d1..706baeaf4 100644 
--- a/src/lib389/requirements.txt +++ b/src/lib389/requirements.txt @@ -7,3 +7,6 @@ python-ldap setuptools distro cryptography +plotly +matplotlib +numpy diff --git a/src/lib389/setup.py.in b/src/lib389/setup.py.in index 2175fe350..c24d0f078 100644 --- a/src/lib389/setup.py.in +++ b/src/lib389/setup.py.in @@ -98,7 +98,10 @@ setup( 'python-ldap', 'setuptools', 'distro', - 'cryptography' + 'cryptography', + 'plotly', + 'matplotlib', + 'numpy' ], cmdclass={
0
379c164d5321549a249dc0a2f2ed7d79989801a0
389ds/389-ds-base
Bug 735114 - renaming a managed entry does not update mepmanagedby When a number of different plug-ins are being used that perform internal operations to update entries, it is possible for the managed entry plug-in to fail to update the managed entry. In particular, it has been found that renaming an origin entry can trigger a plug-in other than the managed entry plug-in to rename the managed entry. This causes the managed entry plug-in to fail when it attempts to update the managed entry. This patch makes the managed entry plug-in check if the managed entry has already been renamed when processing a MODRDN operation. If it detects that the managed entry has already been renamed, it will use the new DN to perform the managed entry updates.
commit 379c164d5321549a249dc0a2f2ed7d79989801a0 Author: Nathan Kinder <[email protected]> Date: Tue Sep 6 11:08:40 2011 -0700 Bug 735114 - renaming a managed entry does not update mepmanagedby When a number of different plug-ins are being used that perform internal operations to update entries, it is possible for the managed entry plug-in to fail to update the managed entry. In particular, it has been found that renaming an origin entry can trigger a plug-in other than the managed entry plug-in to rename the managed entry. This causes the managed entry plug-in to fail when it attempts to update the managed entry. This patch makes the managed entry plug-in check if the managed entry has already been renamed when processing a MODRDN operation. If it detects that the managed entry has already been renamed, it will use the new DN to perform the managed entry updates. diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c index 5e3aa1736..0241cd3a6 100644 --- a/ldap/servers/plugins/mep/mep.c +++ b/ldap/servers/plugins/mep/mep.c @@ -2378,6 +2378,7 @@ mep_modrdn_post_op(Slapi_PBlock *pb) Slapi_Entry *new_managed_entry = NULL; Slapi_DN *managed_sdn = NULL; Slapi_Mods *smods = NULL; + int free_managed_dn = 1; mep_config_read_lock(); @@ -2450,6 +2451,31 @@ mep_modrdn_post_op(Slapi_PBlock *pb) mods[0] = &mod; mods[1] = 0; + /* Create a new managed entry to determine what changes + * we need to make to the existing managed entry. */ + new_managed_entry = mep_create_managed_entry(config, post_e); + if (new_managed_entry == NULL) { + slapi_log_error(SLAPI_LOG_FATAL, MEP_PLUGIN_SUBSYSTEM, + "mep_modrdn_post_op: Unable to create in-memory " + "managed entry from origin entry \"%s\".\n", new_dn); + goto bailmod; + } + + /* Check if the managed entry exists. It is possible that + * it has already been renamed by another plug-in. If it + * has already been renamed, we need to use the new DN to + * perform our updates. 
*/ + managed_sdn = slapi_sdn_new_dn_byref(managed_dn); + + if (slapi_search_internal_get_entry(managed_sdn, 0, + NULL, mep_get_plugin_id()) == LDAP_NO_SUCH_OBJECT) { + slapi_ch_free_string(&managed_dn); + /* This DN is not a copy, so we don't want to free it later. */ + managed_dn = slapi_entry_get_dn(new_managed_entry); + slapi_sdn_set_dn_byref(managed_sdn, managed_dn); + free_managed_dn = 0; + } + /* Perform the modify operation. */ slapi_log_error(SLAPI_LOG_PLUGIN, MEP_PLUGIN_SUBSYSTEM, "mep_modrdn_post_op: Updating %s pointer to \"%s\" " @@ -2465,12 +2491,7 @@ mep_modrdn_post_op(Slapi_PBlock *pb) "origin entry \"%s\" in managed entry \"%s\" " "(%s).\n", new_dn, managed_dn, ldap_err2string(result)); } else { - /* Create a new managed entry to determine what changes - * we need to make to the existing managed entry. */ - new_managed_entry = mep_create_managed_entry(config, post_e); - /* See if we need to rename the managed entry. */ - managed_sdn = slapi_sdn_new_dn_byref(managed_dn); if (slapi_sdn_compare(slapi_entry_get_sdn(new_managed_entry), managed_sdn) != 0) { /* Rename the managed entry. */ slapi_log_error(SLAPI_LOG_PLUGIN, MEP_PLUGIN_SUBSYSTEM, @@ -2510,14 +2531,17 @@ mep_modrdn_post_op(Slapi_PBlock *pb) slapi_mods_free(&smods); } - slapi_sdn_free(&managed_sdn); - slapi_entry_free(new_managed_entry); } +bailmod: + slapi_entry_free(new_managed_entry); + slapi_sdn_free(&managed_sdn); } slapi_pblock_destroy(mep_pb); - slapi_ch_free_string(&managed_dn); + if (free_managed_dn) { + slapi_ch_free_string(&managed_dn); + } mep_config_unlock(); } else { diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c index 76f70e893..75d4e4a74 100644 --- a/ldap/servers/slapd/plugin_internal_op.c +++ b/ldap/servers/slapd/plugin_internal_op.c @@ -872,7 +872,8 @@ void set_common_params (Slapi_PBlock *pb) /* * Given a DN, find an entry by doing an internal search. An LDAP error - * code is returned. + * code is returned. 
To check if an entry exists without returning a + * copy of the entry, NULL can be passed for ret_entry. */ int slapi_search_internal_get_entry( Slapi_DN *dn, char ** attrs, Slapi_Entry **ret_entry , void * component_identity) @@ -881,7 +882,10 @@ slapi_search_internal_get_entry( Slapi_DN *dn, char ** attrs, Slapi_Entry **ret_ Slapi_PBlock *int_search_pb = NULL; int rc = 0; - *ret_entry = NULL; + if (ret_entry) { + *ret_entry = NULL; + } + int_search_pb = slapi_pblock_new (); slapi_search_internal_set_pb ( int_search_pb, slapi_sdn_get_dn(dn), LDAP_SCOPE_BASE, "(|(objectclass=*)(objectclass=ldapsubentry))", attrs , @@ -893,9 +897,12 @@ slapi_search_internal_get_entry( Slapi_DN *dn, char ** attrs, Slapi_Entry **ret_ if ( LDAP_SUCCESS == rc ) { slapi_pblock_get( int_search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries ); if ( NULL != entries && NULL != entries[ 0 ]) { - Slapi_Entry *temp_entry = NULL; - temp_entry = entries[ 0 ]; - *ret_entry = slapi_entry_dup(temp_entry); + /* Only need to dup the entry if the caller passed ret_entry in. */ + if (ret_entry) { + Slapi_Entry *temp_entry = NULL; + temp_entry = entries[ 0 ]; + *ret_entry = slapi_entry_dup(temp_entry); + } } else { /* No entry there */ rc = LDAP_NO_SUCH_OBJECT;
0
c6fc373283c0cf5957b7a82d6bebd516bee9d503
389ds/389-ds-base
Issue 6857 - uiduniq: allow specifying match rules in the filter Allow uniqueness plugin to work with attributes where uniqueness should be enforced using different matching rule than the one defined for the attribute itself. Since uniqueness plugin configuration can contain multiple attributes, add matching rule right to the attribute as it is used in the LDAP rule (e.g. 'attribute:caseIgnoreMatch:' to force 'attribute' to be searched with case-insensitive matching rule instead of the original matching rule. Fixes: https://github.com/389ds/389-ds-base/issues/6857 Signed-off-by: Alexander Bokovoy <[email protected]>
commit c6fc373283c0cf5957b7a82d6bebd516bee9d503 Author: Alexander Bokovoy <[email protected]> Date: Wed Jul 9 12:08:09 2025 +0300 Issue 6857 - uiduniq: allow specifying match rules in the filter Allow uniqueness plugin to work with attributes where uniqueness should be enforced using different matching rule than the one defined for the attribute itself. Since uniqueness plugin configuration can contain multiple attributes, add matching rule right to the attribute as it is used in the LDAP rule (e.g. 'attribute:caseIgnoreMatch:' to force 'attribute' to be searched with case-insensitive matching rule instead of the original matching rule. Fixes: https://github.com/389ds/389-ds-base/issues/6857 Signed-off-by: Alexander Bokovoy <[email protected]> diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c index 053af4f9d..887e79d78 100644 --- a/ldap/servers/plugins/uiduniq/uid.c +++ b/ldap/servers/plugins/uiduniq/uid.c @@ -1030,7 +1030,14 @@ preop_add(Slapi_PBlock *pb) } for (i = 0; attrNames && attrNames[i]; i++) { + char *attr_match = strchr(attrNames[i], ':'); + if (attr_match != NULL) { + attr_match[0] = '\0'; + } err = slapi_entry_attr_find(e, attrNames[i], &attr); + if (attr_match != NULL) { + attr_match[0] = ':'; + } if (!err) { /* * Passed all the requirements - this is an operation we
0
20a4d4d200c26b2be6db9f468693628e29793ba1
389ds/389-ds-base
Resolves: 207893 Summary: Check if passwords are already hashed before sync'ing with AD.
commit 20a4d4d200c26b2be6db9f468693628e29793ba1 Author: Nathan Kinder <[email protected]> Date: Mon Aug 27 17:16:48 2007 +0000 Resolves: 207893 Summary: Check if passwords are already hashed before sync'ing with AD. diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c index e1c402da0..e1b3d669d 100644 --- a/ldap/servers/plugins/replication/windows_protocol_util.c +++ b/ldap/servers/plugins/replication/windows_protocol_util.c @@ -1375,7 +1375,37 @@ windows_create_remote_entry(Private_Repl_Protocol *prp,Slapi_Entry *original_ent slapi_valueset_first_value(vs,&value); password_value = slapi_value_get_string(value); - *password = slapi_ch_strdup(password_value); + /* We need to check if the first character of password_value is an + * opening brace since strstr will simply return it's first argument + * if it is an empty string. */ + if (password_value && (*password_value == '{')) { + if (strchr( password_value, '}' )) { + /* A storage scheme is present. Check if it's the + * clear storage scheme. */ + if ((strlen(password_value) >= PASSWD_CLEAR_PREFIX_LEN + 1) && + (strncasecmp(password_value, PASSWD_CLEAR_PREFIX, PASSWD_CLEAR_PREFIX_LEN) == 0)) { + /* This password is in clear text. Strip off the clear prefix + * and sync it. */ + *password = slapi_ch_strdup(password_value + PASSWD_CLEAR_PREFIX_LEN); + } else { + /* This password is stored in a non-cleartext format. + * We can only sync cleartext passwords. */ + slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, + "%s: windows_create_remote_entry: " + "Password is already hashed. Not syncing.\n", + agmt_get_long_name(prp->agmt)); + } + } else { + /* This password doesn't have a storage prefix but + * just happens to start with the '{' character. We'll + * assume that it's just a cleartext password without + * the proper storage prefix. 
*/ + *password = slapi_ch_strdup(password_value); + } + } else { + /* This password has no storage prefix, or the password is empty */ + *password = slapi_ch_strdup(password_value); + } } } @@ -1554,7 +1584,37 @@ windows_map_mods_for_replay(Private_Repl_Protocol *prp,LDAPMod **original_mods, { char *password_value = NULL; password_value = mod->mod_bvalues[0]->bv_val; - *password = slapi_ch_strdup(password_value); + /* We need to check if the first character of password_value is an + * opening brace since strstr will simply return it's first argument + * if it is an empty string. */ + if (password_value && (*password_value == '{')) { + if (strchr( password_value, '}' )) { + /* A storage scheme is present. Check if it's the + * clear storage scheme. */ + if ((strlen(password_value) >= PASSWD_CLEAR_PREFIX_LEN + 1) && + (strncasecmp(password_value, PASSWD_CLEAR_PREFIX, PASSWD_CLEAR_PREFIX_LEN) == 0)) { + /* This password is in clear text. Strip off the clear prefix + * and sync it. */ + *password = slapi_ch_strdup(password_value + PASSWD_CLEAR_PREFIX_LEN); + } else { + /* This password is stored in a non-cleartext format. + * We can only sync cleartext passwords. */ + slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, + "%s: windows_create_remote_entry: " + "Password is already hashed. Not syncing.\n", + agmt_get_long_name(prp->agmt)); + } + } else { + /* This password doesn't have a storage prefix but + * just happens to start with the '{' character. We'll + * assume that it's just a cleartext password without + * the proper storage prefix. 
*/ + *password = slapi_ch_strdup(password_value); + } + } else { + /* This password has no storage prefix, or the password is empty */ + *password = slapi_ch_strdup(password_value); + } } } } diff --git a/ldap/servers/plugins/replication/windowsrepl.h b/ldap/servers/plugins/replication/windowsrepl.h index 5cf43a452..121fbd749 100644 --- a/ldap/servers/plugins/replication/windowsrepl.h +++ b/ldap/servers/plugins/replication/windowsrepl.h @@ -99,4 +99,6 @@ void windows_conn_set_agmt_changed(Repl_Connection *conn); #define FAKE_STREET_ATTR_NAME "in#place#of#streetaddress" /* Used to work around contrained attribute legth for initials on AD */ #define AD_INITIALS_LENGTH 6 - +/* Used to check for pre-hashed passwords when syncing */ +#define PASSWD_CLEAR_PREFIX "{clear}" +#define PASSWD_CLEAR_PREFIX_LEN 7
0
a994df48e0d3bc2ae2a6cd81337823e0822de27e
389ds/389-ds-base
Ticket 48000 - replica agreement pause/resume should have a short sleep Bug Description: There is a small window between disabling a replica agreement and it actually stopping the agreement thread. This can allow one more replication session to slip through, which can cause issues with lib389 tests. Replication logging can also widen this "window". Fix Description: Add a short sleep after pausing/resuming a replication agreement. https://fedorahosted.org/389/ticket/48000 Reviewed by: rmeggins(Thanks!)
commit a994df48e0d3bc2ae2a6cd81337823e0822de27e Author: Mark Reynolds <[email protected]> Date: Mon Jan 26 16:11:18 2015 -0500 Ticket 48000 - replica agreement pause/resume should have a short sleep Bug Description: There is a small window between disabling a replica agreement and it actually stopping the agreement thread. This can allow one more replication session to slip through, which can cause issues with lib389 tests. Replication logging can also widen this "window". Fix Description: Add a short sleep after pausing/resuming a replication agreement. https://fedorahosted.org/389/ticket/48000 Reviewed by: rmeggins(Thanks!) diff --git a/src/lib389/lib389/agreement.py b/src/lib389/lib389/agreement.py index 561e4999c..141f6dfc3 100644 --- a/src/lib389/lib389/agreement.py +++ b/src/lib389/lib389/agreement.py @@ -6,6 +6,7 @@ Created on Dec 5, 2013 import ldap import re +import time from lib389._constants import * from lib389._entry import FormatDict @@ -13,12 +14,13 @@ from lib389.utils import normalizeDN from lib389 import Entry, DirSrv, NoSuchEntryError, InvalidArgumentError from lib389.properties import * + class Agreement(object): ALWAYS = '0000-2359 0123456' NEVER = '2358-2359 0' - + proxied_methods = 'search_s getEntry'.split() - + def __init__(self, conn): """@param conn - a DirSrv instance""" self.conn = conn @@ -27,16 +29,14 @@ class Agreement(object): def __getattr__(self, name): if name in Agreement.proxied_methods: return DirSrv.__getattr__(self.conn, name) - - def status(self, agreement_dn): """Return a formatted string with the replica status. Looking like: Status for meTo_localhost.localdomain:50389 agmt localhost.localdomain:50389 Update in progress: TRUE Last Update Start: 20131121132756Z Last Update End: 0 - Num. Changes Sent: 1:10/0 + Num. Changes Sent: 1:10/0 Num. 
changes Skipped: None Last update Status: 0 Replica acquired successfully: Incremental update started Init in progress: None @@ -88,18 +88,18 @@ class Agreement(object): HH [0..23] MM [0..59] DAYS [0-6]{1,7} - + @param interval - interval in the format 'HHMM-HHMM D+' (D is day number [0-6]) - + @return None - + @raise ValueError - if the inteval is illegal ''' c = re.compile(re.compile('^([0-9][0-9])([0-9][0-9])-([0-9][0-9])([0-9][0-9]) ([0-6]{1,7})$')) if not c.match(interval): raise ValueError("Bad schedule format %r" % interval) schedule = c.split(interval, c.groups) - + # check the hours hour = int(schedule[1]) if ((hour < 0) or (hour > 23)): @@ -109,7 +109,7 @@ class Agreement(object): raise ValueError("Bad schedule format %r: illegal hour %d" % (interval, hour)) if int(schedule[1]) > int(schedule[3]): raise ValueError("Bad schedule (start HOUR larger than end HOUR) %r: illegal hour %d" % (interval, int(schedule[1]))) - + # check the minutes minute = int(schedule[2]) if ((minute < 0) or (minute > 59)): @@ -118,45 +118,42 @@ class Agreement(object): if ((minute < 0) or (minute > 59)): raise ValueError("Bad schedule format %r: illegal minute %d" % (interval, minute)) - - - def schedule(self, agmtdn=None, interval=ALWAYS): """Schedule the replication agreement @param agmtdn - DN of the replica agreement - @param interval - in the form + @param interval - in the form - Agreement.ALWAYS - Agreement.NEVER - or 'HHMM-HHMM D+' With D=[0123456]+ - + @return - None - + @raise ValueError - if interval is not valid ldap.NO_SUCH_OBJECT - if agmtdn does not exist """ if not agmtdn: raise InvalidArgumentError("agreement DN is missing") - + # check the validity of the interval if interval != Agreement.ALWAYS and interval != Agreement.NEVER: self._check_interval(interval) - + # Check if the replica agreement exists try: self.conn.getEntry(agmtdn, ldap.SCOPE_BASE) except ldap.NO_SUCH_OBJECT: raise - + # update it self.log.info("Schedule replication agreement %s" % agmtdn) mod = 
[( ldap.MOD_REPLACE, 'nsds5replicaupdateschedule', [interval])] self.conn.modify_s(agmtdn, mod) - + def getProperties(self, agmnt_dn=None, properties=None): ''' - returns a dictionary of the requested properties. If properties is missing, - it returns all the properties. + returns a dictionary of the requested properties. If properties is missing, + it returns all the properties. @param agmtdn - is the replica agreement DN @param properties - is the list of properties name Supported properties are @@ -178,17 +175,17 @@ class Agreement(object): RA_CHANGES @return - returns a dictionary of the properties - + @raise ValueError - if invalid property name ldap.NO_SUCH_OBJECT - if agmtdn does not exist InvalidArgumentError - missing mandatory argument - - - ''' - + + + ''' + if not agmnt_dn: raise InvalidArgumentError("agmtdn is a mandatory argument") - + # # prepare the attribute list to retrieve from the RA # if properties is None, all RA attributes are retrieved @@ -200,32 +197,32 @@ class Agreement(object): if not prop_attr: raise ValueError("Improper property name: %s ", prop_name) attrs.append(prop_attr) - - filt = "(objectclass=*)" + + filt = "(objectclass=*)" result = {} try: entry = self.conn.getEntry(agmnt_dn, ldap.SCOPE_BASE, filt, attrs) - + # Build the result from the returned attributes for attr in entry.getAttrs(): # given an attribute name retrieve the property name - props = [ k for k, v in RA_PROPNAME_TO_ATTRNAME.iteritems() if v.lower() == attr.lower() ] - + props = [k for k, v in RA_PROPNAME_TO_ATTRNAME.iteritems() if v.lower() == attr.lower()] + # If this attribute is present in the RA properties, adds it to result - if len(props) > 0: + if len(props) > 0: result[props[0]] = entry.getValues(attr) except ldap.NO_SUCH_OBJECT: raise - + return result - + def setProperties(self, suffix=None, agmnt_dn=None, agmnt_entry=None, properties=None): ''' Set the properties of the agreement. 
If an 'agmnt_entry' (Entry) is provided, it updates the entry, else it updates the entry on the server. If the 'agmnt_dn' is provided it retrieves the entry using it, else it retrieve the agreement using the 'suffix'. - + @param suffix : suffix stored in that agreement (online update) @param agmnt_dn: DN of the agreement (online update) @param agmnt_entry: Entry of a agreement (offline update) @@ -248,20 +245,20 @@ class Agreement(object): RA_TIMEOUT RA_CHANGES - + @return None - + @raise ValueError: if unknown properties ValueError: if invalid agreement_entry - ValueError: if agmnt_dn or suffix are not associated to a replica + ValueError: if agmnt_dn or suffix are not associated to a replica InvalidArgumentError: If missing mandatory parameter - - - ''' + + + ''' # No properties provided if len(properties) == 0: return - + # check that the given properties are valid for prop in properties: # skip the prefix to add/del value @@ -269,33 +266,33 @@ class Agreement(object): raise ValueError("unknown property: %s" % prop) else: self.log.debug("setProperties: %s:%s" % (prop, properties[prop])) - + # At least we need to have suffix/agmnt_dn/agmnt_entry if not suffix and not agmnt_dn and not agmnt_entry: raise InvalidArgumentError("suffix and agmnt_dn and agmnt_entry are missing") - + # TODO if suffix: raise NotImplemented - + # the caller provides a set of properties to set into a replica entry if agmnt_entry: if not isinstance(agmnt_entry, Entry): raise ValueError("invalid instance of the agmnt_entry") - + # that is fine, now set the values for prop in properties: val = rawProperty(prop) - + # for Entry update it is a replace agmnt_entry.update({RA_PROPNAME_TO_ATTRNAME[val]: properties[prop]}) - - return - + + return + # for each provided property build the mod mod = [] for prop in properties: - + # retrieve/check the property name # and if the value needs to be added/deleted/replaced if prop.startswith('+'): @@ -307,21 +304,20 @@ class Agreement(object): else: 
mod_type = ldap.MOD_REPLACE prop_name = prop - + attr = RA_PROPNAME_TO_ATTRNAME[prop_name] if not attr: raise ValueError("invalid property name %s" % prop_name) - + # Now do the value checking we can do if prop_name == RA_SCHEDULE: self._check_interval(properties[prop]) - + mod.append((mod_type, attr, properties[prop])) - + # Now time to run the effective modify self.conn.modify_s(agmnt_dn, mod) - - + def list(self, suffix=None, consumer_host=None, consumer_port=None, agmtdn=None): ''' Returns the search result of the replica agreement(s) under the replica (replicaRoot is 'suffix'). @@ -330,28 +326,28 @@ class Agreement(object): 'consumer_host' and 'consumer_port' are either not specified or specified both. If 'agmtdn' is specified, it returns the search result entry of that replication agreement. - else if consumer host/port are specified it returns the replica agreements toward + else if consumer host/port are specified it returns the replica agreements toward that consumer host:port. - Finally if neither 'agmtdn' nor 'consumser host/port' are specifies it returns + Finally if neither 'agmtdn' nor 'consumser host/port' are specifies it returns all the replica agreements under the replica (replicaRoot is 'suffix'). 
- @param - suffix is the suffix targeted by the total update + @param - suffix is the suffix targeted by the total update @param - consumer_host hostname of the consumer @param - consumer_port port of the consumer @param - agmtdn DN of the replica agreement - + @return - search result of the replica agreements - + @raise - InvalidArgument: if missing mandatory argument (agmtdn or suffix, then host and port) - ValueError - if some properties are not valid - NoSuchEntryError - If no replica defined for the suffix ''' if not suffix and not agmtdn: raise InvalidArgumentError("suffix or agmtdn are required") - + if (consumer_host and not consumer_port) or (not consumer_host and consumer_port): raise InvalidArgumentError("consumer host/port are required together") - + if agmtdn: # easy case, just return the RA filt = "objectclass=*" @@ -362,18 +358,17 @@ class Agreement(object): if not replica_entries: raise NoSuchEntryError("Error: no replica set up for suffix " + suffix) replica_entry = replica_entries[0] - + # Now returns the replica agreement for that suffix that replicates to # consumer host/port if consumer_host and consumer_port: filt = "(&(objectclass=%s)(%s=%s)(%s=%d))" % (RA_OBJECTCLASS_VALUE, - RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_HOST], consumer_host, + RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_HOST], consumer_host, RA_PROPNAME_TO_ATTRNAME[RA_CONSUMER_PORT], consumer_port) else: filt = "(objectclass=%s)" % RA_OBJECTCLASS_VALUE return self.conn.search_s(replica_entry.dn, ldap.SCOPE_ONELEVEL, filt) - - + def create(self, suffix=None, host=None, port=None, properties=None): """Create (and return) a replication agreement from self to consumer. - self is the supplier, @@ -382,42 +377,42 @@ class Agreement(object): * a DirSrv object if chaining * an object with attributes: host, port, sslport, __str__ @param suffix - eg. 'dc=babel,dc=it' - @param properties - further properties dict. + @param properties - further properties dict. 
Support properties RA_NAME - RA_SUFFIX - RA_BINDDN - RA_BINDPW - RA_METHOD - RA_DESCRIPTION - RA_SCHEDULE - RA_TRANSPORT_PROT - RA_FRAC_EXCLUDE - RA_FRAC_EXCLUDE_TOTAL_UPDATE - RA_FRAC_STRIP - RA_CONSUMER_PORT - RA_CONSUMER_HOST - RA_CONSUMER_TOTAL_INIT - RA_TIMEOUT - RA_CHANGES - - + RA_SUFFIX + RA_BINDDN + RA_BINDPW + RA_METHOD + RA_DESCRIPTION + RA_SCHEDULE + RA_TRANSPORT_PROT + RA_FRAC_EXCLUDE + RA_FRAC_EXCLUDE_TOTAL_UPDATE + RA_FRAC_STRIP + RA_CONSUMER_PORT + RA_CONSUMER_HOST + RA_CONSUMER_TOTAL_INIT + RA_TIMEOUT + RA_CHANGES + + @return dn_agreement - DN of the created agreement - + @raise InvalidArgumentError - If the suffix is missing @raise NosuchEntryError - if a replica doesn't exist for that suffix @raise UNWILLING_TO_PERFORM if the database was previously in read-only state. To create new agreements you need to *restart* the directory server - + """ import string - + # Check we have a suffix [ mandatory ] if not suffix: self.log.warning("create: suffix is missing") raise InvalidArgumentError('suffix is mandatory') - + if properties: binddn = properties.get(RA_BINDDN) or defaultProperties[REPLICATION_BIND_DN] bindpw = properties.get(RA_BINDPW) or defaultProperties[REPLICATION_BIND_PW] @@ -434,7 +429,7 @@ class Agreement(object): description = format transport = defaultProperties[REPLICATION_TRANSPORT] timeout = defaultProperties[REPLICATION_TIMEOUT] - + # Compute the normalized suffix to be set in RA entry nsuffix = normalizeDN(suffix) @@ -452,7 +447,7 @@ class Agreement(object): # This is probably unnecessary because # we can just raise ALREADY_EXISTS try: - + entry = self.conn.getEntry(dn_agreement, ldap.SCOPE_BASE) self.log.warn("Agreement already exists: %r" % dn_agreement) return dn_agreement @@ -474,7 +469,7 @@ class Agreement(object): RA_PROPNAME_TO_ATTRNAME[RA_METHOD]: bindmethod, RA_PROPNAME_TO_ATTRNAME[RA_DESCRIPTION]: string.Template(description).substitute({'host': host, 'port': port}) }) - + # we make a copy here because we cannot change # 
the passed in properties dict propertiescopy = {} @@ -525,8 +520,6 @@ class Agreement(object): return dn_agreement - - def init(self, suffix=None, consumer_host=None, consumer_port=None): """Trigger a total update of the consumer replica - self is the supplier, @@ -538,19 +531,19 @@ class Agreement(object): @raise InvalidArgument: if missing mandatory argurment (suffix/host/port) """ - # + # # check the required parameters are set # if not suffix: self.log.fatal("initAgreement: suffix is missing") raise InvalidArgumentError('suffix is mandatory argument') - + nsuffix = normalizeDN(suffix) - + if not consumer_host: self.log.fatal("initAgreement: host is missing") raise InvalidArgumentError('host is mandatory argument') - + if not consumer_port: self.log.fatal("initAgreement: port is missing") raise InvalidArgumentError('port is mandatory argument') @@ -569,7 +562,7 @@ class Agreement(object): except ldap.NO_SUCH_OBJECT: self.log.fatal("initAgreement: No replica agreement to %s:%d for suffix %s" % (consumer_host, consumer_port, nsuffix)) raise - + # # trigger the total init # @@ -581,10 +574,10 @@ class Agreement(object): """Pause this replication agreement. This replication agreement will send no more changes. Use the resume() method to "unpause". It tries to disable the replica agreement. 
If it fails (not implemented in all version), - it uses the schedule() with interval '2358-2359 0' + it uses the schedule() with interval '2358-2359 0' @param agmtdn - agreement dn @param interval - (default NEVER) replication schedule to use - + @return None @raise ValueError - if interval is not valid @@ -594,18 +587,21 @@ class Agreement(object): ldap.MOD_REPLACE, 'nsds5ReplicaEnabled', ['off'])] try: self.conn.modify_s(agmtdn, mod) - except ldap.LDAPError, e: + except ldap.LDAPError: # before 1.2.11, no support for nsds5ReplicaEnabled # use schedule hack self.schedule(interval) + # Allow a little time for the change to take effect + time.sleep(2) + def resume(self, agmtdn, interval=ALWAYS): """Resume a paused replication agreement, paused with the "pause" method. It tries to enabled the replica agreement. If it fails (not implemented in all version), - it uses the schedule() with interval '0000-2359 0123456' + it uses the schedule() with interval '0000-2359 0123456' @param agmtdn - agreement dn @param interval - (default ALWAYS) replication schedule to use - + @return None @raise ValueError - if interval is not valid @@ -616,17 +612,20 @@ class Agreement(object): ldap.MOD_REPLACE, 'nsds5ReplicaEnabled', ['on'])] try: self.conn.modify_s(agmtdn, mod) - except ldap.LDAPError, e: + except ldap.LDAPError: # before 1.2.11, no support for nsds5ReplicaEnabled # use schedule hack self.schedule(interval) + # Allow a little time for the change to take effect + time.sleep(2) + def changes(self, agmnt_dn): """Return a list of changes sent by this agreement.""" retval = 0 try: ent = self.conn.getEntry( - agmnt_dn, ldap.SCOPE_BASE, "(objectclass=*)", [ RA_PROPNAME_TO_ATTRNAME[RA_CHANGES] ]) + agmnt_dn, ldap.SCOPE_BASE, "(objectclass=*)", [RA_PROPNAME_TO_ATTRNAME[RA_CHANGES]]) except: raise NoSuchEntryError( "Error reading status from agreement", agmnt_dn) @@ -642,4 +641,3 @@ class Agreement(object): if ary and len(ary) > 1: retval = retval + int(ary[1].split("/")[0]) return 
retval - \ No newline at end of file
0
fd6b417fc53d1c97675638c5489b122e1cf4f1d6
389ds/389-ds-base
Issue 5647 - Fix unused variable warning from previous commit (#5670) * issue 5647 - memory leak in audit log when adding entries * Issue 5647 - Fix unused variable warning from previous commit
commit fd6b417fc53d1c97675638c5489b122e1cf4f1d6 Author: progier389 <[email protected]> Date: Mon Feb 20 16:14:05 2023 +0100 Issue 5647 - Fix unused variable warning from previous commit (#5670) * issue 5647 - memory leak in audit log when adding entries * Issue 5647 - Fix unused variable warning from previous commit diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c index 3128e0497..0597ecc6f 100644 --- a/ldap/servers/slapd/auditlog.c +++ b/ldap/servers/slapd/auditlog.c @@ -254,7 +254,6 @@ add_entry_attrs(Slapi_Entry *entry, lenstr *l) } else { /* Return all attributes */ for (; entry_attr; entry_attr = entry_attr->a_next) { - Slapi_Value **vals = attr_get_present_values(entry_attr); char *attr = NULL; slapi_attr_get_type(entry_attr, &attr);
0
89a862674858f3457e360033690e983316a73e50
389ds/389-ds-base
bump version to 1.2.10.a7
commit 89a862674858f3457e360033690e983316a73e50 Author: Rich Megginson <[email protected]> Date: Fri Jan 13 12:03:07 2012 -0700 bump version to 1.2.10.a7 diff --git a/VERSION.sh b/VERSION.sh index 701bddcda..fce68b06f 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -14,7 +14,7 @@ VERSION_MAINT=10 # if this is a PRERELEASE, set VERSION_PREREL # otherwise, comment it out # be sure to include the dot prefix in the prerel -VERSION_PREREL=.a6 +VERSION_PREREL=.a7 # NOTES on VERSION_PREREL # use aN for an alpha release e.g. a1, a2, etc. # use rcN for a release candidate e.g. rc1, rc2, etc.
0
9093f582b410c27e197fc122c479c33c0a76fb62
389ds/389-ds-base
Ticket #47310 - Attribute "dsOnlyMemberUid" not allowed when syncing nested posix groups from AD with posixWinsync Bug description: When Posix Winsync API plug-in is configured with posixWinsyncMapMemberUid and posixWinsyncMapNestedGrouping enabled (true), Posix Group added to AD is synchronized to DS with mapped dsOnlyMemberUid and memberUid. When adding a Posix Group with the nested group member, addGroupMembership function adds "dynamicGroup" to objectClass to allow the Posix Group entry to have dsOnlyMemberUid. The add should be made against the entry in the memory since the entry is not yet stored in the database, but it was trying to modify against the backend. Fix description: This patch directly adds "dynamicGroup" to the objectclass valueset, by which the attribute "dsOnlyMemberUid" is allowed to add to the entry. In addition, 1) when reflecting the mapped memberUid on DS to AD, the logic was corrected to "if dsOnlyMemberUid matches memberUid", 2) when the Posix Group is nested in the multiple levels, the mapped memberUid was not retrieved. The code was added. Reviewed by Rich (Thank you!!) https://fedorahosted.org/389/ticket/47310
commit 9093f582b410c27e197fc122c479c33c0a76fb62 Author: Noriko Hosoi <[email protected]> Date: Wed Aug 14 16:09:48 2013 -0700 Ticket #47310 - Attribute "dsOnlyMemberUid" not allowed when syncing nested posix groups from AD with posixWinsync Bug description: When Posix Winsync API plug-in is configured with posixWinsyncMapMemberUid and posixWinsyncMapNestedGrouping enabled (true), Posix Group added to AD is synchronized to DS with mapped dsOnlyMemberUid and memberUid. When adding a Posix Group with the nested group member, addGroupMembership function adds "dynamicGroup" to objectClass to allow the Posix Group entry to have dsOnlyMemberUid. The add should be made against the entry in the memory since the entry is not yet stored in the database, but it was trying to modify against the backend. Fix description: This patch directly adds "dynamicGroup" to the objectclass valueset, by which the attribute "dsOnlyMemberUid" is allowed to add to the entry. In addition, 1) when reflecting the mapped memberUid on DS to AD, the logic was corrected to "if dsOnlyMemberUid matches memberUid", 2) when the Posix Group is nested in the multiple levels, the mapped memberUid was not retrieved. The code was added. Reviewed by Rich (Thank you!!) 
https://fedorahosted.org/389/ticket/47310 diff --git a/ldap/servers/plugins/posix-winsync/posix-group-func.c b/ldap/servers/plugins/posix-winsync/posix-group-func.c index aa76d6c90..4e2dae538 100644 --- a/ldap/servers/plugins/posix-winsync/posix-group-func.c +++ b/ldap/servers/plugins/posix-winsync/posix-group-func.c @@ -59,18 +59,8 @@ addDynamicGroupIfNecessary(Slapi_Entry *entry, Slapi_Mods *smods) { if (slapi_attr_value_find(oc_attr, slapi_value_get_berval(voc)) != 0) { if (smods) { slapi_mods_add_string(smods, LDAP_MOD_ADD, "objectClass", "dynamicGroup"); - } - else { - smods = slapi_mods_new(); - slapi_mods_add_string(smods, LDAP_MOD_ADD, "objectClass", "dynamicGroup"); - - Slapi_PBlock *mod_pb = slapi_pblock_new(); - slapi_modify_internal_set_pb_ext(mod_pb, slapi_entry_get_sdn(entry), slapi_mods_get_ldapmods_passout(smods), 0, 0, - posix_winsync_get_plugin_identity(), 0); - slapi_modify_internal_pb(mod_pb); - slapi_pblock_destroy(mod_pb); - - slapi_mods_free(&smods); + } else { + slapi_entry_add_string(entry, "objectClass", "dynamicGroup"); } } @@ -392,7 +382,7 @@ getMembershipFromDownward(Slapi_Entry *entry, Slapi_ValueSet *muid_vs, Slapi_Val } else { /* PosixGroups except for the top one are already fully mapped out */ - if ((!hasObjectClass(entry, "posixGroup") || depth == 0) && + if ((!hasObjectClass(entry, "posixGroup") || (depth == 0)) && (hasObjectClass(child, "ntGroup") || hasObjectClass(child, "posixGroup"))) { /* Recurse downward */ @@ -405,6 +395,20 @@ getMembershipFromDownward(Slapi_Entry *entry, Slapi_ValueSet *muid_vs, Slapi_Val if (slapi_entry_attr_find(child, "uid", &uid_attr) == 0) { slapi_attr_first_value(uid_attr, &v); + if (v && !slapi_valueset_find(uid_attr, muid_vs, v)) { + slapi_log_error(SLAPI_LOG_PLUGIN, POSIX_WINSYNC_PLUGIN_NAME, + "getMembershipFromDownward: adding member: %s\n", + slapi_value_get_string(v)); + slapi_valueset_add_value(muid_vs, v); + slapi_valueset_add_value(muid_nested_vs, v); + } + } + } else if 
(hasObjectClass(child, "posixGroup")) { + Slapi_Attr *uid_attr = NULL; + Slapi_Value *v = NULL; + if (slapi_entry_attr_find(child, "memberuid", &uid_attr) == 0) { + slapi_attr_first_value(uid_attr, &v); + if (v && !slapi_valueset_find(uid_attr, muid_vs, v)) { slapi_log_error(SLAPI_LOG_PLUGIN, POSIX_WINSYNC_PLUGIN_NAME, "getMembershipFromDownward: adding member: %s\n", diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c index 5b3d6e721..a9a3b442a 100644 --- a/ldap/servers/plugins/posix-winsync/posix-winsync.c +++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c @@ -694,7 +694,8 @@ posix_winsync_pre_ad_mod_group_cb(void *cbdata, const Slapi_Entry *rawentry, Sla int j; for (j = slapi_attr_first_value(attr, &v); j != -1; j = slapi_attr_next_value(attr, i, &v)) { - if (!slapi_valueset_find(dsmuid_attr, dsmuid_vs, v)) { + /* If dsOnlyMemberUid matches memberUid, add it to AD */ + if (slapi_valueset_find(dsmuid_attr, dsmuid_vs, v)) { slapi_valueset_add_value(vs, v); } } @@ -726,8 +727,7 @@ posix_winsync_pre_ad_mod_group_cb(void *cbdata, const Slapi_Entry *rawentry, Sla valueset_get_valuearray(vs)); *do_modify = 1; } - } else { - + } else if (!slapi_valueset_isempty(vs)) { slapi_mods_add_mod_values(smods, LDAP_MOD_ADD, ad_type, valueset_get_valuearray(vs)); if (0 == slapi_attr_type_cmp(type, "gidNumber", SLAPI_TYPE_CMP_SUBTYPE)) {
0
07b5f941afb8817c145b8fc73e91c5ea92482948
389ds/389-ds-base
387681 - Fix errors in mapping AD tombstones The AD tombstone mapping code is not behaving correctly if a cn contains a comma (such as a "last, first" type value). The code is supposed to locate the first ":" in the tombstone DN, then scan for the first "," after that. Everything between is the GUID. The problem is that the code is starting at the beginning of the string when searching for the "," instead of starting at the ":" that was previously found. This causes the "," in the cn to be found instead, which makes us fail to find the GUID. The fix is to simply start searching for the "," from the ":" in the tombstone DN.
commit 07b5f941afb8817c145b8fc73e91c5ea92482948 Author: Nathan Kinder <[email protected]> Date: Fri Oct 30 10:28:09 2009 -0700 387681 - Fix errors in mapping AD tombstones The AD tombstone mapping code is not behaving correctly if a cn contains a comma (such as a "last, first" type value). The code is supposed to locate the first ":" in the tombstone DN, then scan for the first "," after that. Everything between is the GUID. The problem is that the code is starting at the beginning of the string when searching for the "," instead of starting at the ":" that was previously found. This causes the "," in the cn to be found instead, which makes us fail to find the GUID. The fix is to simply start searching for the "," from the ":" in the tombstone DN. diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c index ed5b8a32a..2c31c4f91 100644 --- a/ldap/servers/plugins/replication/windows_protocol_util.c +++ b/ldap/servers/plugins/replication/windows_protocol_util.c @@ -2615,10 +2615,13 @@ extract_guid_from_tombstone_dn(const char *dn) "CN=WDel Userdb1\\\nDEL:551706bc-ecf2-4b38-9284-9a8554171d69,CN=Deleted Objects,DC=magpie,DC=com" */ /* First find the 'DEL:' */ - colon_offset = strchr(dn,':'); - /* Then scan forward to the next ',' */ - comma_offset = strchr(dn,','); - /* The characters inbetween are the GUID, copy them to a new string and return to the caller */ + if (colon_offset = strchr(dn,':')) { + /* Then scan forward to the next ',' */ + comma_offset = strchr(colon_offset,','); + } + + /* The characters inbetween are the GUID, copy them + * to a new string and return to the caller */ if (comma_offset && colon_offset && comma_offset > colon_offset) { guid = slapi_ch_malloc(comma_offset - colon_offset); strncpy(guid,colon_offset+1,(comma_offset-colon_offset)-1);
0
bc98362705bfef62e06d482b0f4622f2c48fda3b
389ds/389-ds-base
Ticket #47832 - attrcrypt_generate_key calls slapd_pk11_TokenKeyGenWithFlags with improper macro Description: attrcrypt_generate_key was passing an improper macro CKA_DECRYPT, which is a super set of the correct one CKF_DECRYPT. Thanks to Bob Relyea ([email protected]) for finding it out. https://fedorahosted.org/389/ticket/47832 Reviewed by [email protected].
commit bc98362705bfef62e06d482b0f4622f2c48fda3b Author: Noriko Hosoi <[email protected]> Date: Thu Jul 10 13:54:35 2014 -0700 Ticket #47832 - attrcrypt_generate_key calls slapd_pk11_TokenKeyGenWithFlags with improper macro Description: attrcrypt_generate_key was passing an improper macro CKA_DECRYPT, which is a super set of the correct one CKF_DECRYPT. Thanks to Bob Relyea ([email protected]) for finding it out. https://fedorahosted.org/389/ticket/47832 Reviewed by [email protected]. diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c index f4a5d1a03..41856d58a 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c @@ -344,7 +344,7 @@ attrcrypt_generate_key(attrcrypt_cipher_state *acs,PK11SymKey **symmetric_key) 0 /*param*/, acs->ace->key_size, NULL /*keyid*/, - CKA_DECRYPT/*op*/, + CKF_DECRYPT/*op*/, CKF_ENCRYPT/*attr*/, NULL); if (new_symmetric_key) {
0
84acf5dd59a8bc1412f77750ca96fe35b19204d8
389ds/389-ds-base
Add dereference request control to lib389 for testing plugins.
commit 84acf5dd59a8bc1412f77750ca96fe35b19204d8 Author: William Brown <[email protected]> Date: Tue Aug 25 15:38:11 2015 +0930 Add dereference request control to lib389 for testing plugins. diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 281f48f68..0f5a279c1 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -74,6 +74,7 @@ MAJOR, MINOR, _, _, _ = sys.version_info if MAJOR >= 3 or (MAJOR == 2 and MINOR >= 7): from ldap.controls.simple import GetEffectiveRightsControl + from lib389._controls import DereferenceControl RE_DBMONATTR = re.compile(r'^([a-zA-Z]+)-([1-9][0-9]*)$') RE_DBMONATTRSUN = re.compile(r'^([a-zA-Z]+)-([a-zA-Z]+)$') @@ -2459,6 +2460,46 @@ class DirSrv(SimpleLDAPObject): self.set_option(ldap.OPT_SERVER_CONTROLS, []) return ldap_result + # Is there a better name for this function? + def dereference(self, deref, base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, *args, **kwargs): + """ + Perform a search which dereferences values from attributes such as member + or unique member. + For arguments to this function, please see LDAPObject.search_s. For example: + + @param deref - Dereference query + @param base - Base DN of the suffix to check + @param scope - search scope + @param args - + @param kwargs - + @return - ldap result + + LDAPObject.search_s(base, scope[, filterstr='(objectClass=*)'[, attrlist=None[, attrsonly=0]]]) -> list|None + + A deref query is of the format: + + "<attribute to derference>:<deref attr1>,<deref attr2>..." + + "uniqueMember:dn,objectClass" + + This will return the dn's and objectClasses of the dereferenced members of the group. + """ + if not (MAJOR >= 3 or (MAJOR == 2 and MINOR >= 7)): + raise Exception("UNSUPPORTED EXTENDED OPERATION ON THIS VERSION OF PYTHON") + ldap_result = None + # This may not be thread safe. Is there a better way to do this? 
+ try: + drc = DereferenceControl(True, deref=deref.encode('UTF-8')) + sctrl = [drc] + self.set_option(ldap.OPT_SERVER_CONTROLS, sctrl) + + #ldap_result = self.search_s(base, scope, *args, **kwargs) + res = self.search(base, scope, *args, **kwargs) + resp_type, resp_data, resp_msgid, decoded_resp_ctrls, _, _ = self.result4(res, add_ctrls=1, resp_ctrl_classes={CONTROL_DEREF: DereferenceControl}) + finally: + self.set_option(ldap.OPT_SERVER_CONTROLS, []) + return resp_data, decoded_resp_ctrls + def buildLDIF(self, num, ldif_file, suffix='dc=example,dc=com'): """Generate a simple ldif file using the dbgen.pl script, and set the ownership and permissions to match the user that the server runs as. diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py index 79f00cbcd..02072e447 100644 --- a/src/lib389/lib389/_constants.py +++ b/src/lib389/lib389/_constants.py @@ -115,6 +115,12 @@ RDN_REPLICA = "cn=replica" RETROCL_SUFFIX = "cn=changelog" +################################## +### +### Request Control OIDS +### +################################## +CONTROL_DEREF = '1.3.6.1.4.1.4203.666.5.16' ################################## ### diff --git a/src/lib389/lib389/_controls.py b/src/lib389/lib389/_controls.py new file mode 100644 index 000000000..b09f61e93 --- /dev/null +++ b/src/lib389/lib389/_controls.py @@ -0,0 +1,122 @@ +""" +Lib389 python ldap request controls. + +These should be upstreamed into python ldap when possible. 
+""" + +from lib389._constants import * + +from ldap.controls import LDAPControl + +from pyasn1.type import namedtype,univ +from pyasn1.codec.ber import encoder,decoder +from pyasn1.type import tag +from pyasn1_modules.rfc2251 import AttributeDescription, LDAPDN, AttributeValue +from pyasn1 import debug + +# Could use AttributeDescriptionList + +""" + controlValue ::= SEQUENCE OF derefSpec DerefSpec + + DerefSpec ::= SEQUENCE { + derefAttr attributeDescription, ; with DN syntax + attributes AttributeList } + + AttributeList ::= SEQUENCE OF attr AttributeDescription + + Needs to be matched by ber_scanf(ber, "{a{v}}", ... ) +""" +class AttributeList(univ.SequenceOf): + componentType = AttributeDescription() + +class DerefSpec(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('derefAttr', AttributeDescription()), + namedtype.NamedType('attributes', AttributeList()), + ) + +class DerefControlValue(univ.SequenceOf): + componentType = DerefSpec() + +""" + controlValue ::= SEQUENCE OF derefRes DerefRes + + DerefRes ::= SEQUENCE { + derefAttr AttributeDescription, + derefVal LDAPDN, + attrVals [0] PartialAttributeList OPTIONAL } + + PartialAttributeList ::= SEQUENCE OF + partialAttribute PartialAttribute +""" + +class Vals(univ.SetOf): + componentType = AttributeValue() + +class PartialAttribute(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('type', AttributeDescription()), + namedtype.NamedType('vals', Vals()), + ) + +class PartialAttributeList(univ.SequenceOf): + componentType = PartialAttribute() + tagSet = univ.Sequence.tagSet.tagImplicitly(tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)) + +class DerefRes(univ.Sequence): + componentType = namedtype.NamedTypes( + namedtype.NamedType('derefAttr', AttributeDescription()), + namedtype.NamedType('derefVal', LDAPDN()), + namedtype.OptionalNamedType('attrVals', PartialAttributeList()), + ) + +class DerefResultControlValue(univ.SequenceOf): + componentType = 
DerefRes() + +class DereferenceControl(LDAPControl): + """ + Dereference Control + """ + + def __init__(self, criticality=True, deref=None): + LDAPControl.__init__(self, CONTROL_DEREF, criticality) + self.deref = deref + + def encodeControlValue(self): + cv = DerefControlValue() + cvi = 0 + for derefSpec in self.deref.split(';'): + derefAttr, attributes = derefSpec.split(':') + attributes = attributes.split(',') + al = AttributeList() + i = 0 + while len(attributes) > 0: + al.setComponentByPosition(i, attributes.pop()) + i += 1 + ds = DerefSpec() + ds.setComponentByName('derefAttr', derefAttr) + ds.setComponentByName('attributes', al) + cv.setComponentByPosition(cvi, ds) + cvi += 1 + return encoder.encode(cv) + + def decodeControlValue(self,encodedControlValue): + self.entry = [] + #debug.setLogger(debug.flagAll) + decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) + + for derefres in decodedValue: + result = {} + result['derefAttr'] = str(derefres.getComponentByName('derefAttr')) + result['derefVal'] = str(derefres.getComponentByName('derefVal')) + result['attrVals'] = [] + for attrval in derefres.getComponentByName('attrVals'): + av = {} + av['type'] = str(attrval.getComponentByName('type')) + av['vals'] = [] + for val in attrval.getComponentByName('vals'): + av['vals'].append(str(val)) + result['attrVals'].append(av) + self.entry.append(result) + diff --git a/src/lib389/tests/dereference_test.py b/src/lib389/tests/dereference_test.py new file mode 100644 index 000000000..f45a41ba6 --- /dev/null +++ b/src/lib389/tests/dereference_test.py @@ -0,0 +1,70 @@ +''' +Created on Aug 1, 2015 + +@author: William Brown +''' +from lib389._constants import * +from lib389 import DirSrv,Entry +import ldap + +INSTANCE_PORT = 54321 +INSTANCE_SERVERID = 'dereferenceds' + +class Test_dereference(): + def setUp(self): + instance = DirSrv(verbose=False) + instance.log.debug("Instance allocated") + args = {SER_HOST: LOCALHOST, + SER_PORT: 
INSTANCE_PORT, + SER_SERVERID_PROP: INSTANCE_SERVERID + } + instance.allocate(args) + if instance.exists(): + instance.delete() + instance.create() + instance.open() + self.instance = instance + + def tearDown(self): + if self.instance.exists(): + #self.instance.db2ldif(bename='userRoot', suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], encrypt=False, \ + #repl_data=False, outputfile='%s/ldif/%s.ldif' % (self.instance.dbdir,INSTANCE_SERVERID )) + #self.instance.clearBackupFS() + #self.instance.backupFS() + self.instance.delete() + + def add_user(self): + # Create a user entry + for i in range(0,2): + uentry = Entry('uid=test%s,%s' % (i, DEFAULT_SUFFIX)) + uentry.setValues('objectclass', 'top', 'extensibleobject') + uentry.setValues('uid', 'test') + self.instance.add_s(uentry) + + def add_group(self): + gentry = Entry('cn=testgroup,%s' % DEFAULT_SUFFIX) + gentry.setValues('objectclass', 'top', 'extensibleobject') + gentry.setValues('cn', 'testgroup') + for i in range(0,2): + gentry.setValues('uniqueMember', 'uid=test%s,%s' % (i,DEFAULT_SUFFIX)) + self.instance.add_s(gentry) + + def test_dereference(self): + try: + result, control_response = self.instance.dereference('uniqueMember:dn,uid;uniqueMember:dn,uid', filterstr='(cn=testgroup)') + assert False + except ldap.UNAVAILABLE_CRITICAL_EXTENSION: + # This is a good thing! It means our deref Control Value is correctly formatted. + pass + result, control_response = self.instance.dereference('uniqueMember:cn,uid,objectclass', filterstr='(cn=testgroup)') + + assert result[0][2][0].entry == [{'derefVal': 'uid=test1,dc=example,dc=com', 'derefAttr': 'uniqueMember', 'attrVals': [{'vals': ['top', 'extensibleobject'], 'type': 'objectclass'}, {'vals': ['test', 'test1'], 'type': 'uid'}]}] + +if __name__ == "__main__": + test = Test_dereference() + test.setUp() + test.add_user() + test.add_group() + test.test_dereference() + test.tearDown() +
0
b23e560cee6df2fc4d886ea4bca74dc32b942296
389ds/389-ds-base
Ticket 49294 - radiusd before in unit file Bug Description: freeradiusd often won't start correctly with dirsrv due to a race condition. Fix Description: Force dirsrv to have a "before" line to start before freeradiusd does. https://pagure.io/389-ds-base/issue/49294 Author: wibrown Review by: mreynolds (Thanks!)
commit b23e560cee6df2fc4d886ea4bca74dc32b942296 Author: William Brown <[email protected]> Date: Tue Jun 20 12:18:28 2017 +1000 Ticket 49294 - radiusd before in unit file Bug Description: freeradiusd often won't start correctly with dirsrv due to a race condition. Fix Description: Force dirsrv to have a "before" line to start before freeradiusd does. https://pagure.io/389-ds-base/issue/49294 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/wrappers/systemd.group.in b/wrappers/systemd.group.in index 53abc1349..6022f7e27 100644 --- a/wrappers/systemd.group.in +++ b/wrappers/systemd.group.in @@ -1,6 +1,7 @@ [Unit] Description=@capbrand@ Directory Server After=chronyd.service ntpd.service network-online.target syslog.target +Before=radiusd.service [Install] WantedBy=multi-user.target diff --git a/wrappers/systemd.template.asan.service.in b/wrappers/systemd.template.asan.service.in index 0fe0bad6a..1fe321ccb 100644 --- a/wrappers/systemd.template.asan.service.in +++ b/wrappers/systemd.template.asan.service.in @@ -16,6 +16,7 @@ Description=@capbrand@ Directory Server with ASAN %i. PartOf=@systemdgroupname@ After=chronyd.service ntpd.service network-online.target syslog.target +Before=radiusd.service [Service] Type=notify diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in index 972be7d6c..30b9e4b78 100644 --- a/wrappers/systemd.template.service.in +++ b/wrappers/systemd.template.service.in @@ -16,6 +16,7 @@ Description=@capbrand@ Directory Server %i. PartOf=@systemdgroupname@ After=chronyd.service ntpd.service network-online.target syslog.target +Before=radiusd.service [Service] Type=notify
0
202c9831194558a7439da815f6627993ce1a17db
389ds/389-ds-base
Ticket 47383 - connections attribute in cn=snmp,cn=monitor is counted twice Bug Description: In disconnect_server_nomutex() we increment, instead of decrement the connection counter. Fix Description: Decrement the counter. Also did some code cleanup. https://fedorahosted.org/389/ticket/47383 Reviewed by: Richm(Thanks!)
commit 202c9831194558a7439da815f6627993ce1a17db Author: Mark Reynolds <[email protected]> Date: Fri Jun 7 11:41:38 2013 -0400 Ticket 47383 - connections attribute in cn=snmp,cn=monitor is counted twice Bug Description: In disconnect_server_nomutex() we increment, instead of decrement the connection counter. Fix Description: Decrement the counter. Also did some code cleanup. https://fedorahosted.org/389/ticket/47383 Reviewed by: Richm(Thanks!) diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 359dabe3b..687bf4202 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -2891,77 +2891,78 @@ void disconnect_server_nomutex( Connection *conn, PRUint64 opconnid, int opid, PRErrorCode reason, PRInt32 error ) { if ( ( conn->c_sd != SLAPD_INVALID_SOCKET && - conn->c_connid == opconnid ) && !(conn->c_flags & CONN_FLAG_CLOSING) ) { - - /* - * PR_Close must be called before anything else is done because - * of NSPR problem on NT which requires that the socket on which - * I/O timed out is closed before any other I/O operation is - * attempted by the thread. - * WARNING : As of today the current code does not fulfill the - * requirements above. - */ - - /* Mark that the socket should be closed on this connection. - * We don't want to actually close the socket here, because - * the listener thread could be PR_Polling over it right now. - * The last thread to stop using the connection will do the closing. - */ - conn->c_flags |= CONN_FLAG_CLOSING; - g_decrement_current_conn_count(); + conn->c_connid == opconnid ) && !(conn->c_flags & CONN_FLAG_CLOSING) ) + { + /* + * PR_Close must be called before anything else is done because + * of NSPR problem on NT which requires that the socket on which + * I/O timed out is closed before any other I/O operation is + * attempted by the thread. + * WARNING : As of today the current code does not fulfill the + * requirements above. + */ - /* - * Print the error captured above. 
- */ - if (error && (EPIPE != error) ) { - slapi_log_access( LDAP_DEBUG_STATS, - "conn=%" NSPRIu64 " op=%d fd=%d closed error %d (%s) - %s\n", - conn->c_connid, opid, conn->c_sd, error, - slapd_system_strerror(error), - slapd_pr_strerror(reason)); - } else { - slapi_log_access( LDAP_DEBUG_STATS, - "conn=%" NSPRIu64 " op=%d fd=%d closed - %s\n", - conn->c_connid, opid, conn->c_sd, - slapd_pr_strerror(reason)); - } + /* Mark that the socket should be closed on this connection. + * We don't want to actually close the socket here, because + * the listener thread could be PR_Polling over it right now. + * The last thread to stop using the connection will do the closing. + */ + conn->c_flags |= CONN_FLAG_CLOSING; + g_decrement_current_conn_count(); - if (! config_check_referral_mode()) { - slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsConnections); - } + /* + * Print the error captured above. + */ + if (error && (EPIPE != error) ) { + slapi_log_access( LDAP_DEBUG_STATS, + "conn=%" NSPRIu64 " op=%d fd=%d closed error %d (%s) - %s\n", + conn->c_connid, opid, conn->c_sd, error, + slapd_system_strerror(error), + slapd_pr_strerror(reason)); + } else { + slapi_log_access( LDAP_DEBUG_STATS, + "conn=%" NSPRIu64 " op=%d fd=%d closed - %s\n", + conn->c_connid, opid, conn->c_sd, + slapd_pr_strerror(reason)); + } - conn->c_gettingber = 0; - connection_abandon_operations( conn ); - /* needed here to ensure simple paged results timeout properly and - * don't impact subsequent ops */ - pagedresults_reset_timedout_nolock(conn); + if (! config_check_referral_mode()) { + slapi_counter_decrement(g_get_global_snmp_vars()->ops_tbl.dsConnections); + } - if (! config_check_referral_mode()) { - /* - * If any of the outstanding operations on this - * connection were persistent searches, then - * ding all the persistent searches to get them - * to notice that their operations have been abandoned. 
- */ - int found_ps = 0; - Operation *o; + conn->c_gettingber = 0; + connection_abandon_operations( conn ); + /* needed here to ensure simple paged results timeout properly and + * don't impact subsequent ops */ + pagedresults_reset_timedout_nolock(conn); - for ( o = conn->c_ops; !found_ps && o != NULL; o = o->o_next ) { - if ( o->o_flags & OP_FLAG_PS ) { - found_ps = 1; - } - } - if ( found_ps ) { - if ( NULL == ps_wakeup_all_fn ) { - if ( get_entry_point( ENTRY_POINT_PS_WAKEUP_ALL, - (caddr_t *)(&ps_wakeup_all_fn )) == 0 ) { - (ps_wakeup_all_fn)(); - } - } else { - (ps_wakeup_all_fn)(); + if (! config_check_referral_mode()) { + /* + * If any of the outstanding operations on this + * connection were persistent searches, then + * ding all the persistent searches to get them + * to notice that their operations have been abandoned. + */ + int found_ps = 0; + Operation *o; + + for ( o = conn->c_ops; !found_ps && o != NULL; o = o->o_next ) { + if ( o->o_flags & OP_FLAG_PS ) { + found_ps = 1; + } + } + if ( found_ps ) { + if ( NULL == ps_wakeup_all_fn ) { + if ( get_entry_point( ENTRY_POINT_PS_WAKEUP_ALL, + (caddr_t *)(&ps_wakeup_all_fn )) == 0 ) + { + (ps_wakeup_all_fn)(); + } + } else { + (ps_wakeup_all_fn)(); + } + } } - } - } } }
0
0f0725c9296228b8d3a6729572ab60c4b1721791
389ds/389-ds-base
Issue 49593 - NDN cache stats should be under the global stats Bug description: The Normalized DN cache stats are listed under backend stats, but the cache is global across all backends so they should just be listed under the global ldbm monitor. Fix description: Move NDN stats to global ldbm monitor. Change lib389 MonitorLDBM and MonitorBackend objects accordingly. Fix dbmon.sh tool so it shows the stats like this: Wed Mar 7 12:50:14 EST 2018 dbcachefree 52150272 free% 99.812 roevicts 0 hit% 100 pagein 0 pageout 0 dbname count free free% size hit_ratio global:ndn 69 20960096 99.946 165.565 73 userroot:ent 2 201318969 100.0 3811.5 62.0 userroot:dn 2 67108738 100.0 63.0 0.0 https://pagure.io/389-ds-base/issue/49593 Reviewed by: mreynolds, wibrown (Thanks!)
commit 0f0725c9296228b8d3a6729572ab60c4b1721791 Author: Simon Pichugin <[email protected]> Date: Wed Mar 7 18:29:22 2018 +0100 Issue 49593 - NDN cache stats should be under the global stats Bug description: The Normalized DN cache stats are listed under backend stats, but the cache is global across all backends so they should just be listed under the global ldbm monitor. Fix description: Move NDN stats to global ldbm monitor. Change lib389 MonitorLDBM and MonitorBackend objects accordingly. Fix dbmon.sh tool so it shows the stats like this: Wed Mar 7 12:50:14 EST 2018 dbcachefree 52150272 free% 99.812 roevicts 0 hit% 100 pagein 0 pageout 0 dbname count free free% size hit_ratio global:ndn 69 20960096 99.946 165.565 73 userroot:ent 2 201318969 100.0 3811.5 62.0 userroot:dn 2 67108738 100.0 63.0 0.0 https://pagure.io/389-ds-base/issue/49593 Reviewed by: mreynolds, wibrown (Thanks!) diff --git a/ldap/admin/src/scripts/dbmon.sh.in b/ldap/admin/src/scripts/dbmon.sh.in index 8dad687b2..55aaff51d 100644 --- a/ldap/admin/src/scripts/dbmon.sh.in +++ b/ldap/admin/src/scripts/dbmon.sh.in @@ -55,6 +55,10 @@ parseldif() { /^dbcachepageout/ { dbcachepageout=$2 } /^nsslapd-db-page-ro-evict-rate/ { dbroevict=$2 } /^nsslapd-db-pages-in-use/ { dbpages=$2 } + /^normalizeddncachehitratio/ { ndnratio=$2 } + /^currentnormalizeddncachesize/ { ndncursize=$2 ; havendnstats=1 } + /^maxnormalizeddncachesize/ { ndnmaxsize=$2 } + /^currentnormalizeddncachecount/ { ndncount=$2 } /^dn: cn=monitor, *cn=[a-zA-Z0-9][a-zA-Z0-9_\.\-]*, *cn=ldbm database, *cn=plugins, *cn=config/ { idxnum=-1 idxname="" @@ -72,10 +76,6 @@ parseldif() { /^maxdncachesize/ { stats[dbname,"dnmax"]=$2 } /^currentdncachecount/ { stats[dbname,"dncnt"]=$2 } /^dncachehitratio/ { stats[dbname,"dnratio"]=$2 } - /^normalizeddncachehitratio/ { stats[dbname,"ndnratio"]=$2 } - /^currentnormalizeddncachesize/ { stats[dbname,"ndncursize"]=$2 ; havendnstats=1 } - /^maxnormalizeddncachesize/ { stats[dbname,"ndnmaxsize"]=$2 } - 
/^currentnormalizeddncachecount/ { stats[dbname,"ndncount"]=$2 } /^dbfilename-/ { #rhds @@ -142,7 +142,9 @@ parseldif() { maxdbnamelen += 4 # :ent dbentext = ":ent" dbdnext = ":dn " - dbndnext = ":ndn" + if (havendnstats) { + dbndnext = ":ndn" + } } else { dbentext = "" dbdnext = "" @@ -156,6 +158,16 @@ parseldif() { fmtstr = sprintf("%%%d.%ds %%10.10s %%13.13s %%6.6s %%7.7s %%10.9s\n", maxdbnamelen, maxdbnamelen) printf fmtstr, "dbname", "count", "free", "free%", "size", "hit_ratio%" } + + fmtstr = sprintf("%%%d.%ds %%10d %%13d %%6.1f %%7.1f %%10.1f\n", maxdbnamelen, maxdbnamelen) + if (havendnstats) { + # normalized dn cache + ndnfree=ndnmaxsize-ndncursize + ndnfreep=ndnfree/ndnmaxsize*100 + ndnsize=(ndncount == 0) ? 0 : ndncursize/ndncount + printf fmtstr, "global" dbndnext, ndncount, ndnfree, ndnfreep, ndnsize, ndnratio + } + for (dbn in dbnames) { cur=stats[dbn,"entcur"] max=stats[dbn,"entmax"] @@ -164,7 +176,6 @@ parseldif() { free=max-cur freep=free/max*100 size=(cnt == 0) ? 0 : cur/cnt - fmtstr = sprintf("%%%d.%ds %%10d %%13d %%6.1f %%7.1f %%10.1f\n", maxdbnamelen, maxdbnamelen) printf fmtstr, dbnames[dbn] dbentext, cnt, free, freep, size, eratio if (havednstats) { dcur=stats[dbn,"dncur"] @@ -177,18 +188,6 @@ parseldif() { printf fmtstr, dbnames[dbn] dbdnext, dcnt, dfree, dfreep, dsize, dratio } - if (havendnstats) { - # normalized dn cache - nratio=stats[dbn,"ndnratio"] - ncursize=stats[dbn,"ndncursize"] - nmaxsize=stats[dbn,"ndnmaxsize"] - ncount=stats[dbn,"ndncount"] - nfree=nmaxsize-ncursize - nfreep=nfree/nmaxsize*100 - nsize=(ncount == 0) ? 
0 : ncursize/ncount - printf fmtstr, dbnames[dbn] dbndnext, ncount, nfree, nfreep, nsize, nratio - } - if (indexlist) { len = idxmaxlen[dbn] fmtstr = sprintf("%%%d.%ds %%%d.%ds pagein %%8d pageout %%8d\n", maxdbnamelen, maxdbnamelen, len, len) diff --git a/ldap/servers/slapd/back-ldbm/monitor.c b/ldap/servers/slapd/back-ldbm/monitor.c index 5f57b9f51..f912dca2a 100644 --- a/ldap/servers/slapd/back-ldbm/monitor.c +++ b/ldap/servers/slapd/back-ldbm/monitor.c @@ -48,11 +48,9 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), struct berval *vals[2]; char buf[BUFSIZ]; PRUint64 hits, tries; - long nentries, maxentries, count; + int64_t nentries; + int64_t maxentries; size_t size, maxsize; - size_t thread_size; - size_t evicts; - size_t slots; /* NPCTE fix for bugid 544365, esc 0. <P.R> <04-Jul-2001> */ struct stat astat; /* end of NPCTE fix for bugid 544365 */ @@ -101,9 +99,9 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), MSET("currentEntryCacheSize"); sprintf(buf, "%lu", (long unsigned int)maxsize); MSET("maxEntryCacheSize"); - sprintf(buf, "%ld", nentries); + sprintf(buf, "%" PRId64, nentries); MSET("currentEntryCacheCount"); - sprintf(buf, "%ld", maxentries); + sprintf(buf, "%" PRId64, maxentries); MSET("maxEntryCacheCount"); if (entryrdn_get_switch()) { @@ -120,39 +118,11 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), MSET("currentDnCacheSize"); sprintf(buf, "%lu", (long unsigned int)maxsize); MSET("maxDnCacheSize"); - sprintf(buf, "%ld", nentries); + sprintf(buf, "%" PRId64, nentries); MSET("currentDnCacheCount"); - sprintf(buf, "%ld", maxentries); + sprintf(buf, "%" PRId64, maxentries); MSET("maxDnCacheCount"); } - /* normalized dn cache stats */ - if (ndn_cache_started()) { - ndn_cache_get_stats(&hits, &tries, &size, &maxsize, &thread_size, &evicts, &slots, &count); - sprintf(buf, "%" PRIu64, tries); - MSET("normalizedDnCacheTries"); - sprintf(buf, "%" PRIu64, hits); - 
MSET("normalizedDnCacheHits"); - sprintf(buf, "%" PRIu64, (tries - hits)); - MSET("normalizedDnCacheMisses"); - sprintf(buf, "%lu", (unsigned long)(100.0 * (double)hits / (double)(tries > 0 ? tries : 1))); - MSET("normalizedDnCacheHitRatio"); - sprintf(buf, "%"PRIu64, evicts); - MSET("NormalizedDnCacheEvictions"); - sprintf(buf, "%lu", (long unsigned int)size); - MSET("currentNormalizedDnCacheSize"); - if (maxsize == 0) { - sprintf(buf, "%d", -1); - } else { - sprintf(buf, "%lu", (long unsigned int)maxsize); - } - MSET("maxNormalizedDnCacheSize"); - sprintf(buf, "%"PRIu64, thread_size); - MSET("NormalizedDnCacheThreadSize"); - sprintf(buf, "%"PRIu64, slots); - MSET("NormalizedDnCacheThreadSlots"); - sprintf(buf, "%ld", count); - MSET("currentNormalizedDnCacheCount"); - } #ifdef DEBUG { @@ -235,6 +205,14 @@ ldbm_back_monitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAft DB_MPOOL_STAT *mpstat = NULL; DB_MPOOL_FSTAT **mpfstat = NULL; uintmax_t cache_tries; + int64_t count; + uint64_t hits; + uint64_t tries; + uint64_t size; + uint64_t maxsize; + uint64_t thread_size; + uint64_t evicts; + uint64_t slots; vals[0] = &val; vals[1] = NULL; @@ -271,6 +249,35 @@ ldbm_back_monitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAft sprintf(buf, "%lu", (unsigned long)mpstat->st_rw_evict); MSET("dbCacheRWEvict"); + /* normalized dn cache stats */ + if (ndn_cache_started()) { + ndn_cache_get_stats(&hits, &tries, &size, &maxsize, &thread_size, &evicts, &slots, &count); + sprintf(buf, "%" PRIu64, tries); + MSET("normalizedDnCacheTries"); + sprintf(buf, "%" PRIu64, hits); + MSET("normalizedDnCacheHits"); + sprintf(buf, "%" PRIu64, (tries - hits)); + MSET("normalizedDnCacheMisses"); + sprintf(buf, "%" PRIu64, (uint64_t)(100.0 * (double)hits / (double)(tries > 0 ? 
tries : 1))); + MSET("normalizedDnCacheHitRatio"); + sprintf(buf, "%" PRIu64, evicts); + MSET("NormalizedDnCacheEvictions"); + sprintf(buf, "%" PRIu64, size); + MSET("currentNormalizedDnCacheSize"); + if (maxsize == 0) { + sprintf(buf, "%d", -1); + } else { + sprintf(buf, "%" PRIu64, maxsize); + } + MSET("maxNormalizedDnCacheSize"); + sprintf(buf, "%" PRIu64, thread_size); + MSET("NormalizedDnCacheThreadSize"); + sprintf(buf, "%" PRIu64, slots); + MSET("NormalizedDnCacheThreadSlots"); + sprintf(buf, "%" PRId64, count); + MSET("currentNormalizedDnCacheCount"); + } + slapi_ch_free((void **)&mpstat); if (mpfstat) diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py index e36d9111e..1cf9b226c 100644 --- a/src/lib389/lib389/monitor.py +++ b/src/lib389/lib389/monitor.py @@ -10,6 +10,7 @@ import ldap from ldap import filter as ldap_filter from lib389._constants import * from lib389._mapped_object import DSLdapObjects, DSLdapObject +from lib389.utils import ds_is_older class Monitor(DSLdapObject): """An object that helps reading of cn=monitor for server statistics. 
@@ -96,6 +97,17 @@ class MonitorLDBM(DSLdapObject): 'dbcacheroevict', 'dbcacherwevict', ] + if not ds_is_older("1.4.0"): + self._backend_keys.extend([ + 'normalizeddncachetries', + 'normalizeddncachehits', + 'normalizeddncachemisses', + 'normalizeddncachehitratio', + 'currentnormalizeddncachesize', + 'maxnormalizeddncachesize', + 'currentnormalizeddncachecount' + ]) + def status(self): return self.get_attrs_vals(self._backend_keys) @@ -122,14 +134,17 @@ class MonitorBackend(DSLdapObject): 'maxdncachesize', 'currentdncachecount', 'maxdncachecount', - 'normalizeddncachetries', - 'normalizeddncachehits', - 'normalizeddncachemisses', - 'normalizeddncachehitratio', - 'currentnormalizeddncachesize', - 'maxnormalizeddncachesize', - 'currentnormalizeddncachecount', ] + if ds_is_older("1.4.0"): + self._backend_keys.extend([ + 'normalizeddncachetries', + 'normalizeddncachehits', + 'normalizeddncachemisses', + 'normalizeddncachehitratio', + 'currentnormalizeddncachesize', + 'maxnormalizeddncachesize', + 'currentnormalizeddncachecount' + ]) def status(self): return self.get_attrs_vals(self._backend_keys)
0
f4b90ed5e43fa06ea6185cf17073b7a32db6ef4c
389ds/389-ds-base
Bug 554573 - ACIs use bind DN from bind req rather than cert mapped DN from sasl/external https://bugzilla.redhat.com/show_bug.cgi?id=554573 Resolves: bug 554573 Bug Description: ACIs use bind DN from bind req rather than cert mapped DN from sasl/external Reviewed by: nhosoi (Thanks!) Branch: HEAD Fix Description: Added a new config option - nsslapd-force-sasl-external (on/off) default is off - when set to on, a SIMPLE bind on a connection that has set a DN from a cert will be changed to be a SASL/EXTERNAL bind. Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: yes - new attribute to document
commit f4b90ed5e43fa06ea6185cf17073b7a32db6ef4c Author: Rich Megginson <[email protected]> Date: Fri Mar 5 12:13:08 2010 -0700 Bug 554573 - ACIs use bind DN from bind req rather than cert mapped DN from sasl/external https://bugzilla.redhat.com/show_bug.cgi?id=554573 Resolves: bug 554573 Bug Description: ACIs use bind DN from bind req rather than cert mapped DN from sasl/external Reviewed by: nhosoi (Thanks!) Branch: HEAD Fix Description: Added a new config option - nsslapd-force-sasl-external (on/off) default is off - when set to on, a SIMPLE bind on a connection that has set a DN from a cert will be changed to be a SASL/EXTERNAL bind. Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: yes - new attribute to document diff --git a/.gitignore b/.gitignore index 4e6883132..c1ba6bd8b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ autom4te.cache *~ +*.patch diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c index 3458ff668..d3e90091f 100644 --- a/ldap/servers/slapd/bind.c +++ b/ldap/servers/slapd/bind.c @@ -305,7 +305,8 @@ do_bind( Slapi_PBlock *pb ) switch ( version ) { case LDAP_VERSION2: if (method == LDAP_AUTH_SIMPLE - && (dn == NULL || *dn == '\0') && cred.bv_len == 0 + && (config_get_force_sasl_external() || + ((dn == NULL || *dn == '\0') && cred.bv_len == 0)) && pb->pb_conn->c_external_dn != NULL) { /* Treat this like a SASL EXTERNAL Bind: */ method = LDAP_AUTH_SASL; @@ -317,6 +318,17 @@ do_bind( Slapi_PBlock *pb ) } break; case LDAP_VERSION3: + if ((method == LDAP_AUTH_SIMPLE) && + config_get_force_sasl_external() && + (pb->pb_conn->c_external_dn != NULL)) { + /* Treat this like a SASL EXTERNAL Bind: */ + method = LDAP_AUTH_SASL; + saslmech = slapi_ch_strdup (LDAP_SASL_EXTERNAL); + /* This enables a client to establish an identity by sending + * a certificate in the SSL handshake, and also use LDAPv2 + * (by sending this type of Bind request). 
+ */ + } break; default: LDAPDebug( LDAP_DEBUG_TRACE, "bind: unknown LDAP protocol version %d\n", diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index c4026ac6b..89a3c793e 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -620,7 +620,11 @@ static struct config_get_and_set { (ConfigGetFunc)config_get_anon_access_switch}, {CONFIG_MINSSF_ATTRIBUTE, config_set_minssf, NULL, 0, - (void**)&global_slapdFrontendConfig.minssf, CONFIG_INT, NULL} + (void**)&global_slapdFrontendConfig.minssf, CONFIG_INT, NULL}, + {CONFIG_FORCE_SASL_EXTERNAL_ATTRIBUTE, config_set_force_sasl_external, + NULL, 0, + (void**)&global_slapdFrontendConfig.force_sasl_external, CONFIG_ON_OFF, + (ConfigGetFunc)config_get_force_sasl_external} #ifdef MEMPOOL_EXPERIMENTAL ,{CONFIG_MEMPOOL_SWITCH_ATTRIBUTE, config_set_mempool_switch, NULL, 0, @@ -921,6 +925,7 @@ FrontendConfig_init () { cfg->rewrite_rfc1274 = LDAP_OFF; cfg->schemareplace = slapi_ch_strdup( CONFIG_SCHEMAREPLACE_STR_REPLICATION_ONLY ); cfg->schema_ignore_trailing_spaces = SLAPD_DEFAULT_SCHEMA_IGNORE_TRAILING_SPACES; + cfg->force_sasl_external = LDAP_OFF; /* do not force sasl external by default - let clients abide by the LDAP standards and send us a SASL/EXTERNAL bind if that's what they want to do */ cfg->pwpolicy_local = LDAP_OFF; cfg->pw_policy.pw_change = LDAP_ON; @@ -5491,6 +5496,34 @@ config_set_anon_access_switch( const char *attrname, char *value, return retVal; } +int +config_get_force_sasl_external(void) +{ + int retVal; + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + CFG_LOCK_READ(slapdFrontendConfig); + retVal = slapdFrontendConfig->force_sasl_external; + CFG_UNLOCK_READ(slapdFrontendConfig); + + return retVal; +} + +int +config_set_force_sasl_external( const char *attrname, char *value, + char *errorbuf, int apply ) +{ + int retVal = LDAP_SUCCESS; + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + + retVal = 
config_set_onoff(attrname, + value, + &(slapdFrontendConfig->force_sasl_external), + errorbuf, + apply); + + return retVal; +} + /* * This function is intended to be used from the dse code modify callback. It diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index 9133958cf..be3b9ddef 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -370,6 +370,7 @@ int config_set_anon_access_switch(const char *attrname, char *value, char *error int config_set_minssf(const char *attrname, char *value, char *errorbuf, int apply ); int config_set_accesslogbuffering(const char *attrname, char *value, char *errorbuf, int apply); int config_set_csnlogging(const char *attrname, char *value, char *errorbuf, int apply); +int config_set_force_sasl_external(const char *attrname, char *value, char *errorbuf, int apply ); #if !defined(_WIN32) && !defined(AIX) int config_set_maxdescriptors( const char *attrname, char *value, char *errorbuf, int apply ); @@ -507,6 +508,7 @@ int config_get_mempool_maxfreelist(); long config_get_system_page_size(); int config_get_system_page_bits(); #endif +int config_get_force_sasl_external(); int is_abspath(const char *); char* rel2abspath( char * ); diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index adef7a8af..589756f1e 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -1869,6 +1869,7 @@ typedef struct _slapdEntryPoints { #define CONFIG_SSL_CHECK_HOSTNAME_ATTRIBUTE "nsslapd-ssl-check-hostname" #define CONFIG_HASH_FILTERS_ATTRIBUTE "nsslapd-hash-filters" #define CONFIG_OUTBOUND_LDAP_IO_TIMEOUT_ATTRIBUTE "nsslapd-outbound-ldap-io-timeout" +#define CONFIG_FORCE_SASL_EXTERNAL_ATTRIBUTE "nsslapd-force-sasl-external" #ifdef MEMPOOL_EXPERIMENTAL #define CONFIG_MEMPOOL_SWITCH_ATTRIBUTE "nsslapd-mempool" @@ -2084,6 +2085,7 @@ typedef struct _slapdFrontendConfig { long system_page_size; /* system page size */ int system_page_bits; /* bit count to shift the 
system page size */ #endif /* MEMPOOL_EXPERIMENTAL */ + int force_sasl_external; /* force SIMPLE bind to be SASL/EXTERNAL if client cert credentials were supplied */ } slapdFrontendConfig_t; /* possible values for slapdFrontendConfig_t.schemareplace */
0
55857117cb324257e62a9683000ab8121cd54a7d
389ds/389-ds-base
Resolves: bug 249633 Description: setup-ds.pl should create default org entries Fix Description: Make the default in interactive mode to be to add those entries.
commit 55857117cb324257e62a9683000ab8121cd54a7d Author: Rich Megginson <[email protected]> Date: Wed Jul 25 22:05:26 2007 +0000 Resolves: bug 249633 Description: setup-ds.pl should create default org entries Fix Description: Make the default in interactive mode to be to add those entries. diff --git a/ldap/admin/src/scripts/DSDialogs.pm b/ldap/admin/src/scripts/DSDialogs.pm index 049e794d5..ee7bf0f3b 100644 --- a/ldap/admin/src/scripts/DSDialogs.pm +++ b/ldap/admin/src/scripts/DSDialogs.pm @@ -220,7 +220,8 @@ my $dspopulate = new Dialog ( my $self = shift; my $val = $self->{manager}->{inf}->{slapd}->{InstallLdifFile}; if (!defined($val)) { - $val = 'none'; + $val = 'suggest'; + $self->{manager}->{inf}->{slapd}->{AddOrgEntries} = 'Yes'; } return $val; },
0
83f3f443c596aaf17795d2f71aba00498159ef20
389ds/389-ds-base
bump version to 1.3.5.1
commit 83f3f443c596aaf17795d2f71aba00498159ef20 Author: Noriko Hosoi <[email protected]> Date: Wed Mar 23 14:13:11 2016 -0700 bump version to 1.3.5.1 diff --git a/VERSION.sh b/VERSION.sh index 94de01e9d..c13c57b38 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -10,7 +10,7 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=1 VERSION_MINOR=3 -VERSION_MAINT=5 +VERSION_MAINT=5.1 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=`date -u +%Y%m%d%H%M%S`
0
d787b37d106253e15d0c2c70f181e9ddb0698f43
389ds/389-ds-base
Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053 https://bugzilla.redhat.com/show_bug.cgi?id=619122 Resolves: bug 619122 Bug description: fix coverify Defect Type: Resource leaks issues CID 11978. description: The slapi_register_object_extension() has been modified to release fe when an error occurs.
commit d787b37d106253e15d0c2c70f181e9ddb0698f43 Author: Endi S. Dewata <[email protected]> Date: Wed Jul 28 12:35:18 2010 -0500 Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053 https://bugzilla.redhat.com/show_bug.cgi?id=619122 Resolves: bug 619122 Bug description: fix coverify Defect Type: Resource leaks issues CID 11978. description: The slapi_register_object_extension() has been modified to release fe when an error occurs. diff --git a/ldap/servers/slapd/factory.c b/ldap/servers/slapd/factory.c index 6de2d69fa..8a1dc2d18 100644 --- a/ldap/servers/slapd/factory.c +++ b/ldap/servers/slapd/factory.c @@ -450,6 +450,7 @@ slapi_register_object_extension( { LDAPDebug( LDAP_DEBUG_ANY, "ERROR: factory.c: Plugin %s failed to register extension for object %s.\n", pluginname, objectname, 0); rc= -1; + delete_factory_extension(&fe); } return rc; }
0
fdf78dca6c34b32522443c82ddd4c3c7ef04da80
389ds/389-ds-base
Issue 49095 - targetattr wildcard evaluation is incorrectly case sensitive Description: When processing an aci that uses a wildcard targetattr, the comparision should be done using case insensitive functions. https://pagure.io/389-ds-base/issue/49095 Reviewed by: firstyear(Thanks!)
commit fdf78dca6c34b32522443c82ddd4c3c7ef04da80 Author: Mark Reynolds <[email protected]> Date: Mon Mar 20 15:08:45 2017 -0400 Issue 49095 - targetattr wildcard evaluation is incorrectly case sensitive Description: When processing an aci that uses a wildcard targetattr, the comparision should be done using case insensitive functions. https://pagure.io/389-ds-base/issue/49095 Reviewed by: firstyear(Thanks!) diff --git a/dirsrvtests/tests/tickets/ticket49095_test.py b/dirsrvtests/tests/tickets/ticket49095_test.py new file mode 100644 index 000000000..04f92b2df --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49095_test.py @@ -0,0 +1,85 @@ +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=testuser,dc=example,dc=com' +acis = ['(targetattr != "tele*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', + '(targetattr != "TELE*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', + '(targetattr != "telephonenum*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', + '(targetattr != "TELEPHONENUM*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)'] + + +def test_ticket49095(topo): + """Check that target attrbiutes with wildcards are case insensitive + """ + + # Add an entry + try: + topo.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'testuser', + 'telephonenumber': '555-555-5555' + }))) 
+ except ldap.LDAPError as e: + log.fatal('Failed to add test user: ' + e.message['desc']) + assert False + + for aci in acis: + # Add ACI + try: + topo.standalone.modify_s(DEFAULT_SUFFIX, + [(ldap.MOD_REPLACE, 'aci', aci)]) + + except ldap.LDAPError as e: + log.fatal('Failed to set aci: ' + aci + ': ' + e.message['desc']) + assert False + + # Set Anonymous Bind to test aci + try: + topo.standalone.simple_bind_s("", "") + except ldap.LDAPError as e: + log.fatal('Failed to bind anonymously: ' + e.message['desc']) + assert False + + # Search for entry - should not get any results + try: + entry = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, + 'telephonenumber=*') + if entry: + log.fatal('The entry was incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search anonymously: ' + e.message['desc']) + assert False + + # Set root DN Bind so we can update aci's + try: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('Failed to bind anonymously: ' + e.message['desc']) + assert False + + log.info("Test Passed") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c index 0a9380852..48b8efcd0 100644 --- a/ldap/servers/plugins/acl/acl.c +++ b/ldap/servers/plugins/acl/acl.c @@ -3407,19 +3407,19 @@ acl_match_substring ( Slapi_Filter *f, char *str, int exact_match) } /* this assumes that str and the filter components are already - * normalized. If not, it shoul be done + * normalized. 
If not, it should be done */ if ( initial != NULL) { len = strlen(initial); if (exact_match) { - int rc = strncmp(p, initial, len); + int rc = strncasecmp(p, initial, len); if (rc) { return ACL_FALSE; } else { p += len; } } else { - p = strstr(p, initial); + p = strcasestr(p, initial); if (p) { p += len; } else { @@ -3430,7 +3430,7 @@ acl_match_substring ( Slapi_Filter *f, char *str, int exact_match) if ( any != NULL) { for (i = 0; any && any[i] != NULL; i++) { - p = strstr(p, any[i]); + p = strcasestr(p, any[i]); if (p) { p += strlen(any[i]); } else { @@ -3444,7 +3444,7 @@ acl_match_substring ( Slapi_Filter *f, char *str, int exact_match) len = strlen(final); tlen = strlen(p); if (len > tlen) return ACL_FALSE; - if (strcmp(p+tlen-len, final)) return ACL_FALSE; + if (strcasecmp(p+tlen-len, final)) return ACL_FALSE; } return ACL_TRUE;
0
56b9868c2fca5a56b11a4d0a9387980b6f338835
389ds/389-ds-base
logs created at startup can get wrong file mode https://bugzilla.redhat.com/show_bug.cgi?id=518279 Resolves: bug 518279 Bug Description: logs created at startup can get wrong file mode Reviewed by: nkinder (Thanks!) Fix Description: Try to apply the mode using chmod() if a log file has been specified. If and only if the log file has not been set, or if the chmod() succeeds, apply the changes to the internal config. Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no
commit 56b9868c2fca5a56b11a4d0a9387980b6f338835 Author: Rich Megginson <[email protected]> Date: Tue Sep 22 17:04:19 2009 -0600 logs created at startup can get wrong file mode https://bugzilla.redhat.com/show_bug.cgi?id=518279 Resolves: bug 518279 Bug Description: logs created at startup can get wrong file mode Reviewed by: nkinder (Thanks!) Fix Description: Try to apply the mode using chmod() if a log file has been specified. If and only if the log file has not been set, or if the chmod() succeeds, apply the changes to the internal config. Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 33c57f46e..6470c0663 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -661,6 +661,7 @@ int log_set_mode (const char *attrname, char *value, int logtype, char *errorbuf, int apply) { int v = 0; + int retval = LDAP_SUCCESS; slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); if ( NULL == value ) { @@ -680,27 +681,54 @@ log_set_mode (const char *attrname, char *value, int logtype, char *errorbuf, in switch (logtype) { case SLAPD_ACCESS_LOG: LOG_ACCESS_LOCK_WRITE( ); - slapi_ch_free ( (void **) &fe_cfg->accesslog_mode ); - fe_cfg->accesslog_mode = slapi_ch_strdup (value); - loginfo.log_access_mode = v; + if (loginfo.log_access_file && + ( chmod( loginfo.log_access_file, v ) != 0) ) { + int oserr = errno; + PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: Failed to chmod access log file to %s: errno %d (%s)", + attrname, value, oserr, slapd_system_strerror(oserr) ); + retval = LDAP_UNWILLING_TO_PERFORM; + } else { /* only apply the changes if no file or if successful */ + slapi_ch_free ( (void **) &fe_cfg->accesslog_mode ); + fe_cfg->accesslog_mode = slapi_ch_strdup (value); + loginfo.log_access_mode = v; + } LOG_ACCESS_UNLOCK_WRITE(); break; case SLAPD_ERROR_LOG: LOG_ERROR_LOCK_WRITE( ); - slapi_ch_free ( (void **) &fe_cfg->errorlog_mode ); - fe_cfg->errorlog_mode = 
slapi_ch_strdup (value); - loginfo.log_error_mode = v; + if (loginfo.log_error_file && + ( chmod( loginfo.log_error_file, v ) != 0) ) { + int oserr = errno; + PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: Failed to chmod error log file to %s: errno %d (%s)", + attrname, value, oserr, slapd_system_strerror(oserr) ); + retval = LDAP_UNWILLING_TO_PERFORM; + } else { /* only apply the changes if no file or if successful */ + slapi_ch_free ( (void **) &fe_cfg->errorlog_mode ); + fe_cfg->errorlog_mode = slapi_ch_strdup (value); + loginfo.log_error_mode = v; + } LOG_ERROR_UNLOCK_WRITE(); break; case SLAPD_AUDIT_LOG: LOG_AUDIT_LOCK_WRITE( ); - slapi_ch_free ( (void **) &fe_cfg->auditlog_mode ); - fe_cfg->auditlog_mode = slapi_ch_strdup (value); - loginfo.log_audit_mode = v; + if (loginfo.log_audit_file && + ( chmod( loginfo.log_audit_file, v ) != 0) ) { + int oserr = errno; + PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: Failed to chmod audit log file to %s: errno %d (%s)", + attrname, value, oserr, slapd_system_strerror(oserr) ); + retval = LDAP_UNWILLING_TO_PERFORM; + } else { /* only apply the changes if no file or if successful */ + slapi_ch_free ( (void **) &fe_cfg->auditlog_mode ); + fe_cfg->auditlog_mode = slapi_ch_strdup (value); + loginfo.log_audit_mode = v; + } LOG_AUDIT_UNLOCK_WRITE(); break; } - return LDAP_SUCCESS; + return retval; } /******************************************************************************
0
3c6758091e1d5bd2eaec52981410d7a01cb2bfa5
389ds/389-ds-base
Resolves: #243488 Summary: Use mozldap6 ldif_parse_line API Changes: 1) Removed ldif.h from the DS tree. 2) Eliminated the 5-th arg of ldif_parse_line (errmsg) and the errmsg related code.
commit 3c6758091e1d5bd2eaec52981410d7a01cb2bfa5 Author: Noriko Hosoi <[email protected]> Date: Mon Jun 11 17:22:38 2007 +0000 Resolves: #243488 Summary: Use mozldap6 ldif_parse_line API Changes: 1) Removed ldif.h from the DS tree. 2) Eliminated the 5-th arg of ldif_parse_line (errmsg) and the errmsg related code. diff --git a/ldap/admin/lib/dsalib_confs.c b/ldap/admin/lib/dsalib_confs.c index 0a2023cef..bce1594b8 100644 --- a/ldap/admin/lib/dsalib_confs.c +++ b/ldap/admin/lib/dsalib_confs.c @@ -78,22 +78,16 @@ ds_get_conf_from_file(FILE *conf) char *type, *value; int vlen = 0; int rc; - char *errmsg = NULL; if ( *line == '\n' || *line == '\0' ) { break; } /* this call modifies line */ - rc = ldif_parse_line(line, &type, &value, &vlen, &errmsg); + rc = ldif_parse_line(line, &type, &value, &vlen); if (rc != 0) { - if ( errmsg != NULL ) { - ds_send_error(errmsg, 0); - PR_smprintf_free(errmsg); - } else { ds_send_error("Unknown error processing config file", 0); - } free(begin); return NULL; } @@ -160,7 +154,7 @@ ds_get_value(char **ds_config, char *parm, int phase, int occurance) * Use ldif_parse_line() so continuation markers are * handled correctly, etc. 
*/ - char *errmsg, *type = NULL, *value = NULL, *tmpvalue = NULL; + char *type = NULL, *value = NULL, *tmpvalue = NULL; int ldif_rc, tmpvlen = 0; char *tmpline = strdup(line); @@ -171,19 +165,15 @@ ds_get_value(char **ds_config, char *parm, int phase, int occurance) return(NULL); } - ldif_rc = ldif_parse_line( tmpline, &type, &tmpvalue, - &tmpvlen, &errmsg ); + ldif_rc = ldif_parse_line( tmpline, &type, &tmpvalue, &tmpvlen ); if (ldif_rc < 0) { - ds_send_error(errmsg, 0 /* do not print errno */); + ds_send_error("Unknown error processing config file", 0); } else if (ldif_rc == 0) { /* value returned in place */ value = strdup(tmpvalue); } else { /* malloc'd value */ value = tmpvalue; } free(tmpline); - if (errmsg) { - PR_smprintf_free(errmsg); - } return value; } } diff --git a/ldap/include/ldif.h b/ldap/include/ldif.h deleted file mode 100644 index c6de212cc..000000000 --- a/ldap/include/ldif.h +++ /dev/null @@ -1,113 +0,0 @@ -/** BEGIN COPYRIGHT BLOCK - * This Program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free Software - * Foundation; version 2 of the License. - * - * This Program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along with - * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place, Suite 330, Boston, MA 02111-1307 USA. - * - * In addition, as a special exception, Red Hat, Inc. gives You the additional - * right to link the code of this Program with code not covered under the GNU - * General Public License ("Non-GPL Code") and to distribute linked combinations - * including the two, subject to the limitations in this paragraph. 
Non-GPL Code - * permitted under this exception must only link to the code of this Program - * through those well defined interfaces identified in the file named EXCEPTION - * found in the source code files (the "Approved Interfaces"). The files of - * Non-GPL Code may instantiate templates or use macros or inline functions from - * the Approved Interfaces without causing the resulting work to be covered by - * the GNU General Public License. Only Red Hat, Inc. may make changes or - * additions to the list of Approved Interfaces. You must obey the GNU General - * Public License in all respects for all of the Program code and other code used - * in conjunction with the Program except the Non-GPL Code covered by this - * exception. If you modify this file, you may extend this exception to your - * version of the file, but you are not obligated to do so. If you do not wish to - * provide this exception without modification, you must delete this exception - * statement from your version and license this file solely under the GPL without - * exception. - * - * - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. - * All rights reserved. - * END COPYRIGHT BLOCK **/ - -#ifdef HAVE_CONFIG_H -# include <config.h> -#endif - -/* - * Copyright (c) 1996 Regents of the University of Michigan. - * All rights reserved. - * - * Redistribution and use in source and binary forms are permitted - * provided that this notice is preserved and that due credit is given - * to the University of Michigan at Ann Arbor. The name of the University - * may not be used to endorse or promote products derived from this - * software without specific prior written permission. This software - * is provided ``as is'' without express or implied warranty. 
- */ - -#ifndef _LDIF_H -#define _LDIF_H - -#ifdef __cplusplus -extern "C" { -#endif - -#define LDIF_VERSION_ONE 1 /* LDIF standard version */ - -#define LDIF_MAX_LINE_WIDTH 76 /* maximum length of LDIF lines */ - -/* - * Macro to calculate maximum number of bytes that the base64 equivalent - * of an item that is "vlen" bytes long will take up. Base64 encoding - * uses one byte for every six bits in the value plus up to two pad bytes. - */ -#define LDIF_BASE64_LEN(vlen) (((vlen) * 4 / 3 ) + 3) - -/* - * Macro to calculate maximum size that an LDIF-encoded type (length - * tlen) and value (length vlen) will take up: room for type + ":: " + - * first newline + base64 value + continued lines. Each continued line - * needs room for a newline and a leading space character. - */ -#define LDIF_SIZE_NEEDED(tlen,vlen) \ - ((tlen) + 4 + LDIF_BASE64_LEN(vlen) \ - + ((LDIF_BASE64_LEN(vlen) + tlen + 3) / LDIF_MAX_LINE_WIDTH * 2 )) - -/* - * Options for ldif_put_type_and_value_with_options() and - * ldif_type_and_value_with_options(). 
- */ -#define LDIF_OPT_NOWRAP 0x01UL -#define LDIF_OPT_VALUE_IS_URL 0x02UL -#define LDIF_OPT_MINIMAL_ENCODING 0x04UL - -int ldif_parse_line( char *line, char **type, char **value, int *vlen, char **errcode); -char * ldif_getline( char **next ); -void ldif_put_type_and_value( char **out, char *t, char *val, int vlen ); -void ldif_put_type_and_value_nowrap( char **out, char *t, char *val, int vlen ); -void ldif_put_type_and_value_with_options( char **out, char *t, char *val, - int vlen, unsigned long options ); -char *ldif_type_and_value( char *type, char *val, int vlen ); -char *ldif_type_and_value_nowrap( char *type, char *val, int vlen ); -char *ldif_type_and_value_with_options( char *type, char *val, int vlen, - unsigned long options ); -int ldif_base64_decode( char *src, unsigned char *dst ); -int ldif_base64_encode( unsigned char *src, char *dst, int srclen, - int lenused ); -int ldif_base64_encode_nowrap( unsigned char *src, char *dst, int srclen, - int lenused ); -char *ldif_get_entry( FILE *fp, int *lineno ); - - -#ifdef __cplusplus -} -#endif - -#endif /* _LDIF_H */ diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c index 4c45ec6a0..a2bc720b0 100644 --- a/ldap/servers/plugins/replication/cl5_api.c +++ b/ldap/servers/plugins/replication/cl5_api.c @@ -4988,21 +4988,15 @@ _cl5LDIF2Operation (char *ldifEntry, slapi_operation_parameters *op, char **repl next = ldifEntry; while ((line = ldif_getline(&next)) != NULL) { - char *errmsg = NULL; - if ( *line == '\n' || *line == '\0' ) { break; } /* this call modifies ldifEntry */ - rc = ldif_parse_line(line, &type, &value, &vlen, &errmsg); + rc = ldif_parse_line(line, &type, &value, &vlen); if (rc != 0) { - if ( errmsg != NULL ) { - slapi_log_error(SLAPI_LOG_PARSE, repl_plugin_name_cl, "%s", errmsg); - PR_smprintf_free(errmsg ); - } slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5LDIF2Operation: warning - failed to parse ldif line\n"); continue; diff --git 
a/ldap/servers/plugins/replication/replutil.c b/ldap/servers/plugins/replication/replutil.c index c86b14b97..c5e65ee76 100644 --- a/ldap/servers/plugins/replication/replutil.c +++ b/ldap/servers/plugins/replication/replutil.c @@ -428,8 +428,6 @@ parse_changes_string(char *str) slapi_mod_init (&mod, 0); while (line) { - char * errmsg = NULL; - if (strcasecmp (line, "-") == 0) { if (slapi_mod_isvalid (&mod)) @@ -446,14 +444,10 @@ parse_changes_string(char *str) break; } - rc = ldif_parse_line(line, &type, &value, &vlen, &errmsg); + rc = ldif_parse_line(line, &type, &value, &vlen); if (rc != 0) { /* ONREPL - log warning */ - if ( errmsg != NULL ) { - slapi_log_error( SLAPI_LOG_PARSE, repl_plugin_name, "%s", errmsg ); - PR_smprintf_free(errmsg ); - } slapi_log_error( SLAPI_LOG_REPL, repl_plugin_name, "Failed to parse the ldif line.\n"); continue; diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c index 6d51a3b08..aab8b9a30 100644 --- a/ldap/servers/slapd/back-ldbm/import-threads.c +++ b/ldap/servers/slapd/back-ldbm/import-threads.c @@ -283,21 +283,16 @@ import_get_version(char *str) offset = s - str; mystr = ms = slapi_ch_strdup(str); while ( (s = ldif_getline( &ms )) != NULL ) { - char *errmsg = NULL; - if ( (retmalloc = ldif_parse_line( s, &type, &valuecharptr, &valuelen, &errmsg )) >= 0 ) { + if ( (retmalloc = ldif_parse_line( s, &type, &valuecharptr, &valuelen )) >= 0 ) { if (!strcasecmp(type, "version")) { my_version = atoi(valuecharptr); *(str + offset) = '#'; /* the memory below was not allocated by the slapi_ch_ functions */ - if (errmsg) PR_smprintf_free(errmsg); if (retmalloc) slapi_ch_free((void **) &valuecharptr); break; } - } else if ( errmsg != NULL ) { - LDAPDebug( LDAP_DEBUG_PARSE, "%s", errmsg, 0, 0 ); } /* the memory below was not allocated by the slapi_ch_ functions */ - if (errmsg) slapi_ch_free((void **) &errmsg); if (retmalloc) slapi_ch_free((void **) &valuecharptr); } diff --git 
a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index 0ae4b4fbc..31e2cb089 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -203,27 +203,17 @@ str2entry_fast( char *s, int flags, int read_stateinfo ) int maxvals; int del_maxvals; char *type; - char *errmsg = NULL; if ( *s == '\n' || *s == '\0' ) { break; } - if ( (retmalloc = ldif_parse_line( s, &type, &valuecharptr, &valuelen, &errmsg )) < 0 ) { - if ( errmsg != NULL ) { - LDAPDebug( LDAP_DEBUG_PARSE, "%s", errmsg, 0, 0 ); - /* the memory below was not allocated by the slapi_ch_ functions */ - PR_smprintf_free(errmsg ); - } + if ( (retmalloc = ldif_parse_line( s, &type, &valuecharptr, &valuelen )) < 0 ) { LDAPDebug( LDAP_DEBUG_TRACE, "<= str2entry_fast NULL (parse_line)\n", 0, 0, 0 ); continue; } - /* We don't use errmsg anywhere later. free it to avoid leaking... */ - /* the memory below was not allocated by the slapi_ch_ functions */ - slapi_ch_free( (void**)&errmsg ); - /* * Extract the attribute and value CSNs from the attribute type. */ @@ -581,7 +571,6 @@ str2entry_dupcheck( char *s, int flags, int read_stateinfo ) int i, j; char *next=NULL; char *valuecharptr=NULL; - char *errmsg = NULL; int retmalloc = 0; int rc; int fast_dup_check = 0; @@ -617,21 +606,12 @@ str2entry_dupcheck( char *s, int flags, int read_stateinfo ) break; } - if ( (retmalloc = ldif_parse_line( s, &type, &valuecharptr, &valuelen, &errmsg )) < 0 ) { - if ( errmsg != NULL ) { - LDAPDebug( LDAP_DEBUG_PARSE, "%s", errmsg, 0, 0 ); - /* the memory below was not allocated by the slapi_ch_ functions */ - PR_smprintf_free(errmsg ); - } + if ( (retmalloc = ldif_parse_line( s, &type, &valuecharptr, &valuelen )) < 0 ) { LDAPDebug( LDAP_DEBUG_TRACE, "<= slapi_str2entry NULL (parse_line)\n", 0, 0, 0 ); continue; } - /* We don't use errmsg anywhere later. free it to avoid leaking... 
*/ - /* the memory below was not allocated by the slapi_ch_ functions */ - slapi_ch_free( (void**)&errmsg ); - /* * Extract the attribute and value CSNs from the attribute type. */ diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c index 0b07a97c8..f71b7fdd4 100644 --- a/ldap/servers/slapd/fedse.c +++ b/ldap/servers/slapd/fedse.c @@ -1735,7 +1735,6 @@ search_easter_egg( Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entr { static int twiddle= -1; char *type, *value, *copy; - char *errmsg = NULL; int vlen; struct berval bv; struct berval *bvals[2]; @@ -1745,12 +1744,7 @@ search_easter_egg( Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entr bvals[0] = &bv; bvals[1] = NULL; copy= slapi_ch_strdup(easter_egg_photos[twiddle%NUM_EASTER_EGG_PHOTOS]); - if ( (retmalloc = ldif_parse_line(copy, &type, &value, &vlen, &errmsg)) < 0 ) { - if ( errmsg != NULL ) { - slapi_log_error( SLAPI_LOG_PARSE, "dse", "%s", errmsg ); - /* the memory below was not allocated by the slapi_ch_ functions */ - PR_smprintf_free(errmsg ); - } + if ( (retmalloc = ldif_parse_line(copy, &type, &value, &vlen)) < 0 ) { return SLAPI_DSE_CALLBACK_ERROR; } bv.bv_val = value; @@ -1760,7 +1754,6 @@ search_easter_egg( Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entr slapi_ch_free((void**)&copy); twiddle++; /* the memory below was not allocated by the slapi_ch_ functions */ - slapi_ch_free( (void**)&errmsg ); if (retmalloc) slapi_ch_free( (void**)&value ); return SLAPI_DSE_CALLBACK_OK; }
0
5f09b899e705f9686c6bc689f16da1d8f884cc12
389ds/389-ds-base
added LICENSE file; renamed svrcore.spec to svrcore-devel.spec and made other changes to comply with fedora packaging guidelines - see https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=196393
commit 5f09b899e705f9686c6bc689f16da1d8f884cc12 Author: richm%stanfordalumni.org <richm%stanfordalumni.org> Date: Fri Jun 23 15:15:07 2006 +0000 added LICENSE file; renamed svrcore.spec to svrcore-devel.spec and made other changes to comply with fedora packaging guidelines - see https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=196393 diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..2c8d3b9f9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,34 @@ +***** BEGIN LICENSE BLOCK ***** +Version: MPL 1.1/GPL 2.0/LGPL 2.1 + +The contents of this file are subject to the Mozilla Public License Version +1.1 (the "License"); you may not use this file except in compliance with +the License. You may obtain a copy of the License at +http://www.mozilla.org/MPL/ + +Software distributed under the License is distributed on an "AS IS" basis, +WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License +for the specific language governing rights and limitations under the +License. + +The Original Code is the Netscape svrcore library. + +The Initial Developer of the Original Code is +Netscape Communications Corporation. +Portions created by the Initial Developer are Copyright (C) 1998 +the Initial Developer. All Rights Reserved. + +Contributor(s): Terry Hayes (Netscape/AOL) was the primary contributor + +Alternatively, the contents of this file may be used under the terms of +either the GNU General Public License Version 2 or later (the "GPL"), or +the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +in which case the provisions of the GPL or the LGPL are applicable instead +of those above. If you wish to allow use of your version of this file only +under the terms of either the GPL or the LGPL, and not to allow others to +use your version of this file under the terms of the MPL, indicate your +decision by deleting the provisions above and replace them with the notice +and other provisions required by the GPL or the LGPL. 
If you do not delete +the provisions above, a recipient may use your version of this file under +the terms of any one of the MPL, the GPL or the LGPL. +***** END LICENSE BLOCK ***** diff --git a/build_svrcore-devel_rpm b/build_svrcore-devel_rpm index b92a62a27..a7b51ab97 100644 --- a/build_svrcore-devel_rpm +++ b/build_svrcore-devel_rpm @@ -86,7 +86,7 @@ SVRCORE_TAG=SVRCORE_4_0_2_RTM SVRCORE_CORECONF_TAG=$SVRCORE_TAG PRJ=${PACKAGE}-${VERSION} RPM_HOME=`pwd` -SPEC_FILENAME=svrcore.spec +SPEC_FILENAME=svrcore-devel.spec # define subroutines for this script usage() { diff --git a/svrcore-devel.spec b/svrcore-devel.spec new file mode 100644 index 000000000..522a58abe --- /dev/null +++ b/svrcore-devel.spec @@ -0,0 +1,122 @@ +%define nspr_version 4.6 +%define nss_version 3.11 + +Summary: Svrcore - development files for secure PIN handling using NSS crypto +Name: svrcore-devel +Version: 4.0.2 +Release: 1%{?dist} +License: MPL/GPL/LGPL +URL: http://www.mozilla.org/projects/security/pki/ +Group: Development/Libraries +Requires: nspr-devel >= %{nspr_version} +Requires: nss-devel >= %{nss_version} +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) +BuildRequires: nspr-devel >= %{nspr_version} +BuildRequires: nss-devel >= %{nss_version} +BuildRequires: gawk +BuildRequires: perl +BuildRequires: sed +BuildRequires: pkgconfig +Provides: svrcore-devel + +Source0: ftp://ftp.mozilla.org/pub/mozilla.org/directory/svrcore/releases/4.0.2/%{name}-%{version}.tar.gz + +%description +svrcore provides applications with several ways to handle secure PIN storage +e.g. in an application that must be restarted, but needs the PIN to unlock +the private key and other crypto material, without user intervention. svrcore +uses the facilities provided by NSS. 
+ +%prep +%setup -q + +%build + +# Enable compiler optimizations and disable debugging code +BUILD_OPT=1 +export BUILD_OPT + +# Generate symbolic info for debuggers +XCFLAGS=$RPM_OPT_FLAGS +export XCFLAGS + +PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 +PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 + +export PKG_CONFIG_ALLOW_SYSTEM_LIBS +export PKG_CONFIG_ALLOW_SYSTEM_CFLAGS + +NSPR_INCLUDE_DIR=`/usr/bin/pkg-config --variable=includedir nspr` +export NSPR_INCLUDE_DIR + +NSS_INCLUDE_DIR=`/usr/bin/pkg-config --variable=includedir nss` +export NSS_INCLUDE_DIR + +%ifarch x86_64 ppc64 ia64 s390x +USE_64=1 +export USE_64 +%endif + +cd mozilla/security/svrcore +# This make assumes the build is still using the mozilla/security/coreconf stuff, +# which does all kinds of crazy stuff with copying files around, looking for +# dependencies, etc. +make EXPORTS="" RELEASE="" REQUIRES="" MODULE="" IMPORTS="" OBJDIR=. INSTALL=true + +# Set up our package file +%{__mkdir_p} $RPM_BUILD_ROOT/%{_libdir}/pkgconfig +%{__cat} svrcore.pc.in | sed -e "s,%%libdir%%,%{_libdir},g" \ + -e "s,%%prefix%%,%{_prefix},g" \ + -e "s,%%exec_prefix%%,%{_prefix},g" \ + -e "s,%%includedir%%,%{_includedir},g" \ + -e "s,%%NSPR_VERSION%%,%{nspr_version},g" \ + -e "s,%%NSS_VERSION%%,%{nss_version},g" \ + -e "s,%%SVRCORE_VERSION%%,%{version},g" > \ + $RPM_BUILD_ROOT/%{_libdir}/pkgconfig/%{name}.pc + +%install + +# There is no make install target so we'll do it ourselves. 
+ +%{__mkdir_p} $RPM_BUILD_ROOT/%{_includedir} +%{__mkdir_p} $RPM_BUILD_ROOT/%{_libdir} + +cd mozilla/security/svrcore +# Copy the binary libraries we want +for file in libsvrcore.a +do + %{__install} -m 644 $file $RPM_BUILD_ROOT/%{_libdir} +done + +# Copy the include files +for file in svrcore.h +do + %{__install} -m 644 $file $RPM_BUILD_ROOT/%{_includedir} +done + + +%clean +%{__rm} -rf $RPM_BUILD_ROOT + +%files +%defattr(0644,root,root) +%{_libdir}/pkgconfig/%{name}.pc +%{_libdir}/libsvrcore.a +%{_includedir}/svrcore.h + +%changelog +* Thu Jun 22 2006 Rich Megginson <[email protected]> - 4.0.2-1 +- Bump rev to 4.0.2; now using HEAD of mozilla/security/coreconf +- which includes the coreconf-location.patch, so got rid of patch + +* Tue Apr 18 2006 Rich Megginson <[email protected]> - 4.0.1-3 +- Use pkg-config --variable=includedir to get include dirs + +* Wed Feb 1 2006 Rich <[email protected]> - 4.0.1-2 +- Requires nss version was wrong + +* Wed Jan 11 2006 Rich Megginson <[email protected]> - 4.01-1 +- Removed svrcore-config - use pkg-config instead + +* Mon Dec 19 2005 Rich Megginson <[email protected]> - 4.01-1 +- Initial revision diff --git a/svrcore.spec b/svrcore.spec deleted file mode 100644 index e69de29bb..000000000
0
e3b89866afc4ed31ae061078fd347c50062865a3
389ds/389-ds-base
Issue 6436 - MOD on a large group slow if substring index is present (#6437) Bug Description: If the substring index is configured for the group membership attribute ( member or uniqueMember ), the removal of a member from a large static group is pretty slow. Fix Description: A solution to this issue would be to introduce a new index to track a membership atttribute index. In the interm, we add a check to healthcheck to inform the user of the implications of this configuration. Fixes: https://github.com/389ds/389-ds-base/issues/6436 Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks)
commit e3b89866afc4ed31ae061078fd347c50062865a3 Author: James Chapman <[email protected]> Date: Wed Jan 29 17:41:55 2025 +0000 Issue 6436 - MOD on a large group slow if substring index is present (#6437) Bug Description: If the substring index is configured for the group membership attribute ( member or uniqueMember ), the removal of a member from a large static group is pretty slow. Fix Description: A solution to this issue would be to introduce a new index to track a membership atttribute index. In the interm, we add a check to healthcheck to inform the user of the implications of this configuration. Fixes: https://github.com/389ds/389-ds-base/issues/6436 Reviewed by: @Firstyear, @tbordaz, @droideck (Thanks) diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py index e1e5398ab..f09bc8bb8 100644 --- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py +++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py @@ -167,6 +167,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st): MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' standalone = topology_st.standalone + standalone.config.set("nsslapd-accesslog-logbuffering", "on") log.info('Enable RI plugin') plugin = ReferentialIntegrityPlugin(standalone) @@ -188,7 +189,7 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st): def test_healthcheck_MO_plugin_missing_indexes(topology_st): - """Check if HealthCheck returns DSMOLE0002 code + """Check if HealthCheck returns DSMOLE0001 code :id: 236b0ec2-13da-48fb-b65a-db7406d56d5d :setup: Standalone instance @@ -203,8 +204,8 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st): :expectedresults: 1. Success 2. Success - 3. Healthcheck reports DSMOLE0002 code and related details - 4. Healthcheck reports DSMOLE0002 code and related details + 3. Healthcheck reports DSMOLE0001 code and related details + 4. 
Healthcheck reports DSMOLE0001 code and related details 5. Success 6. Healthcheck reports no issue found 7. Healthcheck reports no issue found @@ -214,6 +215,7 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st): MO_GROUP_ATTR = 'creatorsname' standalone = topology_st.standalone + standalone.config.set("nsslapd-accesslog-logbuffering", "on") log.info('Enable MO plugin') plugin = MemberOfPlugin(standalone) @@ -236,6 +238,87 @@ def test_healthcheck_MO_plugin_missing_indexes(topology_st): standalone.restart() +def test_healthcheck_MO_plugin_substring_index(topology_st): + """Check if HealthCheck returns DSMOLE0002 code when the + member, uniquemember attribute contains a substring index type + + :id: 10954811-24ac-4886-8183-e30892f8e02d + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Configure the instance with MO Plugin + 3. Change index type to substring for member attribute + 4. Use HealthCheck without --json option + 5. Use HealthCheck with --json option + 6. Change index type back to equality for member attribute + 7. Use HealthCheck without --json option + 8. Use HealthCheck with --json option + 9. Change index type to substring for uniquemember attribute + 10. Use HealthCheck without --json option + 11. Use HealthCheck with --json option + 12. Change index type back to equality for uniquemember attribute + 13. Use HealthCheck without --json option + 14. Use HealthCheck with --json option + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Healthcheck reports DSMOLE0002 code and related details + 5. Healthcheck reports DSMOLE0002 code and related details + 6. Success + 7. Healthcheck reports no issue found + 8. Healthcheck reports no issue found + 9. Success + 10. Healthcheck reports DSMOLE0002 code and related details + 11. Healthcheck reports DSMOLE0002 code and related details + 12. Success + 13. Healthcheck reports no issue found + 14. 
Healthcheck reports no issue found + """ + + RET_CODE = 'DSMOLE0002' + MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' + UNIQUE_MEMBER_DN = 'cn=uniquemember,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' + + standalone = topology_st.standalone + standalone.config.set("nsslapd-accesslog-logbuffering", "on") + + log.info('Enable MO plugin') + plugin = MemberOfPlugin(standalone) + plugin.disable() + plugin.enable() + + log.info('Change the index type of the member attribute index to substring') + index = Index(topology_st.standalone, MEMBER_DN) + index.replace('nsIndexType', 'sub') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + log.info('Set the index type of the member attribute index back to eq') + index.replace('nsIndexType', 'eq') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + + log.info('Change the index type of the uniquemember attribute index to substring') + index = Index(topology_st.standalone, UNIQUE_MEMBER_DN) + index.replace('nsIndexType', 'sub') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + log.info('Set the index type of the uniquemember attribute index back to eq') + index.replace('nsIndexType', 'eq') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + + # Restart the instance after changing the plugin to avoid breaking the other tests + standalone.restart() + + @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def 
test_healthcheck_virtual_attr_incorrectly_indexed(topology_st): """Check if HealthCheck returns DSVIRTLE0001 code diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py index d0747f0f4..460bf64fc 100644 --- a/src/lib389/lib389/lint.py +++ b/src/lib389/lib389/lint.py @@ -270,6 +270,21 @@ database after adding the missing index type. Here is an example using dsconf: """ } +DSMOLE0002 = { + 'dsle': 'DSMOLE0002', + 'severity': 'LOW', + 'description': 'Removal of a member can be slow ', + 'items': ['cn=memberof plugin,cn=plugins,cn=config', ], + 'detail': """If the substring index is configured for a membership attribute. The removal of a member +from the large group can be slow. + +""", + 'fix': """If not required, you can remove the substring index type using dsconf: + + # dsconf slapd-YOUR_INSTANCE backend index set --attr=ATTR BACKEND --del-type=sub +""" +} + # Disk Space check. Note - PARTITION is replaced by the calling function DSDSLE0001 = { 'dsle': 'DSDSLE0001', diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py index 67af93a14..31bbfa502 100644 --- a/src/lib389/lib389/plugins.py +++ b/src/lib389/lib389/plugins.py @@ -12,7 +12,7 @@ import copy import os.path from lib389 import tasks from lib389._mapped_object import DSLdapObjects, DSLdapObject -from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001 +from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001, DSMOLE0002 from lib389.utils import ensure_str, ensure_list_bytes from lib389.schema import Schema from lib389._constants import ( @@ -827,6 +827,41 @@ class MemberOfPlugin(Plugin): report['check'] = f'memberof:attr_indexes' yield report + def _lint_member_substring_index(self): + if self.status(): + from lib389.backend import Backends + backends = Backends(self._instance).list() + membership_attrs = ['member', 'uniquemember'] + container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope") + for backend in backends: + suffix = 
backend.get_attr_val_utf8_l('nsslapd-suffix') + if suffix == "cn=changelog": + # Always skip retro changelog + continue + if container is not None: + # Check if this backend is in the scope + if not container.endswith(suffix): + # skip this backend that is not in the scope + continue + indexes = backend.get_indexes() + for attr in membership_attrs: + report = copy.deepcopy(DSMOLE0002) + try: + index = indexes.get(attr) + types = index.get_attr_vals_utf8_l("nsIndexType") + if "sub" in types: + report['detail'] = report['detail'].replace('ATTR', attr) + report['detail'] = report['detail'].replace('BACKEND', suffix) + report['fix'] = report['fix'].replace('ATTR', attr) + report['fix'] = report['fix'].replace('BACKEND', suffix) + report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid) + report['items'].append(suffix) + report['items'].append(attr) + report['check'] = f'attr:substring_index' + yield report + except KeyError: + continue + def get_attr(self): """Get memberofattr attribute"""
0
14943d562a4084e4c0f968b414f163af4e58ba10
389ds/389-ds-base
Issue 5547 - automember plugin improvements Description: Rebuild task has the following improvements: - Only one task allowed at a time - Do not cleanup previous members by default. Add new CLI option to intentionally cleanup memberships before rebuilding from scratch. - Add better task logging to show fixup progress To prevent automember from being called in a nested be_txn loop thread storage is used to check and skip these loops. relates: https://github.com/389ds/389-ds-base/issues/5547 Reviewed by: spichugi(Thanks!)
commit 14943d562a4084e4c0f968b414f163af4e58ba10 Author: Mark Reynolds <[email protected]> Date: Sun Nov 27 09:37:19 2022 -0500 Issue 5547 - automember plugin improvements Description: Rebuild task has the following improvements: - Only one task allowed at a time - Do not cleanup previous members by default. Add new CLI option to intentionally cleanup memberships before rebuilding from scratch. - Add better task logging to show fixup progress To prevent automember from being called in a nested be_txn loop thread storage is used to check and skip these loops. relates: https://github.com/389ds/389-ds-base/issues/5547 Reviewed by: spichugi(Thanks!) diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py index 8d25384bf..7a0ed3275 100644 --- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py +++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py @@ -5,12 +5,13 @@ # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- -# +import ldap import logging import pytest import os +import time from lib389.utils import ds_is_older -from lib389._constants import * +from lib389._constants import DEFAULT_SUFFIX from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions from lib389.idm.user import UserAccounts from lib389.idm.group import Groups @@ -41,6 +42,11 @@ def automember_fixture(topo, request): user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX) user = user_accts.create_test_user() + # Create extra users + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(0, 100): + users.create_test_user(uid=i) + # Create automember definitions and regex rules automember_prop = { 'cn': 'testgroup_definition', @@ -59,7 +65,7 @@ def automember_fixture(topo, request): automemberplugin.enable() topo.standalone.restart() - return (user, groups) + return user, groups def test_mods(automember_fixture, topo): @@ -72,19 +78,21 @@ def test_mods(automember_fixture, topo): 2. Update user that should add it to group[1] 3. Update user that should add it to group[2] 4. Update user that should add it to group[0] - 5. Test rebuild task correctly moves user to group[1] + 5. Test rebuild task adds user to group[1] + 6. Test rebuild task cleanups groups and only adds it to group[1] :expectedresults: 1. Success 2. Success 3. Success 4. Success 5. Success + 6. 
Success """ (user, groups) = automember_fixture # Update user which should go into group[0] user.replace('cn', 'whatever') - groups[0].is_member(user.dn) + assert groups[0].is_member(user.dn) if groups[1].is_member(user.dn): assert False if groups[2].is_member(user.dn): @@ -92,7 +100,7 @@ def test_mods(automember_fixture, topo): # Update user0 which should go into group[1] user.replace('cn', 'mark') - groups[1].is_member(user.dn) + assert groups[1].is_member(user.dn) if groups[0].is_member(user.dn): assert False if groups[2].is_member(user.dn): @@ -100,7 +108,7 @@ def test_mods(automember_fixture, topo): # Update user which should go into group[2] user.replace('cn', 'simon') - groups[2].is_member(user.dn) + assert groups[2].is_member(user.dn) if groups[0].is_member(user.dn): assert False if groups[1].is_member(user.dn): @@ -108,7 +116,7 @@ def test_mods(automember_fixture, topo): # Update user which should go back into group[0] (full circle) user.replace('cn', 'whatever') - groups[0].is_member(user.dn) + assert groups[0].is_member(user.dn) if groups[1].is_member(user.dn): assert False if groups[2].is_member(user.dn): @@ -128,12 +136,24 @@ def test_mods(automember_fixture, topo): automemberplugin.enable() topo.standalone.restart() - # Run rebuild task + # Run rebuild task (no cleanup) task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # test only one fixup task is allowed at a time + automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top") task.wait() - # Test membership - groups[1].is_member(user.dn) + # Test membership (user should still be in groups[0]) + assert groups[1].is_member(user.dn) + if not groups[0].is_member(user.dn): + assert False + + # Run rebuild task with cleanup + task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True) + task.wait() + + # Test membership (user should only be in groups[1]) + assert groups[1].is_member(user.dn) if 
groups[0].is_member(user.dn): assert False if groups[2].is_member(user.dn): @@ -148,4 +168,3 @@ if __name__ == '__main__': # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) pytest.main(["-s", CURRENT_FILE]) - diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c index d0770365b..38f817e5d 100644 --- a/ldap/servers/plugins/automember/automember.c +++ b/ldap/servers/plugins/automember/automember.c @@ -1,5 +1,5 @@ /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2011 Red Hat, Inc. + * Copyright (C) 2022 Red Hat, Inc. * All rights reserved. * * License: GPL (version 3 or any later version). @@ -14,7 +14,7 @@ * Auto Membership Plug-in */ #include "automember.h" - +#include <pthread.h> /* * Plug-in globals @@ -22,7 +22,9 @@ static PRCList *g_automember_config = NULL; static Slapi_RWLock *g_automember_config_lock = NULL; static uint64_t abort_rebuild_task = 0; - +static pthread_key_t td_automem_block_nested; +static PRBool fixup_running = PR_FALSE; +static PRLock *fixup_lock = NULL; static void *_PluginID = NULL; static Slapi_DN *_PluginDN = NULL; static Slapi_DN *_ConfigAreaDN = NULL; @@ -93,9 +95,43 @@ static void automember_task_export_destructor(Slapi_Task *task); static void automember_task_map_destructor(Slapi_Task *task); #define DEFAULT_FILE_MODE PR_IRUSR | PR_IWUSR +#define FIXUP_PROGRESS_LIMIT 1000 static uint64_t plugin_do_modify = 0; static uint64_t plugin_is_betxn = 0; +/* automember_plugin fixup task and add operations should block other be_txn + * plugins from calling automember_post_op_mod() */ +static int32_t +slapi_td_block_nested_post_op(void) +{ + int32_t val = 12345; + + if (pthread_setspecific(td_automem_block_nested, (void *)&val) != 0) { + return PR_FAILURE; + } + return PR_SUCCESS; +} + +static int32_t +slapi_td_unblock_nested_post_op(void) +{ + if (pthread_setspecific(td_automem_block_nested, NULL) != 0) { + return PR_FAILURE; + } + return PR_SUCCESS; +} + +static int32_t 
+slapi_td_is_post_op_nested(void) +{ + int32_t *value = pthread_getspecific(td_automem_block_nested); + + if (value == NULL) { + return 0; + } + return 1; +} + /* * Config cache locking functions */ @@ -317,6 +353,14 @@ automember_start(Slapi_PBlock *pb) return -1; } + if (fixup_lock == NULL) { + if ((fixup_lock = PR_NewLock()) == NULL) { + slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, + "automember_start - Failed to create fixup lock.\n"); + return -1; + } + } + /* * Get the plug-in target dn from the system * and store it for future use. */ @@ -360,6 +404,11 @@ automember_start(Slapi_PBlock *pb) } } + if (pthread_key_create(&td_automem_block_nested, NULL) != 0) { + slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, + "automember_start - pthread_key_create failed\n"); + } + slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, "automember_start - ready for service\n"); slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM, @@ -394,6 +443,8 @@ automember_close(Slapi_PBlock *pb __attribute__((unused))) slapi_sdn_free(&_ConfigAreaDN); slapi_destroy_rwlock(g_automember_config_lock); g_automember_config_lock = NULL; + PR_DestroyLock(fixup_lock); + fixup_lock = NULL; slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM, "<-- automember_close\n"); @@ -1620,7 +1671,6 @@ out: return rc; } - /* * automember_update_member_value() * @@ -1635,7 +1685,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char LDAPMod *mods[2]; char *vals[2]; char *member_value = NULL; - int rc = 0; + int rc = LDAP_SUCCESS; Slapi_DN *group_sdn; /* First thing check that the group still exists */ @@ -1654,7 +1704,7 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char "automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n", group_dn, rc); } - return rc; + goto out; } /* If grouping_value is dn, we need to fetch the dn instead. 
*/ @@ -1880,6 +1930,13 @@ automember_mod_post_op(Slapi_PBlock *pb) PRCList *list = NULL; int rc = SLAPI_PLUGIN_SUCCESS; + if (slapi_td_is_post_op_nested()) { + /* don't process op twice in the same thread */ + return rc; + } else { + slapi_td_block_nested_post_op(); + } + slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM, "--> automember_mod_post_op\n"); @@ -2006,6 +2063,7 @@ automember_mod_post_op(Slapi_PBlock *pb) } } } + slapi_td_unblock_nested_post_op(); slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM, "<-- automember_mod_post_op (%d)\n", rc); @@ -2025,6 +2083,13 @@ automember_add_post_op(Slapi_PBlock *pb) slapi_log_err(SLAPI_LOG_TRACE, AUTOMEMBER_PLUGIN_SUBSYSTEM, "--> automember_add_post_op\n"); + if (slapi_td_is_post_op_nested()) { + /* don't process op twice in the same thread */ + return rc; + } else { + slapi_td_block_nested_post_op(); + } + /* Reload config if a config entry was added. */ if ((sdn = automember_get_sdn(pb))) { if (automember_dn_is_config(sdn)) { @@ -2040,7 +2105,7 @@ automember_add_post_op(Slapi_PBlock *pb) /* If replication, just bail. */ if (automember_isrepl(pb)) { - return SLAPI_PLUGIN_SUCCESS; + goto bail; } /* Get the newly added entry. */ @@ -2053,7 +2118,7 @@ automember_add_post_op(Slapi_PBlock *pb) tombstone); slapi_value_free(&tombstone); if (is_tombstone) { - return SLAPI_PLUGIN_SUCCESS; + goto bail; } /* Check if a config entry applies @@ -2064,21 +2129,19 @@ automember_add_post_op(Slapi_PBlock *pb) list = PR_LIST_HEAD(g_automember_config); while (list != g_automember_config) { config = (struct configEntry *)list; - /* Does the entry meet scope and filter requirements? */ if (slapi_dn_issuffix(slapi_sdn_get_dn(sdn), config->scope) && - (slapi_filter_test_simple(e, config->filter) == 0)) { + (slapi_filter_test_simple(e, config->filter) == 0)) + { /* Find out what membership changes are needed and make them. 
*/ if (automember_update_membership(config, e, NULL) == SLAPI_PLUGIN_FAILURE) { rc = SLAPI_PLUGIN_FAILURE; break; } } - list = PR_NEXT_LINK(list); } } - automember_config_unlock(); } else { slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, @@ -2099,6 +2162,7 @@ bail: slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result); slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt); } + slapi_td_unblock_nested_post_op(); return rc; } @@ -2139,6 +2203,7 @@ typedef struct _task_data Slapi_DN *base_dn; char *bind_dn; int scope; + PRBool cleanup; } task_data; static void @@ -2271,6 +2336,7 @@ automember_task_abort_thread(void *arg) * basedn: dc=example,dc=com * filter: (uid=*) * scope: sub + * cleanup: yes/on (default is off) * * basedn and filter are required. If scope is omitted, the default is sub */ @@ -2285,9 +2351,22 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr const char *base_dn; const char *filter; const char *scope; + const char *cleanup_str; + PRBool cleanup = PR_FALSE; *returncode = LDAP_SUCCESS; + PR_Lock(fixup_lock); + if (fixup_running) { + PR_Unlock(fixup_lock); + *returncode = LDAP_UNWILLING_TO_PERFORM; + slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, + "automember_task_add - there is already a fixup task running\n"); + rv = SLAPI_DSE_CALLBACK_ERROR; + goto out; + } + PR_Unlock(fixup_lock); + /* * Grab the task params */ @@ -2301,6 +2380,12 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr rv = SLAPI_DSE_CALLBACK_ERROR; goto out; } + if ((cleanup_str = slapi_entry_attr_get_ref(e, "cleanup"))) { + if (strcasecmp(cleanup_str, "yes") == 0 || strcasecmp(cleanup_str, "on")) { + cleanup = PR_TRUE; + } + } + scope = slapi_fetch_attr(e, "scope", "sub"); /* * setup our task data @@ -2316,6 +2401,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr mytaskdata->bind_dn = slapi_ch_strdup(bind_dn); mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn); 
mytaskdata->filter_str = slapi_ch_strdup(filter); + mytaskdata->cleanup = cleanup; if (scope) { if (strcasecmp(scope, "sub") == 0) { @@ -2335,6 +2421,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr task = slapi_plugin_new_task(slapi_entry_get_ndn(e), arg); slapi_task_set_destructor_fn(task, automember_task_destructor); slapi_task_set_data(task, mytaskdata); + PR_Lock(fixup_lock); + fixup_running = PR_TRUE; + PR_Unlock(fixup_lock); /* * Start the task as a separate thread */ @@ -2346,6 +2435,9 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter __attr "automember_task_add - Unable to create task thread!\n"); *returncode = LDAP_OPERATIONS_ERROR; slapi_task_finish(task, *returncode); + PR_Lock(fixup_lock); + fixup_running = PR_FALSE; + PR_Unlock(fixup_lock); rv = SLAPI_DSE_CALLBACK_ERROR; } else { rv = SLAPI_DSE_CALLBACK_OK; @@ -2373,6 +2465,9 @@ automember_rebuild_task_thread(void *arg) PRCList *list = NULL; PRCList *include_list = NULL; int result = 0; + int64_t fixup_progress_count = 0; + int64_t fixup_progress_elapsed = 0; + int64_t fixup_start_time = 0; size_t i = 0; /* Reset abort flag */ @@ -2381,6 +2476,7 @@ automember_rebuild_task_thread(void *arg) if (!task) { return; /* no task */ } + slapi_task_inc_refcount(task); slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, "automember_rebuild_task_thread - Refcount incremented.\n"); @@ -2394,9 +2490,11 @@ automember_rebuild_task_thread(void *arg) slapi_task_log_status(task, "Automember rebuild task starting (base dn: (%s) filter (%s)...", slapi_sdn_get_dn(td->base_dn), td->filter_str); /* - * Set the bind dn in the local thread data + * Set the bind dn in the local thread data, and block post op mods */ slapi_td_set_dn(slapi_ch_strdup(td->bind_dn)); + slapi_td_block_nested_post_op(); + fixup_start_time = slapi_current_rel_time_t(); /* * Take the config lock now and search the database */ @@ -2427,6 +2525,21 @@ 
automember_rebuild_task_thread(void *arg) * Loop over the entries */ for (i = 0; entries && (entries[i] != NULL); i++) { + fixup_progress_count++; + if (fixup_progress_count % FIXUP_PROGRESS_LIMIT == 0 ) { + slapi_task_log_notice(task, + "Processed %ld entries in %ld seconds (+%ld seconds)", + fixup_progress_count, + slapi_current_rel_time_t() - fixup_start_time, + slapi_current_rel_time_t() - fixup_progress_elapsed); + slapi_task_log_status(task, + "Processed %ld entries in %ld seconds (+%ld seconds)", + fixup_progress_count, + slapi_current_rel_time_t() - fixup_start_time, + slapi_current_rel_time_t() - fixup_progress_elapsed); + slapi_task_inc_progress(task); + fixup_progress_elapsed = slapi_current_rel_time_t(); + } if (slapi_atomic_load_64(&abort_rebuild_task, __ATOMIC_ACQUIRE) == 1) { /* The task was aborted */ slapi_task_log_notice(task, "Automember rebuild task was intentionally aborted"); @@ -2444,48 +2557,66 @@ automember_rebuild_task_thread(void *arg) if (slapi_dn_issuffix(slapi_entry_get_dn(entries[i]), config->scope) && (slapi_filter_test_simple(entries[i], config->filter) == 0)) { - /* First clear out all the defaults groups */ - for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) { - if ((result = automember_update_member_value(entries[i], config->default_groups[ii], - config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER))) - { - slapi_task_log_notice(task, "Automember rebuild membership task unable to delete " - "member from default group (%s) error (%d)", - config->default_groups[ii], result); - slapi_task_log_status(task, "Automember rebuild membership task unable to delete " - "member from default group (%s) error (%d)", - config->default_groups[ii], result); - slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, - "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n", - config->default_groups[ii], result); - goto out; - } - } - - /* Then clear out the non-default group */ 
- if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) { - include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules); - while (include_list != (PRCList *)config->inclusive_rules) { - struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list; - if ((result = automember_update_member_value(entries[i], slapi_sdn_get_dn(curr_rule->target_group_dn), - config->grouping_attr, config->grouping_value, NULL, DEL_MEMBER))) + if (td->cleanup) { + + slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, + "automember_rebuild_task_thread - Cleaning up groups (config %s)\n", + config->dn); + /* First clear out all the defaults groups */ + for (size_t ii = 0; config->default_groups && config->default_groups[ii]; ii++) { + if ((result = automember_update_member_value(entries[i], + config->default_groups[ii], + config->grouping_attr, + config->grouping_value, + NULL, DEL_MEMBER))) { slapi_task_log_notice(task, "Automember rebuild membership task unable to delete " - "member from group (%s) error (%d)", - slapi_sdn_get_dn(curr_rule->target_group_dn), result); + "member from default group (%s) error (%d)", + config->default_groups[ii], result); slapi_task_log_status(task, "Automember rebuild membership task unable to delete " - "member from group (%s) error (%d)", - slapi_sdn_get_dn(curr_rule->target_group_dn), result); + "member from default group (%s) error (%d)", + config->default_groups[ii], result); slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n", - slapi_sdn_get_dn(curr_rule->target_group_dn), result); + config->default_groups[ii], result); goto out; } - include_list = PR_NEXT_LINK(include_list); } + + /* Then clear out the non-default group */ + if (config->inclusive_rules && !PR_CLIST_IS_EMPTY((PRCList *)config->inclusive_rules)) { + include_list = PR_LIST_HEAD((PRCList *)config->inclusive_rules); + while 
(include_list != (PRCList *)config->inclusive_rules) { + struct automemberRegexRule *curr_rule = (struct automemberRegexRule *)include_list; + if ((result = automember_update_member_value(entries[i], + slapi_sdn_get_dn(curr_rule->target_group_dn), + config->grouping_attr, + config->grouping_value, + NULL, DEL_MEMBER))) + { + slapi_task_log_notice(task, "Automember rebuild membership task unable to delete " + "member from group (%s) error (%d)", + slapi_sdn_get_dn(curr_rule->target_group_dn), result); + slapi_task_log_status(task, "Automember rebuild membership task unable to delete " + "member from group (%s) error (%d)", + slapi_sdn_get_dn(curr_rule->target_group_dn), result); + slapi_log_err(SLAPI_LOG_ERR, AUTOMEMBER_PLUGIN_SUBSYSTEM, + "automember_rebuild_task_thread - Unable to unable to delete from (%s) error (%d)\n", + slapi_sdn_get_dn(curr_rule->target_group_dn), result); + goto out; + } + include_list = PR_NEXT_LINK(include_list); + } + } + slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, + "automember_rebuild_task_thread - Finished cleaning up groups (config %s)\n", + config->dn); } /* Update the memberships for this entries */ + slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, + "automember_rebuild_task_thread - Updating membership (config %s)\n", + config->dn); if (slapi_is_shutting_down() || automember_update_membership(config, entries[i], NULL) == SLAPI_PLUGIN_FAILURE) { @@ -2509,15 +2640,22 @@ out: slapi_task_log_notice(task, "Automember rebuild task aborted. Error (%d)", result); slapi_task_log_status(task, "Automember rebuild task aborted. Error (%d)", result); } else { - slapi_task_log_notice(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i); - slapi_task_log_status(task, "Automember rebuild task finished. Processed (%d) entries.", (int32_t)i); + slapi_task_log_notice(task, "Automember rebuild task finished. 
Processed (%ld) entries in %ld seconds", + (int64_t)i, slapi_current_rel_time_t() - fixup_start_time); + slapi_task_log_status(task, "Automember rebuild task finished. Processed (%ld) entries in %ld seconds", + (int64_t)i, slapi_current_rel_time_t() - fixup_start_time); } slapi_task_inc_progress(task); slapi_task_finish(task, result); slapi_task_dec_refcount(task); slapi_atomic_store_64(&abort_rebuild_task, 0, __ATOMIC_RELEASE); + slapi_td_unblock_nested_post_op(); + PR_Lock(fixup_lock); + fixup_running = PR_FALSE; + PR_Unlock(fixup_lock); + slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, - "automember_rebuild_task_thread - Refcount decremented.\n"); + "automember_rebuild_task_thread - task finished, refcount decremented.\n"); } /* diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c index e3d18bab3..27e9be748 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_add.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c @@ -1,6 +1,6 @@ /** BEGIN COPYRIGHT BLOCK * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2022 Red Hat, Inc. * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. 
* @@ -1256,10 +1256,6 @@ ldbm_back_add(Slapi_PBlock *pb) goto common_return; error_return: - /* Revert the caches if this is the parent operation */ - if (parent_op) { - revert_cache(inst, &parent_time); - } if (addingentry_id_assigned) { next_id_return(be, addingentry->ep_id); } @@ -1368,6 +1364,11 @@ diskfull_return: if (!not_an_error) { rc = SLAPI_FAIL_GENERAL; } + + /* Revert the caches if this is the parent operation */ + if (parent_op) { + revert_cache(inst, &parent_time); + } } common_return: diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index e187e9125..2542ff7eb 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -1,6 +1,6 @@ /** BEGIN COPYRIGHT BLOCK * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2022 Red Hat, Inc. * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. * @@ -1352,11 +1352,6 @@ commit_return: goto common_return; error_return: - /* Revert the caches if this is the parent operation */ - if (parent_op) { - revert_cache(inst, &parent_time); - } - if (tombstone) { if (cache_is_in_cache(&inst->inst_cache, tombstone)) { tomb_ep_id = tombstone->ep_id; /* Otherwise, tombstone might have been freed. 
*/ @@ -1441,6 +1436,11 @@ error_return: conn_id, op_id, parent_modify_c.old_entry, parent_modify_c.new_entry, myrc); } + /* Revert the caches if this is the parent operation */ + if (parent_op) { + revert_cache(inst, &parent_time); + } + common_return: if (orig_entry) { /* NOTE: #define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP */ diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c index 3fd573337..4d8647223 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c @@ -1,6 +1,6 @@ /** BEGIN COPYRIGHT BLOCK * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2022 Red Hat, Inc. * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. * @@ -1040,11 +1040,6 @@ ldbm_back_modify(Slapi_PBlock *pb) goto common_return; error_return: - /* Revert the caches if this is the parent operation */ - if (parent_op) { - revert_cache(inst, &parent_time); - } - if (postentry != NULL) { slapi_entry_free(postentry); postentry = NULL; @@ -1100,6 +1095,10 @@ error_return: if (!not_an_error) { rc = SLAPI_FAIL_GENERAL; } + /* Revert the caches if this is the parent operation */ + if (parent_op) { + revert_cache(inst, &parent_time); + } } /* if ec is in cache, remove it, then add back e if we still have it */ diff --git a/src/lib389/lib389/cli_conf/plugins/automember.py b/src/lib389/lib389/cli_conf/plugins/automember.py index ad611d793..97d6bd659 100644 --- a/src/lib389/lib389/cli_conf/plugins/automember.py +++ b/src/lib389/lib389/cli_conf/plugins/automember.py @@ -155,7 +155,7 @@ def fixup(inst, basedn, log, args): log.info('Attempting to add task entry... This will fail if Automembership plug-in is not enabled.') if not plugin.status(): log.error("'%s' is disabled. 
Rebuild membership task can't be executed" % plugin.rdn) - fixup_task = plugin.fixup(args.DN, args.filter) + fixup_task = plugin.fixup(args.DN, args.filter, args.cleanup) if args.wait: log.info(f'Waiting for fixup task "{fixup_task.dn}" to complete. You can safely exit by pressing Control C ...') fixup_task.wait(timeout=None) @@ -219,8 +219,8 @@ def create_parser(subparsers): subcommands = automember.add_subparsers(help='action') add_generic_plugin_parsers(subcommands, AutoMembershipPlugin) - list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.') - subcommands_list = list.add_subparsers(help='action') + automember_list = subcommands.add_parser('list', help='List Automembership definitions or regex rules.') + subcommands_list = automember_list.add_subparsers(help='action') list_definitions = subcommands_list.add_parser('definitions', help='Lists Automembership definitions.') list_definitions.set_defaults(func=definition_list) list_regexes = subcommands_list.add_parser('regexes', help='List Automembership regex rules.') @@ -257,21 +257,23 @@ def create_parser(subparsers): show_regex = subcommands_regex.add_parser('show', help='Displays Automembership regex.') show_regex.set_defaults(func=regex_show) - fixup = subcommands.add_parser('fixup', help='Run a rebuild membership task.') - fixup.set_defaults(func=fixup) - fixup.add_argument('DN', help="Base DN that contains entries to fix up") - fixup.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up') - fixup.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower, - help='Sets the LDAP search scope for entries to fix up') - fixup.add_argument('--wait', action='store_true', - help="Wait for the task to finish, this could take a long time") + fixup_task = subcommands.add_parser('fixup', help='Run a rebuild membership task.') + fixup_task.set_defaults(func=fixup) + fixup_task.add_argument('DN', help="Base DN 
that contains entries to fix up") + fixup_task.add_argument('-f', '--filter', required=True, help='Sets the LDAP filter for entries to fix up') + fixup_task.add_argument('-s', '--scope', required=True, choices=['sub', 'base', 'one'], type=str.lower, + help='Sets the LDAP search scope for entries to fix up') + fixup_task.add_argument('--cleanup', action='store_true', + help="Clean up previous group memberships before rebuilding") + fixup_task.add_argument('--wait', action='store_true', + help="Wait for the task to finish, this could take a long time") fixup_status = subcommands.add_parser('fixup-status', help='Check the status of a fix-up task') fixup_status.set_defaults(func=do_fixup_status) fixup_status.add_argument('--dn', help="The task entry's DN") fixup_status.add_argument('--show-log', action='store_true', help="Display the task log") fixup_status.add_argument('--watch', action='store_true', - help="Watch the task's status and wait for it to finish") + help="Watch the task's status and wait for it to finish") abort_fixup = subcommands.add_parser('abort-fixup', help='Abort the rebuild membership task.') abort_fixup.set_defaults(func=abort) diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py index 27bc57717..4bf8d9177 100644 --- a/src/lib389/lib389/plugins.py +++ b/src/lib389/lib389/plugins.py @@ -1141,13 +1141,15 @@ class AutoMembershipPlugin(Plugin): def __init__(self, instance, dn="cn=Auto Membership Plugin,cn=plugins,cn=config"): super(AutoMembershipPlugin, self).__init__(instance, dn) - def fixup(self, basedn, _filter=None): + def fixup(self, basedn, _filter=None, cleanup=False): """Create an automember rebuild membership task :param basedn: Basedn to fix up :type basedn: str :param _filter: a filter for entries to fix up :type _filter: str + :param cleanup: cleanup old group memberships + :type cleanup: boolean :returns: an instance of Task(DSLdapObject) """ @@ -1156,6 +1158,9 @@ class AutoMembershipPlugin(Plugin): task_properties = 
{'basedn': basedn} if _filter is not None: task_properties['filter'] = _filter + if cleanup: + task_properties['cleanup'] = "yes" + task.create(properties=task_properties) return task diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py index cc32b3a0c..cfba46d0c 100644 --- a/src/lib389/lib389/tasks.py +++ b/src/lib389/lib389/tasks.py @@ -1007,12 +1007,13 @@ class Tasks(object): return exitCode def automemberRebuild(self, suffix=DEFAULT_SUFFIX, scope='sub', - filterstr='objectclass=top', args=None): + filterstr='objectclass=top', cleanup=False, args=None): ''' - @param suffix - The suffix the task should examine - defualt is + @param suffix - The suffix the task should examine - default is "dc=example,dc=com" @param scope - The scope of the search to find entries - @param fitlerstr - THe search filter to find entries + @param fitlerstr - The search filter to find entries + @param cleanup - reset/clear the old group mmeberships prior to rebuilding @param args - is a dictionary that contains modifier of the task wait: True/[False] - If True, waits for the completion of the task before to return @@ -1028,6 +1029,8 @@ class Tasks(object): entry.setValues('basedn', suffix) entry.setValues('filter', filterstr) entry.setValues('scope', scope) + if cleanup: + entry.setValues('cleanup', 'yes') # start the task and possibly wait for task completion try:
0
1b5e0cf7d65ffdf10676d118c835ae50ad047a42
389ds/389-ds-base
Fix RUV updating code
commit 1b5e0cf7d65ffdf10676d118c835ae50ad047a42 Author: David Boreham <[email protected]> Date: Fri Apr 15 05:30:28 2005 +0000 Fix RUV updating code diff --git a/ldap/servers/plugins/replication/repl5_ruv.c b/ldap/servers/plugins/replication/repl5_ruv.c index fa37865a7..3b4abec07 100644 --- a/ldap/servers/plugins/replication/repl5_ruv.c +++ b/ldap/servers/plugins/replication/repl5_ruv.c @@ -199,7 +199,7 @@ ruv_init_from_slapi_attr_and_check_purl(Slapi_Attr *attr, RUV **ruv, ReplicaId * if (NULL != ruve) { /* Is the local purl already in the ruv ? */ - if ( (*contain_purl==0) && (strncmp(ruve->replica_purl, purl, strlen(purl))==0) ) + if ( (*contain_purl==0) && ruve->replica_purl && purl && (strncmp(ruve->replica_purl, purl, strlen(purl))==0) ) { *contain_purl = ruve->rid; } @@ -1877,22 +1877,19 @@ ruv_is_newer (Object *sruvobj, Object *cruvobj) } void -force_csn_update (RUV *ruv, CSN *csn) +ruv_force_csn_update (RUV *ruv, CSN *csn) { - CSN *max; + CSN *max = NULL; if (ruv != NULL) { - ruv_get_max_csn(ruv, &max); - if (csn_compare(max, csn)) + { ruv_set_max_csn(ruv, csn, NULL); - + } csn_free(&max); } - - } #ifdef TESTING /* Some unit tests for code in this file */ diff --git a/ldap/servers/plugins/replication/repl5_ruv.h b/ldap/servers/plugins/replication/repl5_ruv.h index 60ac19025..0aa27de4e 100644 --- a/ldap/servers/plugins/replication/repl5_ruv.h +++ b/ldap/servers/plugins/replication/repl5_ruv.h @@ -81,7 +81,7 @@ int ruv_local_contains_supplier(RUV *ruv, ReplicaId rid); whether or not an RUV is empty */ PRBool ruv_has_csns(const RUV *ruv); PRBool ruv_is_newer (Object *sruv, Object *cruv); -void force_csn_update (RUV *ruv, CSN *csn); +void ruv_force_csn_update (RUV *ruv, CSN *csn); #ifdef __cplusplus } #endif diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c index b80e48bdc..ddf1df148 100644 --- a/ldap/servers/plugins/replication/windows_inc_protocol.c +++ 
b/ldap/servers/plugins/replication/windows_inc_protocol.c @@ -218,6 +218,11 @@ w_set_pause_and_busy_time(long *pausetime, long *busywaittime) * schedule_change START */ +/* + * DBDB: what follows is quite possibly the worst code I have ever seen. + * Unfortunately we chose not to re-write it when we did the windows sync version. + */ + /* * Main state machine for the incremental protocol. This routine will, * under normal circumstances, not return until the protocol is shut @@ -1088,6 +1093,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu int return_value; int rc; CL5ReplayIterator *changelog_iterator = NULL; + RUV *current_ruv = ruv_dup(remote_update_vector); LDAPDebug( LDAP_DEBUG_TRACE, "=> send_updates\n", 0, 0, 0 ); @@ -1293,9 +1299,8 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu /* Positive response received */ (*num_changes_sent)++; agmt_inc_last_update_changecount (prp->agmt, csn_get_replicaid(entry.op->csn), 0 /*replayed*/); - /* bring the consumers (AD) RUV up to date */ - force_csn_update(remote_update_vector, entry.op->csn); + ruv_force_csn_update(current_ruv, entry.op->csn); } break; case CL5_BAD_DATA: @@ -1352,6 +1357,12 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu w_cl5_operation_parameters_done ( entry.op ); cl5DestroyReplayIterator(&changelog_iterator); } + /* Save the RUV that we successfully replayed, this ensures that next time we start off at the next changelog record */ + if (current_ruv) + { + agmt_set_consumer_ruv(prp->agmt,current_ruv); + ruv_destroy(&current_ruv); + } LDAPDebug( LDAP_DEBUG_TRACE, "<= send_updates\n", 0, 0, 0 ); return return_value; } diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c index 55f1bd88a..fa7a2eb03 100644 --- a/ldap/servers/plugins/replication/windows_protocol_util.c +++ 
b/ldap/servers/plugins/replication/windows_protocol_util.c @@ -326,6 +326,37 @@ map_dn_values(Private_Repl_Protocol *prp,Slapi_ValueSet *original_values, Slapi_ } } +static void +windows_dump_ruvs(Object *supl_ruv_obj, Object *cons_ruv_obj) +{ + if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) + { + slapi_log_error(SLAPI_LOG_REPL, NULL, "acquire_replica, supplier RUV:\n"); + if (supl_ruv_obj) { + RUV* sup = NULL; + object_acquire(supl_ruv_obj); + sup = (RUV*) object_get_data ( supl_ruv_obj ); + ruv_dump (sup, "supplier", NULL); + object_release(supl_ruv_obj); + } else + { + slapi_log_error(SLAPI_LOG_REPL, NULL, "acquire_replica, supplier RUV = null\n"); + } + slapi_log_error(SLAPI_LOG_REPL, NULL, "acquire_replica, consumer RUV:\n"); + + if (cons_ruv_obj) + { + RUV* con = NULL; + object_acquire(cons_ruv_obj); + con = (RUV*) object_get_data ( cons_ruv_obj ); + ruv_dump (con,"consumer", NULL); + object_release( cons_ruv_obj ); + } else { + slapi_log_error(SLAPI_LOG_REPL, NULL, "acquire_replica, consumer RUV = null\n"); + } + } +} + /* * Acquire exclusive access to a replica. Send a start replication extended * operation to the replica. 
The response will contain a success code, and @@ -347,8 +378,12 @@ windows_acquire_replica(Private_Repl_Protocol *prp, RUV **ruv, int check_ruv) { int return_value = ACQUIRE_SUCCESS; - ConnResult crc; - Repl_Connection *conn; + ConnResult crc = 0; + Repl_Connection *conn = NULL; + Replica *replica = NULL; + Object *supl_ruv_obj, *cons_ruv_obj = NULL; + PRBool is_newer = PR_FALSE; + RUV *r = NULL; LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_acquire_replica\n", 0, 0, 0 ); @@ -364,79 +399,44 @@ windows_acquire_replica(Private_Repl_Protocol *prp, RUV **ruv, int check_ruv) return ACQUIRE_SUCCESS; } + if (NULL != ruv) { - Replica *replica; - Object *supl_ruv_obj, *cons_ruv_obj; - PRBool is_newer = PR_FALSE; - RUV *r; - - - if (prp->agmt) - { - cons_ruv_obj = agmt_get_consumer_ruv(prp->agmt); - } - - - - - - object_acquire(prp->replica_object); - replica = object_get_data(prp->replica_object); - supl_ruv_obj = replica_get_ruv ( replica ); - - /* make a copy of the existing RUV as a starting point - XXX this is probably a not-so-elegant hack */ - - slapi_log_error(SLAPI_LOG_REPL, NULL, "acquire_replica, supplier RUV:\n"); - if (supl_ruv_obj) { - object_acquire(supl_ruv_obj); - ruv_dump ((RUV*) object_get_data ( supl_ruv_obj ), "supplier", NULL); - object_release(supl_ruv_obj); - }else - slapi_log_error(SLAPI_LOG_REPL, NULL, "acquire_replica, supplier RUV = null\n"); - - slapi_log_error(SLAPI_LOG_REPL, NULL, "acquire_replica, consumer RUV:\n"); - - if (cons_ruv_obj) - { - RUV* con; - object_acquire(cons_ruv_obj); - con = (RUV*) object_get_data ( cons_ruv_obj ); - ruv_dump (con,"consumer", NULL); - object_release( cons_ruv_obj ); - } else { - slapi_log_error(SLAPI_LOG_REPL, NULL, "acquire_replica, consumer RUV = null\n"); - } + ruv_destroy ( ruv ); + } - is_newer = ruv_is_newer ( supl_ruv_obj, cons_ruv_obj ); - - /* This follows ruv_is_newer, since it's always newer if it's null */ - if (cons_ruv_obj == NULL) - { - RUV *s; - s = (RUV*) object_get_data ( replica_get_ruv ( 
replica ) ); - - agmt_set_consumer_ruv(prp->agmt, s ); - object_release ( replica_get_ruv ( replica ) ); - cons_ruv_obj = agmt_get_consumer_ruv(prp->agmt); - } + object_acquire(prp->replica_object); + replica = object_get_data(prp->replica_object); + supl_ruv_obj = replica_get_ruv ( replica ); + cons_ruv_obj = agmt_get_consumer_ruv(prp->agmt); - r = (RUV*) object_get_data ( cons_ruv_obj); - *ruv = r; + windows_dump_ruvs(supl_ruv_obj,cons_ruv_obj); + is_newer = ruv_is_newer ( supl_ruv_obj, cons_ruv_obj ); + + /* Handle the pristine case */ + if (cons_ruv_obj == NULL) + { + /* DBDB: this is all wrong. Need to fix this */ + RUV *s = NULL; + s = (RUV*) object_get_data ( replica_get_ruv ( replica ) ); - - - if ( supl_ruv_obj ) object_release ( supl_ruv_obj ); - if ( cons_ruv_obj ) object_release ( cons_ruv_obj ); - object_release (prp->replica_object); - replica = NULL; - - if (is_newer == PR_FALSE && check_ruv) { - prp->last_acquire_response_code = NSDS50_REPL_UPTODATE; - LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_acquire_replica - ACQUIRE_CONSUMER_WAS_UPTODATE\n", 0, 0, 0 ); - return ACQUIRE_CONSUMER_WAS_UPTODATE; - } - } + agmt_set_consumer_ruv(prp->agmt, s ); + object_release ( replica_get_ruv ( replica ) ); + cons_ruv_obj = agmt_get_consumer_ruv(prp->agmt); + } + r = (RUV*) object_get_data(cons_ruv_obj); + *ruv = ruv_dup(r); + + if ( supl_ruv_obj ) object_release ( supl_ruv_obj ); + if ( cons_ruv_obj ) object_release ( cons_ruv_obj ); + object_release (prp->replica_object); + replica = NULL; + + /* Once we get here we have a valid ruv */ + if (is_newer == PR_FALSE && check_ruv) { + prp->last_acquire_response_code = NSDS50_REPL_UPTODATE; + LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_acquire_replica - ACQUIRE_CONSUMER_WAS_UPTODATE\n", 0, 0, 0 ); + return ACQUIRE_CONSUMER_WAS_UPTODATE; + } prp->last_acquire_response_code = NSDS50_REPL_REPLICA_NO_RESPONSE;
0
19854546165c830827028f09f8b873b0f2daeefc
389ds/389-ds-base
Issue 6515 - CLI - dsidm get_dn does not return JSON format Description: The get_dn() functions do not check for the JSON flag. Group members and role subtree status were not returned in JSON. Also added a LogCapture function to return the the raw results so we can do json validation. Also did some other minor python lint cleanup. Relates: https://github.com/389ds/389-ds-base/issues/5889 Relates: https://github.com/389ds/389-ds-base/issues/6502 Relates: https://github.com/389ds/389-ds-base/issues/6503 Relates: https://github.com/389ds/389-ds-base/issues/6515 Reviewed by: spichugi(Thanks!)
commit 19854546165c830827028f09f8b873b0f2daeefc Author: Mark Reynolds <[email protected]> Date: Mon Apr 7 18:00:39 2025 -0400 Issue 6515 - CLI - dsidm get_dn does not return JSON format Description: The get_dn() functions do not check for the JSON flag. Group members and role subtree status were not returned in JSON. Also added a LogCapture function to return the the raw results so we can do json validation. Also did some other minor python lint cleanup. Relates: https://github.com/389ds/389-ds-base/issues/5889 Relates: https://github.com/389ds/389-ds-base/issues/6502 Relates: https://github.com/389ds/389-ds-base/issues/6503 Relates: https://github.com/389ds/389-ds-base/issues/6515 Reviewed by: spichugi(Thanks!) diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_test.py index 7509bb9ff..4b48a11a5 100644 --- a/dirsrvtests/tests/suites/clu/dsidm_account_test.py +++ b/dirsrvtests/tests/suites/clu/dsidm_account_test.py @@ -1,21 +1,24 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2023 Red Hat, Inc. +# Copyright (C) 2025 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # -import ldap -import time -import subprocess -import pytest import logging import os - +import json +import pytest +import ldap from lib389 import DEFAULT_SUFFIX -from lib389.cli_idm.account import list, get_dn, lock, unlock, delete, modify, rename, entry_status, \ - subtree_status, reset_password, change_password +from lib389.cli_idm.account import ( + get_dn, + lock, + unlock, + entry_status, + subtree_status, +) from lib389.topologies import topology_st from lib389.cli_base import FakeArgs from lib389.utils import ds_is_older @@ -33,6 +36,7 @@ def create_test_user(topology_st, request): log.info('Create test user') users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) test_user = users.create_test_user() + log.info('Created test user: %s', test_user.dn) def fin(): log.info('Delete test user') @@ -77,7 +81,7 @@ def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user): 'Entry Modification Date'] state_lock = 'Entry State: directly locked through nsAccountLock' - state_unlock= 'Entry State: activated' + state_unlock = 'Entry State: activated' lock_msg = 'Entry {} is locked'.format(test_user.dn) unlock_msg = 'Entry {} is unlocked'.format(test_user.dn) @@ -94,7 +98,8 @@ def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user): log.info('Test dsidm account entry-status') entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) - check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_unlock) + check_value_in_log_and_reset(topology_st, content_list=entry_list, + check_value=state_unlock) log.info('Test dsidm account lock') lock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) @@ -102,11 +107,13 @@ def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user): log.info('Test dsidm account subtree-status with locked account') subtree_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) - 
check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_lock) + check_value_in_log_and_reset(topology_st, content_list=entry_list, + check_value=state_lock) log.info('Test dsidm account entry-status with locked account') entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) - check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_lock) + check_value_in_log_and_reset(topology_st, content_list=entry_list, + check_value=state_lock) log.info('Test dsidm account unlock') unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) @@ -114,11 +121,53 @@ def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user): log.info('Test dsidm account subtree-status with unlocked account') subtree_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) - check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_unlock) + check_value_in_log_and_reset(topology_st, content_list=entry_list, + check_value=state_unlock) log.info('Test dsidm account entry-status with unlocked account') entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) - check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_unlock) + check_value_in_log_and_reset(topology_st, content_list=entry_list, + check_value=state_unlock) + + +def test_dsidm_account_entry_get_by_dn(topology_st, create_test_user): + """ Test dsidm account get_dn works with non-json and json + + :id: dd848f67c-9944-48a4-ae5e-98dce4fbc364 + :setup: Standalone instance + :steps: + 1. Get user by DN (non-json) + 2. Get user by DN (json) + :expectedresults: + 1. Success + 2. 
Success + """ + + inst = topology_st.standalone + user_dn = "uid=test_user_1000,ou=people,dc=example,dc=com" + + args = FakeArgs() + args.dn = user_dn + args.json = False + args.basedn = DEFAULT_SUFFIX + args.scope = ldap.SCOPE_SUBTREE + args.filter = "(uid=*)" + args.become_inactive_on = False + args.inactive_only = False + + # Test non-json result + check_val = "homeDirectory: /home/test_user_1000" + get_dn(inst, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=check_val) + + # Test json + args.json = True + get_dn(inst, DEFAULT_SUFFIX, topology_st.logcap.log, args) + + result = topology_st.logcap.get_raw_outputs() + json_result = json.loads(result[0]) + assert json_result['dn'] == user_dn + if __name__ == '__main__': # Run isolated diff --git a/dirsrvtests/tests/suites/clu/dsidm_group_test.py b/dirsrvtests/tests/suites/clu/dsidm_group_test.py index 7f663b61f..6fa9bd2eb 100644 --- a/dirsrvtests/tests/suites/clu/dsidm_group_test.py +++ b/dirsrvtests/tests/suites/clu/dsidm_group_test.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2024 Red Hat, Inc. +# Copyright (C) 2025 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). 
@@ -8,6 +8,7 @@ # import pytest +import json import logging import os @@ -445,6 +446,7 @@ def test_dsidm_group_members_add_remove(topology_st, create_test_group): args = FakeArgs() args.cn = group_name + args.json = False log.info('Test dsidm group members to show no associated members') members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) @@ -459,6 +461,14 @@ def test_dsidm_group_members_add_remove(topology_st, create_test_group): members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=output_with_member) + # Test json + args.json = True + members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + result = topology_st.logcap.get_raw_outputs() + json_result = json.loads(result[0]) + assert len(json_result['members']) == 1 + args.json = False + log.info('Test dsidm group remove_member') remove_member(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=output_remove_member) @@ -467,6 +477,14 @@ def test_dsidm_group_members_add_remove(topology_st, create_test_group): members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=output_no_member) + # Test json + args.json = True + members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + result = topology_st.logcap.get_raw_outputs() + json_result = json.loads(result[0]) + assert len(json_result['members']) == 0 + args.json = False + if __name__ == '__main__': # Run isolated diff --git a/dirsrvtests/tests/suites/clu/dsidm_role_test.py b/dirsrvtests/tests/suites/clu/dsidm_role_test.py index da59462a2..5a7b3b9f5 100644 --- a/dirsrvtests/tests/suites/clu/dsidm_role_test.py +++ b/dirsrvtests/tests/suites/clu/dsidm_role_test.py @@ -7,6 +7,7 @@ # --- END COPYRIGHT BLOCK --- import pytest +import json import logging import os @@ -734,7 +735,7 @@ def test_dsidm_role_lock_unlock_entrystatus(topology_st, 
create_test_managed_rol check_value_in_log(topology_st, check_value=entry_dn_output) check_value_in_log_and_reset(topology_st, check_value=entry_unlocked_output) - log.info('Test dsidm role entry-status to verify activation status of the unlocked role') + log.info('Test dsidm role entry-status to verify activation status of the unlocked role - json') args.json = True entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, content_list=entry_unlocked_content) @@ -745,7 +746,6 @@ def test_dsidm_role_lock_unlock_entrystatus(topology_st, create_test_managed_rol managed_disabled_role.delete() [email protected](reason="DS6502") @pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") def test_dsidm_role_subtree_status(topology_st, create_test_managed_role): """ Test dsidm role subtree-status option @@ -771,9 +771,23 @@ def test_dsidm_role_subtree_status(topology_st, create_test_managed_role): output = 'Entry DN: {}'.format(test_role.dn) log.info('Test dsidm role subtree-status') + topology_st.logcap.flush() subtree_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + result = topology_st.logcap.get_raw_outputs() check_value_in_log_and_reset(topology_st, check_value=output) + log.info('Test dsidm role subtree-status - json') + args.json = True + subtree_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + result = topology_st.logcap.get_raw_outputs() + json_result = json.loads(result[0]) + found = False + for entry in json_result['entries']: + if entry['dn'] == test_role.dn: + found = True + break + assert found + log.info('Clean up') test_role.delete() @@ -782,4 +796,4 @@ if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s {}".format(CURRENT_FILE)) \ No newline at end of file + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py 
b/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py index 29218216d..0532791c1 100644 --- a/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py +++ b/dirsrvtests/tests/suites/clu/dsidm_uniquegroup_test.py @@ -8,6 +8,7 @@ # import pytest +import json import logging import os @@ -445,6 +446,7 @@ def test_dsidm_uniquegroup_members_add_remove(topology_st, create_test_uniquegro args = FakeArgs() args.cn = uniquegroup_name + args.json = False log.info('Test dsidm uniquegroup members to show no associated members') members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) @@ -459,6 +461,14 @@ def test_dsidm_uniquegroup_members_add_remove(topology_st, create_test_uniquegro members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=output_with_member) + # Test json + args.json = True + members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + result = topology_st.logcap.get_raw_outputs() + json_result = json.loads(result[0]) + assert len(json_result['members']) == 1 + args.json = False + log.info('Test dsidm uniquegroup remove_member') remove_member(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=output_remove_member) @@ -467,6 +477,14 @@ def test_dsidm_uniquegroup_members_add_remove(topology_st, create_test_uniquegro members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) check_value_in_log_and_reset(topology_st, check_value=output_no_member) + # Test json + args.json = True + members(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + result = topology_st.logcap.get_raw_outputs() + json_result = json.loads(result[0]) + assert len(json_result['members']) == 0 + args.json = False + if __name__ == '__main__': # Run isolated diff --git a/src/lib389/lib389/cli_base/__init__.py b/src/lib389/lib389/cli_base/__init__.py index 57b4f4555..06b8f9964 100644 --- a/src/lib389/lib389/cli_base/__init__.py +++ 
b/src/lib389/lib389/cli_base/__init__.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2023 Red Hat, Inc. +# Copyright (C) 2025 Red Hat, Inc. # Copyright (C) 2019 William Brown <[email protected]> # All rights reserved. # @@ -14,7 +14,6 @@ import sys import json import ldap from ldap.dn import is_dn - from getpass import getpass from lib389 import DirSrv from lib389.utils import assert_c, get_ldapurl_from_serverid @@ -34,7 +33,7 @@ def _get_arg(args, msg=None, hidden=False, confirm=False): x = getpass("%s : " % msg) y = getpass("CONFIRM - %s : " % msg) if x != y: - log.info("Passwords do not match, try again.") + print("Passwords do not match, try again.") else: return y else: @@ -89,7 +88,7 @@ def _warn(data, msg=None): if msg is not None: print("%s :" % msg) if 'Yes I am sure' != input("Type 'Yes I am sure' to continue: "): - raise Exception("Not sure if want") + raise Exception("Not sure I want") return data @@ -197,7 +196,7 @@ def _generic_list(inst, basedn, log, manager_class, args=None): ol = mc.list() if len(ol) == 0: if args and args.json: - print(json.dumps({"type": "list", "items": []}, indent=4)) + log.info(json.dumps({"type": "list", "items": []}, indent=4)) else: log.info("No objects to display") elif len(ol) > 0: @@ -209,9 +208,9 @@ def _generic_list(inst, basedn, log, manager_class, args=None): if args and args.json: json_result['items'].append(o_str) else: - print(o_str) + log.info(o_str) if args and args.json: - print(json.dumps(json_result, indent=4)) + log.info(json.dumps(json_result, indent=4)) # Display these entries better! 
@@ -219,19 +218,19 @@ def _generic_get(inst, basedn, log, manager_class, selector, args=None): mc = manager_class(inst, basedn) if args and args.json: o = mc.get(selector, json=True) - print(o) + log.info(o) else: o = mc.get(selector) o_str = o.display() - print(o_str) + log.info(o_str) def _generic_get_entry(inst, basedn, log, manager_class, args=None): mc = manager_class(inst, basedn) if args and args.json: - print(mc.get_all_attrs_json()) + log.info(mc.get_all_attrs_json()) else: - print(mc.display()) + log.info(mc.display()) def _generic_get_attr(inst, basedn, log, manager_class, args=None): @@ -241,30 +240,33 @@ def _generic_get_attr(inst, basedn, log, manager_class, args=None): if args and args.json: vals[attr] = mc.get_attr_vals_utf8(attr) else: - print(mc.display_attr(attr).rstrip()) + log.info(mc.display_attr(attr).rstrip()) if args.json: - print(json.dumps({"type": "entry", "dn": mc._dn, "attrs": vals}, indent=4)) + log.info(json.dumps({"type": "entry", "dn": mc._dn, "attrs": vals}, indent=4)) def _generic_get_dn(inst, basedn, log, manager_class, dn, args=None): mc = manager_class(inst, basedn) - o = mc.get(dn=dn) - o_str = o.display() - print(o_str) + if args is not None and args.json: + o = mc.get(dn=dn, json=True) + log.info(o) + else: + o = mc.get(dn=dn) + log.info(o.display()) def _generic_create(inst, basedn, log, manager_class, kwargs, args=None): mc = manager_class(inst, basedn) o = mc.create(properties=kwargs) o_str = o.__unicode__() - print('Successfully created %s' % o_str) + log.info('Successfully created %s' % o_str) def _generic_delete(inst, basedn, log, object_class, dn, args=None): # Load the oc direct o = object_class(inst, dn) o.delete() - print('Successfully deleted %s' % dn) + log.info('Successfully deleted %s' % dn) # Attr functions expect attribute values to be "attr=value" @@ -276,7 +278,7 @@ def _generic_replace_attr(inst, basedn, log, manager_class, args=None): if "=" in myattr: [attr, val] = myattr.split("=", 1) mc.replace(attr, 
val) - print("Successfully replaced \"{}\"".format(attr)) + log.info("Successfully replaced \"{}\"".format(attr)) else: raise ValueError("You must specify a value to replace the attribute ({})".format(myattr)) else: @@ -291,7 +293,7 @@ def _generic_add_attr(inst, basedn, log, manager_class, args=None): if "=" in myattr: [attr, val] = myattr.split("=", 1) mc.add(attr, val) - print("Successfully added \"{}\"".format(attr)) + log.info("Successfully added \"{}\"".format(attr)) else: raise ValueError("You must specify a value to add for the attribute ({})".format(myattr)) else: @@ -311,7 +313,7 @@ def _generic_del_attr(inst, basedn, log, manager_class, args=None): # remove all mc.remove_all(myattr) attr = myattr # for logging - print("Successfully removed \"{}\"".format(attr)) + log.info("Successfully removed \"{}\"".format(attr)) else: # Missing value raise ValueError("Missing attribute to delete") @@ -434,12 +436,14 @@ class LogCapture(logging.Handler): """ super(LogCapture, self).__init__() self.outputs = [] + self.raw_outputs = [] self.log = logging.getLogger("LogCapture") self.log.addHandler(self) self.log.setLevel(logging.INFO) def emit(self, record): self.outputs.append(record) + self.raw_outputs.append(str(record.msg)) def contains(self, query): """ @@ -455,8 +459,12 @@ class LogCapture(logging.Handler): for rec in self.outputs: print(str(rec)) + def get_raw_outputs(self): + return self.raw_outputs + def flush(self): self.outputs = [] + self.raw_outputs = [] class FakeArgs(object): diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py index f4e3866a0..6c8ae7af9 100644 --- a/src/lib389/lib389/cli_idm/__init__.py +++ b/src/lib389/lib389/cli_idm/__init__.py @@ -1,6 +1,6 @@ # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016, William Brown <william at blackhats.net.au> -# Copyright (C) 2024 Red Hat, Inc. +# Copyright (C) 2025 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). 
@@ -140,9 +140,12 @@ def _generic_get(inst, basedn, log, manager_class, selector, args=None): def _generic_get_dn(inst, basedn, log, manager_class, dn, args=None): mc = manager_class(inst, basedn) - o = mc.get(dn=dn) - o_str = o.display() - log.info(o_str) + if args is not None and args.json: + o = mc.get(dn=dn, json=True) + log.info(o) + else: + o = mc.get(dn=dn) + log.info(o.display()) def _generic_create(inst, basedn, log, manager_class, kwargs, args=None): diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py index a0dfd8f65..1b2bf78e3 100644 --- a/src/lib389/lib389/cli_idm/account.py +++ b/src/lib389/lib389/cli_idm/account.py @@ -91,7 +91,7 @@ def entry_status(inst, basedn, log, args): def subtree_status(inst, basedn, log, args): - filter = "" + filter = "(objectclass=*)" scope = ldap.SCOPE_SUBTREE epoch_inactive_time = None if args.scope == "one": diff --git a/src/lib389/lib389/cli_idm/group.py b/src/lib389/lib389/cli_idm/group.py index b7f17ef5b..11bf7b9d5 100644 --- a/src/lib389/lib389/cli_idm/group.py +++ b/src/lib389/lib389/cli_idm/group.py @@ -1,12 +1,13 @@ # --- BEGIN COPYRIGHT BLOCK --- # Copyright (C) 2016, William Brown <william at blackhats.net.au> -# Copyright (C) 2023 Red Hat, Inc. +# Copyright (C) 2025 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- +import json from lib389.idm.group import Group, Groups, MUST_ATTRIBUTES from lib389.cli_base import populate_attr_arguments, _generic_modify, CustomHelpFormatter from lib389.cli_idm import ( @@ -71,10 +72,18 @@ def members(inst, basedn, log, args): # Display members? 
member_list = group.list_members() if len(member_list) == 0: - log.info('No members to display') + if args is not None and args.json: + json_result = {"type": "list", "members": []} + log.info(json.dumps(json_result, indent=4)) + else: + log.info('No members to display') else: - for m in member_list: - log.info('dn: %s' % m) + if args is not None and args.json: + json_result = {"type": "list", "members": member_list} + log.info(json.dumps(json_result, indent=4)) + else: + for m in member_list: + log.info('dn: %s' % m) def add_member(inst, basedn, log, args): diff --git a/src/lib389/lib389/cli_idm/role.py b/src/lib389/lib389/cli_idm/role.py index 3dd826dd5..b14ee9d72 100644 --- a/src/lib389/lib389/cli_idm/role.py +++ b/src/lib389/lib389/cli_idm/role.py @@ -108,17 +108,25 @@ def entry_status(inst, basedn, log, args): def subtree_status(inst, basedn, log, args): basedn = _get_dn_arg(args.basedn, msg="Enter basedn to check") - filter = "" - scope = ldap.SCOPE_SUBTREE - - role_list = Roles(inst, basedn).filter(filter, scope) + role_list = Roles(inst, basedn).list() if not role_list: raise ValueError(f"No entries were found under {basedn} or the user doesn't have an access") + if args.json: + json_result = {"type": "status", "entries": []} + for entry in role_list: status = entry.status() - log.info(f'Entry DN: {entry.dn}') - log.info(f'Entry State: {status["state"].describe(status["role_dn"])}\n') + if args.json: + json_result['entries'].append({ + "dn": entry.dn, + "state": status["state"].describe(status["role_dn"]) + }) + else: + log.info(f'Entry DN: {entry.dn}') + log.info(f'Entry State: {status["state"].describe(status["role_dn"])}\n') + if args.json: + log.info(json.dumps(json_result)) def lock(inst, basedn, log, args): diff --git a/src/lib389/lib389/cli_idm/uniquegroup.py b/src/lib389/lib389/cli_idm/uniquegroup.py index 27622a5d8..205169466 100644 --- a/src/lib389/lib389/cli_idm/uniquegroup.py +++ b/src/lib389/lib389/cli_idm/uniquegroup.py @@ -6,6 +6,7 @@ # See 
LICENSE for details. # --- END COPYRIGHT BLOCK --- +import json from lib389.idm.group import UniqueGroup, UniqueGroups, MUST_ATTRIBUTES from lib389.cli_base import populate_attr_arguments, _generic_modify, CustomHelpFormatter from lib389.cli_idm import ( @@ -70,10 +71,18 @@ def members(inst, basedn, log, args): # Display members? member_list = group.list_members() if len(member_list) == 0: - log.info('No members to display') + if args is not None and args.json: + json_result = {"type": "list", "members": []} + log.info(json.dumps(json_result, indent=4)) + else: + log.info('No members to display') else: - for m in member_list: - log.info('dn: %s' % m) + if args is not None and args.json: + json_result = {"type": "list", "members": member_list} + log.info(json.dumps(json_result, indent=4)) + else: + for m in member_list: + log.info('dn: %s' % m) def add_member(inst, basedn, log, args):
0
7a7609d88caf9c0971e694d7eeb78f30aea7fec9
389ds/389-ds-base
Ticket 47489 - Under specific values of nsDS5ReplicaName, replication may get broken or updates missing Bug Description: If the 'nsDS5ReplicaName' (of a replica), contains the database suffix (e.g. 'db', 'db3' or 'db4). Then replication plugin fails to open the changelog. It could conduct to changelog being recreated or some last updates to be corrupted. A consequence that I can reproduce, is that some updates may be removed from the changelog and missing updates on consumers. This could conduct to replication break, if for example an entry created is not replicated and then later updated. Fix Description: The fix consist to use 'PL_strrstr' rather than 'strstr' to check the database suffix is valid https://fedorahosted.org/389/ticket/47489 Reviewed by: Rich Megginson (thanks Rich !) Platforms tested: Fedora 17 Flag Day: no Doc impact: no
commit 7a7609d88caf9c0971e694d7eeb78f30aea7fec9 Author: Thierry bordaz (tbordaz) <[email protected]> Date: Wed Sep 11 11:08:58 2013 +0200 Ticket 47489 - Under specific values of nsDS5ReplicaName, replication may get broken or updates missing Bug Description: If the 'nsDS5ReplicaName' (of a replica), contains the database suffix (e.g. 'db', 'db3' or 'db4). Then replication plugin fails to open the changelog. It could conduct to changelog being recreated or some last updates to be corrupted. A consequence that I can reproduce, is that some updates may be removed from the changelog and missing updates on consumers. This could conduct to replication break, if for example an entry created is not replicated and then later updated. Fix Description: The fix consist to use 'PL_strrstr' rather than 'strstr' to check the database suffix is valid https://fedorahosted.org/389/ticket/47489 Reviewed by: Rich Megginson (thanks Rich !) Platforms tested: Fedora 17 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c index 6bf421701..35553b9c2 100644 --- a/ldap/servers/plugins/replication/cl5_api.c +++ b/ldap/servers/plugins/replication/cl5_api.c @@ -6241,7 +6241,7 @@ static int _cl5FileEndsWith(const char *filename, const char *ext) { return 0; } - p = strstr(filename, ext); + p = PL_strrstr(filename, ext); if (NULL == p) { return 0;
0
8bea55468d7513b74fcd044841c17fd2efedd90f
389ds/389-ds-base
Ticket #545 - Segfault during initial LDIF import: str2entry_dupcheck() Bug description: If an attribute type having multiple values exists in an entry, and any compare function is not associated with the attribute type, fast_dup_check flag was not disabled from the second time. Since fast_dup_check requires the compare function based on the attribute syntax, it causes the segfault. Fix description: This patch checks whether a compare function is associated with the multi-valued attribute not just at the first time the attribute type appears but at the second time and after, and disable fast_dup_check properly if needed. https://fedorahosted.org/389/ticket/545 Reviewed by Rich (Thank you!!)
commit 8bea55468d7513b74fcd044841c17fd2efedd90f Author: Noriko Hosoi <[email protected]> Date: Wed Jan 9 17:21:24 2013 -0800 Ticket #545 - Segfault during initial LDIF import: str2entry_dupcheck() Bug description: If an attribute type having multiple values exists in an entry, and any compare function is not associated with the attribute type, fast_dup_check flag was not disabled from the second time. Since fast_dup_check requires the compare function based on the attribute syntax, it causes the segfault. Fix description: This patch checks whether a compare function is associated with the multi-valued attribute not just at the first time the attribute type appears but at the second time and after, and disable fast_dup_check properly if needed. https://fedorahosted.org/389/ticket/545 Reviewed by Rich (Thank you!!) diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index e30f03b93..337e8b440 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -803,8 +803,8 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo ) bvtype = bv_null; bvvalue = bv_null; if ( slapi_ldif_parse_line( s, &bvtype, &bvvalue, &freeval ) < 0 ) { - LDAPDebug( LDAP_DEBUG_TRACE, - "<= str2entry_dupcheck NULL (parse_line)\n", 0, 0, 0 ); + LDAPDebug1Arg(LDAP_DEBUG_ANY, + "Warning: ignoring invalid line \"%s\"...\n", s); continue; } type = bvtype.bv_val; @@ -1047,6 +1047,17 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo ) } prev_attr = &attrs[nattrs]; nattrs++; + } else { /* prev_attr != NULL */ + if ( check_for_duplicate_values ) { + /* + * If the compare function wasn't available, + * we have to revert to AVL-tree-based dup checking, + * which uses index keys for comparisons + */ + if (NULL == prev_attr->sa_comparefn) { + fast_dup_check = 0; + } + } } sa = prev_attr; /* For readability */
0
59d1556458647d1573b7d4a224c80052cce7bee1
389ds/389-ds-base
Ticket 49295 - Fix CI tests Description: Fix the next round of failures. There are still more issues with 48252 and 48755 https://pagure.io/389-ds-base/issue/49295 Reviewed by: spichugi(Thanks!)
commit 59d1556458647d1573b7d4a224c80052cce7bee1 Author: Mark Reynolds <[email protected]> Date: Mon Aug 21 16:02:54 2017 -0400 Ticket 49295 - Fix CI tests Description: Fix the next round of failures. There are still more issues with 48252 and 48755 https://pagure.io/389-ds-base/issue/49295 Reviewed by: spichugi(Thanks!) diff --git a/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py index a5f42acf6..af5c4c4d4 100644 --- a/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py +++ b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py @@ -295,7 +295,7 @@ def test_rootdn_access_denied_ip(topology_st): try: topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD) except ldap.LDAPError as e: - log.fatal('test_rootdn_access_denied_ip: : failed to bind as user1') + log.fatal('test_rootdn_access_denied_ip: failed to bind as user1') assert False try: @@ -344,9 +344,10 @@ def test_rootdn_access_denied_host(topology_st): topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-deny-host', hostname)]) - topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, - 'rootdn-deny-host', - localhost)]) + if localhost != hostname: + topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, + 'rootdn-deny-host', + localhost)]) except ldap.LDAPError as e: log.fatal('test_rootdn_access_denied_host: Failed to set deny host: error ' + e.message['desc']) @@ -524,12 +525,16 @@ def test_rootdn_access_allowed_host(topology_st): hostname = socket.gethostname() localhost = DirSrvTools.getLocalhost() try: - topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, + topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-allow-host', - localhost)]) + None)]) topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-allow-host', - hostname)]) + localhost)]) + if hostname != localhost: + topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, + 'rootdn-allow-host', + hostname)]) except 
ldap.LDAPError as e: log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' + e.message['desc']) diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py index 34ead6707..cac25f80f 100644 --- a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py +++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py @@ -36,7 +36,7 @@ def waitfor_async_attr(topology_m2, request): # Run through all masters for num in range(1, 3): master = topology_m2.ms["master{}".format(num)] - agmt = topology_m2.ms["master{}_agmts".format(num)].values()[0] + agmt = master.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn try: if attr_value: log.info("Set %s: %s on %s" % ( @@ -97,9 +97,8 @@ def entries(topology_m2, request): def test_not_int_value(topology_m2): """Tests not integer value""" - master1 = topology_m2.ms["master1"] - agmt = topology_m2.ms["master1_agmts"]["m1_m2"] + agmt = master1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn log.info("Try to set %s: wv1" % WAITFOR_ASYNC_ATTR) try: @@ -111,9 +110,9 @@ def test_not_int_value(topology_m2): def test_multi_value(topology_m2): """Tests multi value""" - master1 = topology_m2.ms["master1"] - agmt = topology_m2.ms["master1_agmts"]["m1_m2"] + agmt = master1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + log.info("agmt: %s" % agmt) log.info("Try to set %s: 100 and 101 in the same time (multi value test)" % ( @@ -134,7 +133,7 @@ def test_value_check(topology_m2, waitfor_async_attr): for num in range(1, 3): master = topology_m2.ms["master{}".format(num)] - agmt = topology_m2.ms["master{}_agmts".format(num)].values()[0] + agmt = master.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn log.info("Check attr %s on %s" % (WAITFOR_ASYNC_ATTR, master.serverid)) try: diff --git a/dirsrvtests/tests/tickets/ticket1347760_test.py b/dirsrvtests/tests/tickets/ticket1347760_test.py index 9187cf6be..4dc23115e 100644 --- 
a/dirsrvtests/tests/tickets/ticket1347760_test.py +++ b/dirsrvtests/tests/tickets/ticket1347760_test.py @@ -79,7 +79,7 @@ def check_op_result(server, op, dn, superior, exists, rc): else: opstr = 'Modifying non-existing entry' elif op == 'modrdn': - if superior != None: + if superior is not None: targetdn = superior if exists: opstr = 'Moving to existing superior' @@ -113,7 +113,7 @@ def check_op_result(server, op, dn, superior, exists, rc): elif op == 'modify': server.modify_s(dn, [(ldap.MOD_REPLACE, 'description', 'test')]) elif op == 'modrdn': - if superior != None: + if superior is not None: server.rename_s(dn, 'uid=new', newsuperior=superior, delold=1) else: server.rename_s(dn, 'uid=new', delold=1) @@ -127,7 +127,7 @@ def check_op_result(server, op, dn, superior, exists, rc): log.info("Exception (expected): %s" % type(e).__name__) log.info('Desc ' + e.message['desc']) assert isinstance(e, rc) - if e.message.has_key('matched'): + if 'matched' in e.message: log.info('Matched is returned: ' + e.message['matched']) if rc != ldap.NO_SUCH_OBJECT: assert False @@ -206,7 +206,7 @@ def test_ticket1347760(topology_st): assert isinstance(e, ldap.INVALID_CREDENTIALS) regex = re.compile('No such entry') cause = pattern_accesslog(file_obj, regex) - if cause == None: + if cause is None: log.fatal('Cause not found - %s' % cause) assert False else: @@ -220,7 +220,7 @@ def test_ticket1347760(topology_st): topology_st.standalone.simple_bind_s(BOGUSSUFFIX, 'bogus') regex = re.compile('No suffix for bind') cause = pattern_accesslog(file_obj, regex) - if cause == None: + if cause is None: log.fatal('Cause not found - %s' % cause) assert False else: @@ -238,7 +238,7 @@ def test_ticket1347760(topology_st): assert isinstance(e, ldap.INVALID_CREDENTIALS) regex = re.compile('Invalid credentials') cause = pattern_accesslog(file_obj, regex) - if cause == None: + if cause is None: log.fatal('Cause not found - %s' % cause) assert False else: @@ -437,13 +437,13 @@ def 
test_ticket1347760(topology_st): log.info('Desc ' + e.message['desc']) assert isinstance(e, ldap.UNWILLING_TO_PERFORM) - log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, 'bogus', ldap.INVALID_CREDENTIALS.__name__)) + log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, 'bogus', ldap.UNWILLING_TO_PERFORM.__name__)) try: topology_st.standalone.simple_bind_s(BINDDN, 'bogus') except ldap.LDAPError as e: log.info("Exception (expected): %s" % type(e).__name__) log.info('Desc ' + e.message['desc']) - assert isinstance(e, ldap.INVALID_CREDENTIALS) + assert isinstance(e, ldap.UNWILLING_TO_PERFORM) log.info('SUCCESS') diff --git a/dirsrvtests/tests/tickets/ticket47536_test.py b/dirsrvtests/tests/tickets/ticket47536_test.py index bb30bc864..bd7baf85d 100644 --- a/dirsrvtests/tests/tickets/ticket47536_test.py +++ b/dirsrvtests/tests/tickets/ticket47536_test.py @@ -13,8 +13,7 @@ import subprocess from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_m2 - -from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD +from lib389._constants import * pytestmark = pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented") @@ -290,7 +289,9 @@ def config_tls_agreements(topology_m2): log.info("##################### master1 <- tls_clientAuth -- master2 ##################") log.info("##### Update the agreement of master1") - m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"] + m1 = topology_m2.ms["master1"] + m1_m2_agmt = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + topology_m2.ms["master1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')]) log.info("##### Add the cert to the repl manager on master1") @@ -328,16 +329,18 @@ def config_tls_agreements(topology_m2): m1certmap = '%s/certmap.conf' % (m1confdir) os.system('chmod 660 %s' % m1certmap) m1cm = open(m1certmap, "w") - m1cm.write('certmap Example %s\n' % ISSUER) - m1cm.write('Example:DNComps cn\n') + m1cm.write('certmap 
Example %s\n' % ISSUER) + m1cm.write('Example:DNComps cn\n') m1cm.write('Example:FilterComps\n') - m1cm.write('Example:verifycert on\n') - m1cm.write('Example:CmapLdapAttr description') + m1cm.write('Example:verifycert on\n') + m1cm.write('Example:CmapLdapAttr description') m1cm.close() os.system('chmod 440 %s' % m1certmap) log.info("##### Update the agreement of master2") - m2_m1_agmt = topology_m2.ms["master2_agmts"]["m2_m1"] + m2 = topology_m2.ms["master2"] + m2_m1_agmt = m2.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + topology_m2.ms["master2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS'), (ldap.MOD_REPLACE, 'nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH')]) diff --git a/dirsrvtests/tests/tickets/ticket47966_test.py b/dirsrvtests/tests/tickets/ticket47966_test.py index cace64377..ba6f3b66e 100644 --- a/dirsrvtests/tests/tickets/ticket47966_test.py +++ b/dirsrvtests/tests/tickets/ticket47966_test.py @@ -26,7 +26,7 @@ def test_ticket47966(topology_m2): log.info('Testing Ticket 47966 - [VLV] slapd crashes during Dogtag clone reinstallation') M1 = topology_m2.ms["master1"] M2 = topology_m2.ms["master2"] - m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"] + m1_m2_agmt = M1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn log.info('0. 
Create a VLV index on Master 2.') # get the backend entry diff --git a/dirsrvtests/tests/tickets/ticket47981_test.py b/dirsrvtests/tests/tickets/ticket47981_test.py index 9ed36949c..753169e77 100644 --- a/dirsrvtests/tests/tickets/ticket47981_test.py +++ b/dirsrvtests/tests/tickets/ticket47981_test.py @@ -82,7 +82,7 @@ def addSubtreePwPolicy(inst): except ldap.LDAPError as e: log.error('Failed to add COS def: error ' + e.message['desc']) assert False - time.sleep(0.5) + time.sleep(1) def delSubtreePwPolicy(inst): @@ -109,7 +109,7 @@ def delSubtreePwPolicy(inst): except ldap.LDAPError as e: log.error('Failed to delete COS container: error ' + e.message['desc']) assert False - time.sleep(0.5) + time.sleep(1) def test_ticket47981(topology_st): diff --git a/dirsrvtests/tests/tickets/ticket48252_test.py b/dirsrvtests/tests/tickets/ticket48252_test.py index d8d25e8a4..47f0eac90 100644 --- a/dirsrvtests/tests/tickets/ticket48252_test.py +++ b/dirsrvtests/tests/tickets/ticket48252_test.py @@ -22,7 +22,6 @@ USER_NUM = 10 TEST_USER = "test_user" - def test_ticket48252_setup(topology_st): """ Enable USN plug-in for enabling tombstones @@ -51,7 +50,7 @@ def test_ticket48252_setup(topology_st): def in_index_file(topology_st, id, index): key = "%s%s" % (TEST_USER, id) - log.info(" dbscan - checking %s is in index file %s..." % (key, index)) + log.info(" dbscan - checking %s is in index file %s..." % (key, index)) dbscanOut = topology_st.standalone.dbscan(DEFAULT_BENAME, index) if key in dbscanOut: @@ -73,16 +72,16 @@ def test_ticket48252_run_0(topology_st): uas = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) del_rdn = "cn=%s0" % TEST_USER del_entry = uas.get('%s0' % TEST_USER) - log.info(" Deleting a test entry %s..." % del_entry) + log.info(" Deleting a test entry %s..." % del_entry) del_entry.delete() assert in_index_file(topology_st, 0, 'cn') == False - log.info(" db2index - reindexing %s ..." % 'cn') + log.info(" db2index - reindexing %s ..." 
% 'cn') assert topology_st.standalone.db2index(DEFAULT_BENAME, 'cn') assert in_index_file(topology_st, 0, 'cn') == False - log.info(" entry %s is not in the cn index file after reindexed." % del_rdn) + log.info(" entry %s is not in the cn index file after reindexed." % del_rdn) log.info('Case 1 - PASSED') @@ -95,7 +94,7 @@ def test_ticket48252_run_1(topology_st): uas = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) del_rdn = "cn=%s1" % TEST_USER del_entry = uas.get('%s1' % TEST_USER) - log.info(" Deleting a test entry %s..." % del_rdn) + log.info(" Deleting a test entry %s..." % del_rdn) del_uniqueid = del_entry.get_attr_val_utf8('nsuniqueid') @@ -106,20 +105,20 @@ def test_ticket48252_run_1(topology_st): entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(nsuniqueid=%s))' % del_uniqueid) assert len(entry) == 1 - log.info(" entry %s is in the objectclass index file." % del_rdn) + log.info(" entry %s is in the objectclass index file." % del_rdn) entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) assert len(entry) == 0 - log.info(" entry %s is correctly not in the cn index file before reindexing." % del_rdn) + log.info(" entry %s is correctly not in the cn index file before reindexing." % del_rdn) - log.info(" db2index - reindexing %s ..." % 'objectclass') + log.info(" db2index - reindexing %s ..." % 'objectclass') assert topology_st.standalone.db2index(DEFAULT_BENAME, 'objectclass') entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(nsuniqueid=%s))' % del_uniqueid) assert len(entry) == 1 - log.info(" entry %s is in the objectclass index file after reindexing." % del_rdn) + log.info(" entry %s is in the objectclass index file after reindexing." 
% del_rdn) entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) assert len(entry) == 0 - log.info(" entry %s is correctly not in the cn index file after reindexing." % del_rdn) + log.info(" entry %s is correctly not in the cn index file after reindexing." % del_rdn) log.info('Case 2 - PASSED') diff --git a/dirsrvtests/tests/tickets/ticket48755_test.py b/dirsrvtests/tests/tickets/ticket48755_test.py index 9f2e2ac92..b147d3e53 100644 --- a/dirsrvtests/tests/tickets/ticket48755_test.py +++ b/dirsrvtests/tests/tickets/ticket48755_test.py @@ -133,7 +133,7 @@ def test_ticket48755(topology_m2): log.info('%s => %s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, parent1, parent01, parent001)) log.info("Run Consumer Initialization.") - m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"] + m1_m2_agmt = M1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn M1.startReplication_async(m1_m2_agmt) M1.waitForReplInit(m1_m2_agmt) time.sleep(2) diff --git a/dirsrvtests/tests/tickets/ticket48784_test.py b/dirsrvtests/tests/tickets/ticket48784_test.py index f562eb821..0f63302b1 100644 --- a/dirsrvtests/tests/tickets/ticket48784_test.py +++ b/dirsrvtests/tests/tickets/ticket48784_test.py @@ -113,11 +113,13 @@ def config_tls_agreements(topology_m2): log.info("######################## master1 <-- startTLS -> master2 #####################") log.info("##### Update the agreement of master1") - m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"] + m1 = topology_m2.ms["master1"] + m1_m2_agmt = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn topology_m2.ms["master1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')]) log.info("##### Update the agreement of master2") - m2_m1_agmt = topology_m2.ms["master2_agmts"]["m2_m1"] + m2 = topology_m2.ms["master2"] + m2_m1_agmt = m2.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn topology_m2.ms["master2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 
'TLS')]) time.sleep(1) diff --git a/dirsrvtests/tests/tickets/ticket49287_test.py b/dirsrvtests/tests/tickets/ticket49287_test.py index 6a5f0a235..21ccf609d 100644 --- a/dirsrvtests/tests/tickets/ticket49287_test.py +++ b/dirsrvtests/tests/tickets/ticket49287_test.py @@ -9,10 +9,9 @@ import pytest from lib389.tasks import * from lib389.utils import * -from lib389._constants import (SUFFIX, ReplicaRole, defaultProperties, REPLICATION_BIND_DN, PLUGIN_MEMBER_OF, - REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT) from lib389.properties import RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, BACKEND_NAME from lib389.topologies import topology_m2 +from lib389._constants import * DEBUGGING = os.getenv('DEBUGGING', False) GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) @@ -23,17 +22,19 @@ else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) + def _add_repl_backend(s1, s2, be, rid): suffix = 'ou=%s,dc=test,dc=com' % be create_backend(s1, s2, suffix, be) add_ou(s1, suffix) replicate_backend(s1, s2, suffix, rid) + def _wait_for_sync(s1, s2, testbase, final_db): now = time.time() - cn1 = 'sync-%s-%d' % (now,1) - cn2 = 'sync-%s-%d' % (now,2) + cn1 = 'sync-%s-%d' % (now, 1) + cn2 = 'sync-%s-%d' % (now, 2) add_user(s1, cn1, testbase, 'add on m1', sleep=False) add_user(s2, cn2, testbase, 'add on m2', sleep=False) dn1 = 'cn=%s,%s' % (cn1, testbase) @@ -60,15 +61,16 @@ def _check_entry_exist(master, dn, loops=10, wait=1): assert False assert attempt <= loops + def config_memberof(server): server.plugins.enable(name=PLUGIN_MEMBER_OF) MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') - server.modify_s(MEMBEROF_PLUGIN_DN, - [(ldap.MOD_REPLACE, - 'memberOfAllBackends','on')]) + server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, + 'memberOfAllBackends', + 'on')]) # Configure fractional to prevent total init to send memberof - ents = server.agreement.list(suffix=SUFFIX) + ents = 
server.agreement.list(suffix=DEFAULT_SUFFIX) log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn) for ent in ents: server.modify_s(ent.dn, @@ -79,25 +81,31 @@ def config_memberof(server): 'nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE memberOf')]) + def _disable_auto_oc_memberof(server): MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', 'nsContainer')]) + def _enable_auto_oc_memberof(server): MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', 'nsMemberOf')]) + def add_dc(server, dn): server.add_s(Entry((dn, {'objectclass': ['top', 'domain']}))) + def add_ou(server, dn): server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit']}))) + def add_container(server, dn): server.add_s(Entry((dn, {'objectclass': ['top', 'nscontainer']}))) + def add_user(server, cn, testbase, desc, sleep=True): dn = 'cn=%s,%s' % (cn, testbase) log.fatal('Adding user (%s): ' % dn) @@ -107,6 +115,7 @@ def add_user(server, cn, testbase, desc, sleep=True): if sleep: time.sleep(2) + def add_person(server, cn, testbase, desc, sleep=True): dn = 'cn=%s,%s' % (cn, testbase) log.fatal('Adding user (%s): ' % dn) @@ -116,6 +125,7 @@ def add_person(server, cn, testbase, desc, sleep=True): if sleep: time.sleep(2) + def add_multi_member(server, cn, mem_id, mem_usr, testbase, sleep=True): dn = 'cn=%s,ou=groups,%s' % (cn, testbase) members = [] @@ -126,10 +136,11 @@ def add_multi_member(server, cn, mem_id, mem_usr, testbase, sleep=True): server.modify_s(dn, mod) except ldap.OBJECT_CLASS_VIOLATION: log.info('objectclass violation') - + if sleep: time.sleep(2) + def add_member(server, cn, mem, testbase, sleep=True): dn = 'cn=%s,ou=groups,%s' % (cn, testbase) mem_dn = 'cn=%s,ou=people,%s' % (mem, testbase) @@ -138,19 +149,21 @@ def add_member(server, cn, mem, testbase, 
sleep=True): if sleep: time.sleep(2) + def add_group(server, testbase, nr, sleep=True): dn = 'cn=g%d,ou=groups,%s' % (nr, testbase) server.add_s(Entry((dn, {'objectclass': ['top', 'groupofnames'], 'member': [ - 'cn=m1_%d,%s' % (nr,testbase), - 'cn=m2_%d,%s' % (nr,testbase), - 'cn=m3_%d,%s' % (nr,testbase) + 'cn=m1_%d,%s' % (nr, testbase), + 'cn=m2_%d,%s' % (nr, testbase), + 'cn=m3_%d,%s' % (nr, testbase) ], 'description': 'group %d' % nr}))) if sleep: time.sleep(2) + def del_group(server, testbase, nr, sleep=True): dn = 'cn=g%d,%s' % (nr, testbase) @@ -158,24 +171,28 @@ def del_group(server, testbase, nr, sleep=True): if sleep: time.sleep(2) + def mod_entry(server, cn, testbase, desc): dn = 'cn=%s,%s' % (cn, testbase) mod = [(ldap.MOD_ADD, 'description', desc)] server.modify_s(dn, mod) time.sleep(2) + def del_entry(server, testbase, cn): dn = 'cn=%s,%s' % (cn, testbase) server.delete_s(dn) time.sleep(2) + def _disable_nunc_stans(server): server.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-enable-nunc-stans', 'off')]) + def _enable_spec_logging(server): mods = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260)), # Internal op - (ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192+65536)), + (ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192 + 65536)), # (ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192+32768+524288)), # (ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192)), (ldap.MOD_REPLACE, 'nsslapd-plugin-logging', 'on')] @@ -183,17 +200,17 @@ def _enable_spec_logging(server): server.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')]) + def create_agmt(s1, s2, replSuffix): - properties = {RA_NAME: 'meTo_{}:{}'.format(s1.host, - str(s2.port)), + properties = {RA_NAME: 'meTo_{}:{}'.format(s1.host, str(s2.port)), RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: 
defaultProperties[REPLICATION_TRANSPORT]} new_agmt = s1.agreement.create(suffix=replSuffix, - host=s2.host, - port=s2.port, - properties=properties) + host=s2.host, + port=s2.port, + properties=properties) return new_agmt @@ -203,9 +220,11 @@ def create_backend(s1, s2, beSuffix, beName): s2.mappingtree.create(beSuffix, beName) s2.backend.create(beSuffix, {BACKEND_NAME: beName}) + def replicate_backend(s1, s2, beSuffix, rid): s1.replica.enableReplication(suffix=beSuffix, role=ReplicaRole.MASTER, replicaId=rid) - s2.replica.enableReplication(suffix=beSuffix, role=ReplicaRole.MASTER, replicaId=rid+1) + s2.replica.enableReplication(suffix=beSuffix, role=ReplicaRole.MASTER, replicaId=rid + 1) + # agreement m2_m1_agmt is not needed... :p s1s2_agmt = create_agmt(s1, s2, beSuffix) @@ -213,23 +232,25 @@ def replicate_backend(s1, s2, beSuffix, rid): s1.agreement.init(beSuffix, s2.host, s2.port) s1.waitForReplInit(s1s2_agmt) + def check_group_mods(server1, server2, group, testbase): - # add members to group + # add members to group add_multi_member(server1, group, 1, [1,2,3,4,5], testbase, sleep=False) add_multi_member(server1, group, 2, [3,4,5], testbase, sleep=False) add_multi_member(server1, group, 3, [0], testbase, sleep=False) add_multi_member(server1, group, 4, [1,3,5], testbase, sleep=False) add_multi_member(server1, group, 5, [2,0], testbase, sleep=False) add_multi_member(server1, group, 6, [2,3,4], testbase, sleep=False) - # check that replication is working - # for main backend and some member backends + # check that replication is working + # for main backend and some member backends _wait_for_sync(server1, server2, testbase, None) for i in range(6): be = "be_%d" % i - _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com'%be, None) + _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com' % be, None) + def check_multi_group_mods(server1, server2, group1, group2, testbase): - # add members to group + # add members to group add_multi_member(server2, group1, 1, 
[1,2,3,4,5], testbase, sleep=False) add_multi_member(server1, group2, 1, [1,2,3,4,5], testbase, sleep=False) add_multi_member(server2, group1, 2, [3,4,5], testbase, sleep=False) @@ -242,12 +263,13 @@ def check_multi_group_mods(server1, server2, group1, group2, testbase): add_multi_member(server1, group2, 5, [2,0], testbase, sleep=False) add_multi_member(server2, group1, 6, [2,3,4], testbase, sleep=False) add_multi_member(server1, group2, 6, [2,3,4], testbase, sleep=False) - # check that replication is working - # for main backend and some member backends + # check that replication is working + # for main backend and some member backends _wait_for_sync(server1, server2, testbase, None) for i in range(6): be = "be_%d" % i - _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com'%be, None) + _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com' % be, None) + def test_ticket49287(topology_m2): """ @@ -260,7 +282,6 @@ def test_ticket49287(topology_m2): M1 = topology_m2.ms["master1"] M2 = topology_m2.ms["master2"] - config_memberof(M1) config_memberof(M2) @@ -290,7 +311,7 @@ def test_ticket49287(topology_m2): cn = 'a%d' % i add_user(M1, cn, peoplebase, 'add on m1', sleep=False) time.sleep(2) - add_group(M1,testbase, 1) + add_group(M1, testbase, 1) for i in range(10): cn = 'a%d' % i add_member(M1, 'g1', cn, testbase, sleep=False) @@ -304,37 +325,38 @@ def test_ticket49287(topology_m2): # test group with members in multiple backends for i in range(7): be = "be_%d" % i - _add_repl_backend(M1, M2, be, 300+i) + _add_repl_backend(M1, M2, be, 300 + i) # add entries akllowing meberof - for i in range(1,7): + for i in range(1, 7): be = "be_%d" % i for i in range(10): cn = 'a%d' % i - add_user(M1, cn, 'ou=%s,dc=test,dc=com'%be, 'add on m1', sleep=False) + add_user(M1, cn, 'ou=%s,dc=test,dc=com' % be, 'add on m1', sleep=False) # add entries not allowing memberof be = 'be_0' for i in range(10): cn = 'a%d' % i - add_person(M1, cn, 'ou=%s,dc=test,dc=com'%be, 'add on m1', 
sleep=False) + add_person(M1, cn, 'ou=%s,dc=test,dc=com' % be, 'add on m1', sleep=False) _disable_auto_oc_memberof(M1) _disable_auto_oc_memberof(M2) - add_group(M1,testbase, 2) + add_group(M1, testbase, 2) check_group_mods(M1, M2, 'g2', testbase) _enable_auto_oc_memberof(M1) - add_group(M1,testbase, 3) + add_group(M1, testbase, 3) check_group_mods(M1, M2, 'g3', testbase) _enable_auto_oc_memberof(M2) - add_group(M1,testbase, 4) + add_group(M1, testbase, 4) check_group_mods(M1, M2, 'g4', testbase) - add_group(M1,testbase, 5) - add_group(M1,testbase, 6) + add_group(M1, testbase, 5) + add_group(M1, testbase, 6) check_multi_group_mods(M1, M2, 'g5', 'g6', testbase) + if __name__ == '__main__': # Run isolated # -s for DEBUG mode
0
5f1dc41e280e3f17f5072f72a92830128ab41f5b
389ds/389-ds-base
Issue 1081 - Stop schema replication from overwriting x-origin Bug Description: During schema replication all attributes/objectclasses were rewritten as "user defined" on the consumer. This was happening because we treated all schema updates, regardless of the origin, as new "custom" schema. Fix Description: If a schema update is a replicated operation do not check/adjust x_origin value. relates: https://github.com/389ds/389-ds-base/issues/1081 Reviewed by: progier(Thanks!)
commit 5f1dc41e280e3f17f5072f72a92830128ab41f5b Author: Mark Reynolds <[email protected]> Date: Mon Mar 20 10:38:10 2023 -0400 Issue 1081 - Stop schema replication from overwriting x-origin Bug Description: During schema replication all attributes/objectclasses were rewritten as "user defined" on the consumer. This was happening because we treated all schema updates, regardless of the origin, as new "custom" schema. Fix Description: If a schema update is a replicated operation do not check/adjust x_origin value. relates: https://github.com/389ds/389-ds-base/issues/1081 Reviewed by: progier(Thanks!) diff --git a/dirsrvtests/tests/suites/replication/tombstone_fixup_test.py b/dirsrvtests/tests/suites/replication/tombstone_fixup_test.py index 0bb451fe0..280a431a5 100644 --- a/dirsrvtests/tests/suites/replication/tombstone_fixup_test.py +++ b/dirsrvtests/tests/suites/replication/tombstone_fixup_test.py @@ -59,8 +59,9 @@ def test_precise_tombstone_purging(topology_m1): args = {EXPORT_REPL_INFO: True, TASK_WAIT: True} m1_tasks.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + m1.restart() # harden test case - # Strip LDIF of nsTombstoneCSN, getthe LDIF lines, the n create new ldif + # Strip LDIF of nsTombstoneCSN, get the LDIF lines, then create new ldif ldif = open(ldif_file, "r") lines = ldif.readlines() ldif.close() diff --git a/dirsrvtests/tests/suites/schema/schema_replication_test.py b/dirsrvtests/tests/suites/schema/schema_replication_test.py index 626e56d81..0d083cb05 100644 --- a/dirsrvtests/tests/suites/schema/schema_replication_test.py +++ b/dirsrvtests/tests/suites/schema/schema_replication_test.py @@ -1,24 +1,24 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2016 Red Hat, Inc. +# Copyright (C) 2023 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- # -''' +""" Created on Nov 7, 2013 @author: tbordaz -''' +""" +import json import logging import re import time - import ldap import pytest from lib389 import Entry -from lib389._constants import * +from lib389._constants import DN_CONFIG, SUFFIX from lib389.topologies import topology_m1c1 from lib389.utils import * @@ -296,7 +296,19 @@ def test_schema_replication_two(topology_m1c1, schema_replication_init): # Check the error log of the supplier does not contain an error # This message may happen during the learning phase regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") - res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + + # Check that standard schema was not rewritten to be "user defined' on the consumer + cn_attrs = json.loads(topology_m1c1.cs["consumer1"].schema.query_attributetype("cn", json=True)) + cn_attr = cn_attrs['at'] + assert cn_attr['x_origin'][0].lower() != "user defined" + if len(cn_attr['x_origin']) > 1: + assert cn_attr['x_origin'][1].lower() != "user defined" + + # Check that the new OC "supplierNewOCB" was written to be "user defined' on the consumer + ocs = json.loads(topology_m1c1.cs["consumer1"].schema.query_objectclass("supplierNewOCB", json=True)) + new_oc = ocs['oc'] + assert new_oc['x_origin'][0].lower() == "user defined" @pytest.mark.ds47490 @@ -450,7 +462,7 @@ def test_schema_replication_five(topology_m1c1, schema_replication_init): # Check the schemaCSN was NOT updated on the consumer # with 47721, supplier learns the missing definition log.debug("test_schema_replication_five supplier_schema_csn=%s", supplier_schema_csn) - log.debug("ctest_schema_replication_five onsumer_schema_csn=%s", consumer_schema_csn) + log.debug("ctest_schema_replication_five consumer_schema_csn=%s", consumer_schema_csn) if support_schema_learning(topology_m1c1): assert supplier_schema_csn == 
consumer_schema_csn else: @@ -491,7 +503,7 @@ def test_schema_replication_six(topology_m1c1, schema_replication_init): +must=telexnumber +must=telexnumber - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC +must=telexnumber +must=telexnumber - + Note: replication log is enabled to get more details """ _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error") diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index dc52bb19e..81743c78e 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -138,10 +138,10 @@ static int schema_delete_attributes(Slapi_Entry *entryBefore, char *errorbuf, size_t errorbufsize, int is_internal_operation); -static int schema_add_attribute(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat); -static int schema_add_objectclass(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat); -static int schema_replace_attributes(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize); -static int schema_replace_objectclasses(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize); +static int schema_add_attribute(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat, int is_replicated_operation); +static int schema_add_objectclass(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat, int is_replicated_operation); +static int schema_replace_attributes(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int is_replicated_operation); +static int schema_replace_objectclasses(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int is_replicated_operation); static int schema_check_name(char *name, PRBool isAttribute, char *errorbuf, size_t errorbufsize); static int schema_check_oid(const char *name, const char *oid, PRBool isAttribute, char *errorbuf, size_t 
errorbufsize); static int isExtensibleObjectclass(const char *objectclass); @@ -235,7 +235,8 @@ parse_at_str(const char *input, struct asyntaxinfo **asipp, char *errorbuf, size } static int -parse_oc_str(const char *input, struct objclass **oc, char *errorbuf, size_t errorbufsize, PRUint32 schema_flags, int is_user_defined, int schema_ds4x_compat, struct objclass *private_schema) +parse_oc_str(const char *input, struct objclass **oc, char *errorbuf, size_t errorbufsize, PRUint32 schema_flags, + int is_user_defined, int schema_ds4x_compat, struct objclass *private_schema) { if (oc) { *oc = NULL; @@ -2056,18 +2057,18 @@ modify_schema_dse(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entry } else { if (strcasecmp(mods[i]->mod_type, "attributetypes") == 0) { /* - * Replace all attributetypes - * It has already been checked that if it was a replicated schema - * it is a superset of the current schema. That is fine to apply the mods - */ - *returncode = schema_replace_attributes(pb, mods[i], returntext, SLAPI_DSE_RETURNTEXT_SIZE); + * Replace all attributetypes + * It has already been checked that if it was a replicated schema + * it is a superset of the current schema. That is fine to apply the mods + */ + *returncode = schema_replace_attributes(pb, mods[i], returntext, SLAPI_DSE_RETURNTEXT_SIZE, is_replicated_operation); } else if (strcasecmp(mods[i]->mod_type, "objectclasses") == 0) { /* - * Replace all objectclasses - * It has already been checked that if it was a replicated schema - * it is a superset of the current schema. That is fine to apply the mods - */ - *returncode = schema_replace_objectclasses(pb, mods[i], returntext, SLAPI_DSE_RETURNTEXT_SIZE); + * Replace all objectclasses + * It has already been checked that if it was a replicated schema + * it is a superset of the current schema. 
That is fine to apply the mods + */ + *returncode = schema_replace_objectclasses(pb, mods[i], returntext, SLAPI_DSE_RETURNTEXT_SIZE, is_replicated_operation); } else if (strcasecmp(mods[i]->mod_type, "nsschemacsn") == 0) { if (is_replicated_operation) { /* Update the schema CSN */ @@ -2100,23 +2101,26 @@ modify_schema_dse(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entry } } - /* - * Add an objectclass or attribute - */ + * Add an objectclass or attribute + */ else if (SLAPI_IS_MOD_ADD(mods[i]->mod_op)) { if (strcasecmp(mods[i]->mod_type, "attributetypes") == 0) { /* - * Add a new attribute - */ + * Add a new attribute + */ *returncode = schema_add_attribute(pb, mods[i], returntext, - SLAPI_DSE_RETURNTEXT_SIZE, schema_ds4x_compat); + SLAPI_DSE_RETURNTEXT_SIZE, + schema_ds4x_compat, + is_replicated_operation); } else if (strcasecmp(mods[i]->mod_type, "objectclasses") == 0) { /* - * Add a new objectclass - */ + * Add a new objectclass + */ *returncode = schema_add_objectclass(pb, mods[i], returntext, - SLAPI_DSE_RETURNTEXT_SIZE, schema_ds4x_compat); + SLAPI_DSE_RETURNTEXT_SIZE, + schema_ds4x_compat, + is_replicated_operation); } else { if (schema_ds4x_compat) { *returncode = LDAP_NO_SUCH_ATTRIBUTE; @@ -2566,7 +2570,7 @@ schema_delete_attributes(Slapi_Entry *entryBefore __attribute__((unused)), LDAPM } static int -schema_add_attribute(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat) +schema_add_attribute(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat, int is_replicated_operation) { int i; char *attr_ldif; @@ -2585,7 +2589,7 @@ schema_add_attribute(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t erro attr_ldif = (char *)mod->mod_bvalues[i]->bv_val; status = parse_at_str(attr_ldif, NULL, errorbuf, errorbufsize, - nolock, 1 /* user defined */, schema_ds4x_compat, 1); + nolock, is_replicated_operation ? 
0 : 1 /* user defined */, schema_ds4x_compat, 1); if (LDAP_SUCCESS != status) { break; /* stop on first error */ } @@ -2763,7 +2767,7 @@ add_oc_internal(struct objclass *pnew_oc, char *errorbuf, size_t errorbufsize, i * Note that replace was not supported at all before iDS 5.0. */ static int -schema_replace_attributes(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize) +schema_replace_attributes(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int is_replicated_operation) { int i, rc = LDAP_SUCCESS; struct asyntaxinfo *newasip, *oldasip; @@ -2783,7 +2787,10 @@ schema_replace_attributes(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t for (i = 0; mod->mod_bvalues[i] != NULL; ++i) { if (LDAP_SUCCESS != (rc = parse_at_str(mod->mod_bvalues[i]->bv_val, - &newasip, errorbuf, errorbufsize, 0, 1, 0, 0))) { + &newasip, errorbuf, errorbufsize, 0, + is_replicated_operation ? 0 : 1, + 0, 0))) + { goto clean_up_and_return; } @@ -2851,7 +2858,7 @@ clean_up_and_return: static int -schema_add_objectclass(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat) +schema_add_objectclass(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat, int is_replicated_operation) { struct objclass *pnew_oc = NULL; char *newoc_ldif; @@ -2860,8 +2867,10 @@ schema_add_objectclass(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t er for (j = 0; mod->mod_bvalues[j]; j++) { newoc_ldif = (char *)mod->mod_bvalues[j]->bv_val; if (LDAP_SUCCESS != (rc = parse_oc_str(newoc_ldif, &pnew_oc, - errorbuf, errorbufsize, 0, 1 /* user defined */, - schema_ds4x_compat, NULL))) { + errorbuf, errorbufsize, 0, + is_replicated_operation ? 
0 : 1 /* user defined */, + schema_ds4x_compat, NULL))) + { oc_free(&pnew_oc); return rc; } @@ -2916,7 +2925,7 @@ schema_add_objectclass(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t er */ static int -schema_replace_objectclasses(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize) +schema_replace_objectclasses(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int is_replicated_operation) { struct objclass *newocp, *curlisthead, *prevocp, *tmpocp; struct objclass *newlisthead = NULL, *newlistend = NULL; @@ -2937,7 +2946,8 @@ schema_replace_objectclasses(Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, siz if (LDAP_SUCCESS != (rc = parse_oc_str(mod->mod_bvalues[i]->bv_val, &newocp, errorbuf, errorbufsize, DSE_SCHEMA_NO_GLOCK, - 1 /* user defined */, 0 /* no DS 4.x compat issues */, NULL))) { + is_replicated_operation ? 0 : 1 /* user defined */, + 0 /* no DS 4.x compat issues */, NULL))) { rc = LDAP_INVALID_SYNTAX; goto clean_up_and_return; } @@ -3127,7 +3137,8 @@ oc_free(struct objclass **ocp) * returns an LDAP error code (LDAP_SUCCESS if all goes well) */ static int -parse_attr_str(const char *input, struct asyntaxinfo **asipp, char *errorbuf, size_t errorbufsize, PRUint32 schema_flags, int is_user_defined, int schema_ds4x_compat, int is_remote __attribute__((unused))) +parse_attr_str(const char *input, struct asyntaxinfo **asipp, char *errorbuf, size_t errorbufsize, + PRUint32 schema_flags, int is_user_defined, int schema_ds4x_compat, int is_remote __attribute__((unused))) { struct asyntaxinfo *tmpasip; struct asyntaxinfo *tmpasi;
0
432badcd7891b3dad6a0156a52980e882d4967e3
389ds/389-ds-base
Bug(s) fixed: 151678 Bug Description: new instance creation creates error aci (2 types in RDN) Reviewed by: Nathan (Thanks!) Fix Description: This only seems to occur when logging in to the console as a user other than the Console Admin user (e.g. as Directory Manager in my tests). We need the Console Admin DN or user id to construct the ACIs. This value is held in the suitespot3x_uid form parameter. I had removed it while working on this bug or a related bug earlier, but now that I've added it back, everything seems to be working again. Platforms tested: RHEL4 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none
commit 432badcd7891b3dad6a0156a52980e882d4967e3 Author: Rich Megginson <[email protected]> Date: Fri Aug 26 19:41:40 2005 +0000 Bug(s) fixed: 151678 Bug Description: new instance creation creates error aci (2 types in RDN) Reviewed by: Nathan (Thanks!) Fix Description: This only seems to occur when logging in to the console as a user other than the Console Admin user (e.g. as Directory Manager in my tests). We need the Console Admin DN or user id to construct the ACIs. This value is held in the suitespot3x_uid form parameter. I had removed it while working on this bug or a related bug earlier, but now that I've added it back, everything seems to be working again. Platforms tested: RHEL4 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none diff --git a/ldap/admin/src/create_instance.c b/ldap/admin/src/create_instance.c index 7e737f5c2..aa08733fe 100644 --- a/ldap/admin/src/create_instance.c +++ b/ldap/admin/src/create_instance.c @@ -4672,6 +4672,7 @@ int parse_form(server_config_s *cf) return 1; } + cf->suitespot3x_uid = ds_a_get_cgi_var("suitespot3x_uid", NULL, NULL); cf->cfg_sspt = ds_a_get_cgi_var("cfg_sspt", NULL, NULL); cf->cfg_sspt_uid = ds_a_get_cgi_var("cfg_sspt_uid", NULL, NULL); if (cf->cfg_sspt_uid && *(cf->cfg_sspt_uid) &&
0
02d7b19be95764255f5d948aa5eebf4af49c4ed9
389ds/389-ds-base
Ticket 48313 - MEP suite tests for major functionality https://fedorahosted.org/389/ticket/48313 http://directory.fedoraproject.org/docs/389ds/design/mep-rework.html http://www.port389.org/docs/389ds/design/managed-entry-design.html Bug Description: The managed entries plugin works well for the IPA use case, but has a number of shortcomings when used with existing objects. Before the rewrite as described can be carried out, a complete functional test suite of MEP is required to validate the changes made to the plugin do not break existing use cases. Fix Description: This patch provides tests that cover the current states MEP is capable of handling. This does not cover the states that will be covered by the rework of the plugin. Author: wibrown Reviewed by: spichugi (Thank you!)
commit 02d7b19be95764255f5d948aa5eebf4af49c4ed9 Author: William Brown <[email protected]> Date: Wed Oct 28 09:31:06 2015 +1000 Ticket 48313 - MEP suite tests for major functionality https://fedorahosted.org/389/ticket/48313 http://directory.fedoraproject.org/docs/389ds/design/mep-rework.html http://www.port389.org/docs/389ds/design/managed-entry-design.html Bug Description: The managed entries plugin works well for the IPA use case, but has a number of shortcomings when used with existing objects. Before the rewrite as described can be carried out, a complete functional test suite of MEP is required to validate the changes made to the plugin do not break existing use cases. Fix Description: This patch provides tests that cover the current states MEP is capable of handling. This does not cover the states that will be covered by the rework of the plugin. Author: wibrown Reviewed by: spichugi (Thank you!) diff --git a/dirsrvtests/suites/mep_plugin/mep_test.py b/dirsrvtests/suites/mep_plugin/mep_test.py deleted file mode 100644 index 2bda08d6f..000000000 --- a/dirsrvtests/suites/mep_plugin/mep_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- -# -import os -import sys -import time -import ldap -import logging -import pytest -from lib389 import DirSrv, Entry, tools, tasks -from lib389.tools import DirSrvTools -from lib389._constants import * -from lib389.properties import * -from lib389.tasks import * -from lib389.utils import * - -logging.getLogger(__name__).setLevel(logging.DEBUG) -log = logging.getLogger(__name__) - -installation1_prefix = None - - -class TopologyStandalone(object): - def __init__(self, standalone): - standalone.open() - self.standalone = standalone - - [email protected](scope="module") -def topology(request): - global installation1_prefix - if installation1_prefix: - args_instance[SER_DEPLOYED_DIR] = installation1_prefix - - # Creating standalone instance ... - standalone = DirSrv(verbose=False) - args_instance[SER_HOST] = HOST_STANDALONE - args_instance[SER_PORT] = PORT_STANDALONE - args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE - args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX - args_standalone = args_instance.copy() - standalone.allocate(args_standalone) - instance_standalone = standalone.exists() - if instance_standalone: - standalone.delete() - standalone.create() - standalone.open() - - # Clear out the tmp dir - standalone.clearTmpDir(__file__) - - return TopologyStandalone(standalone) - - -def test_mep_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_mep_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_mep_final(topology): - topology.standalone.delete() - log.info('mep test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_mep_init(topo) - test_mep_(topo) - test_mep_final(topo) - - -if __name__ == '__main__': - run_isolated() -
0
6356a2ad871758bedbac50f44940a187adde3497
389ds/389-ds-base
Move NSS/SSL initialization after the setuid so that key/cert/other nss related files are owned by the correct user, but make that happen before the detach so we can ask for the pin on the terminal.
commit 6356a2ad871758bedbac50f44940a187adde3497 Author: Rich Megginson <[email protected]> Date: Fri Nov 18 21:09:46 2005 +0000 Move NSS/SSL initialization after the setuid so that key/cert/other nss related files are owned by the correct user, but make that happen before the detach so we can ask for the pin on the terminal. diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 9338c9e3f..72219c5ea 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -645,7 +645,7 @@ main( int argc, char **argv) { int return_value = 0; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - daemon_ports_t arg = {0}; + daemon_ports_t ports_info = {0}; Slapi_Backend *be = NULL; int init_ssl; #ifndef __LP64__ @@ -881,54 +881,6 @@ main( int argc, char **argv) exit( 1 ); #endif - /* - * Detach ourselves from the terminal (unless running in debug mode). - * We must detach before we start any threads since detach forks() on - * UNIX. - */ - detach(); - - /* - * Now write our PID to the startup PID file. - * This is used by the start up script to determine our PID quickly - * after we fork, without needing to wait for the 'real' pid file to be - * written. That could take minutes. And the start script will wait - * that long looking for it. With this new 'early pid' file, it can avoid - * doing that, by detecting the pid and watching for the process exiting. 
- * This removes the blank stares all round from start-slapd when the server - * fails to start for some reason - */ - write_start_pid_file(); - - /* Make sure we aren't going to run slapd in - * a mode that is going to conflict with other - * slapd processes that are currently running - */ - if ((slapd_exemode != SLAPD_EXEMODE_REFERRAL) && - ( add_new_slapd_process(slapd_exemode, db2ldif_dump_replica, - skip_db_protect_check) == -1 )) { - LDAPDebug( LDAP_DEBUG_ANY, - "Shutting down due to possible conflicts with other slapd processes\n", - 0, 0, 0 ); - exit(1); - } - - - /* - * Now it is safe to log our first startup message. If we were to - * log anything earlier than now it would appear on the admin startup - * screen twice because before we detach everything is sent to both - * stderr and our error log. Yuck. - */ - if (1) { - char *versionstring = config_get_versionstring(); - char *buildnum = config_get_buildnum(); - LDAPDebug( LDAP_DEBUG_ANY, "%s B%s starting up\n", - versionstring, buildnum, 0 ); - slapi_ch_free((void **)&buildnum); - slapi_ch_free((void **)&versionstring); - } - /* * After we read the config file we should make * sure that everything we needed to read in has @@ -946,19 +898,19 @@ main( int argc, char **argv) */ { - arg.n_port = (unsigned short)n_port; + ports_info.n_port = (unsigned short)n_port; if ( slapd_listenhost2addr( config_get_listenhost(), - &arg.n_listenaddr ) != 0 ) { + &ports_info.n_listenaddr ) != 0 ) { return(1); } - arg.s_port = (unsigned short)s_port; + ports_info.s_port = (unsigned short)s_port; if ( slapd_listenhost2addr( config_get_securelistenhost(), - &arg.s_listenaddr ) != 0 ) { + &ports_info.s_listenaddr ) != 0 ) { return(1); } - return_value = daemon_pre_setuid_init(&arg); + return_value = daemon_pre_setuid_init(&ports_info); if (0 != return_value) { LDAPDebug( LDAP_DEBUG_ANY, "Failed to init daemon\n", 0, 0, 0 ); @@ -1006,6 +958,62 @@ main( int argc, char **argv) exit( 1 ); } + if ( init_ssl && ( 0 != 
slapd_ssl_init2(&ports_info.s_socket, 0) ) ) { + LDAPDebug(LDAP_DEBUG_ANY, + "ERROR: SSL Initialization phase 2 Failed.\n", 0, 0, 0 ); + exit( 1 ); + } + + /* + * Detach ourselves from the terminal (unless running in debug mode). + * We must detach before we start any threads since detach forks() on + * UNIX. + * Have to detach after ssl_init - the user may be prompted for the PIN + * on the terminal, so it must be open. + */ + detach(); + + /* + * Now write our PID to the startup PID file. + * This is used by the start up script to determine our PID quickly + * after we fork, without needing to wait for the 'real' pid file to be + * written. That could take minutes. And the start script will wait + * that long looking for it. With this new 'early pid' file, it can avoid + * doing that, by detecting the pid and watching for the process exiting. + * This removes the blank stares all round from start-slapd when the server + * fails to start for some reason + */ + write_start_pid_file(); + + /* Make sure we aren't going to run slapd in + * a mode that is going to conflict with other + * slapd processes that are currently running + */ + if ((slapd_exemode != SLAPD_EXEMODE_REFERRAL) && + ( add_new_slapd_process(slapd_exemode, db2ldif_dump_replica, + skip_db_protect_check) == -1 )) { + LDAPDebug( LDAP_DEBUG_ANY, + "Shutting down due to possible conflicts with other slapd processes\n", + 0, 0, 0 ); + exit(1); + } + + + /* + * Now it is safe to log our first startup message. If we were to + * log anything earlier than now it would appear on the admin startup + * screen twice because before we detach everything is sent to both + * stderr and our error log. Yuck. 
+ */ + if (1) { + char *versionstring = config_get_versionstring(); + char *buildnum = config_get_buildnum(); + LDAPDebug( LDAP_DEBUG_ANY, "%s B%s starting up\n", + versionstring, buildnum, 0 ); + slapi_ch_free((void **)&buildnum); + slapi_ch_free((void **)&versionstring); + } + /* -sduloutre: compute_init() and entry_computed_attr_init() moved up */ if (slapd_exemode != SLAPD_EXEMODE_REFERRAL) { @@ -1143,7 +1151,7 @@ main( int argc, char **argv) { time( &starttime ); - slapd_daemon(&arg); + slapd_daemon(&ports_info); } LDAPDebug( LDAP_DEBUG_ANY, "slapd stopped.\n", 0, 0, 0 ); reslimit_cleanup();
0
d78de3617b6d6aa3928e3a88b2cba83fec4eaaab
389ds/389-ds-base
590931 - rhds81 import - hardcoded pages_limit for nsslapd-import-cache-autosize Fix Description: 1. Got rid of the old hardcoded limit 200MB. 2. Introduced the memory hard limit and soft limit. Standalone command line import ldif2db behaves as follows: If import cache autosize is enabled: nsslapd-import-cache-autosize: -1 or 1 ~ 99 (if the value is greater than or equal to 100, it's reset to 50 with a warning.) the import cache size is calculated as nsslapd-import-cache-autosize * pages / 125 (./125 instead of ./100 is for adjusting the BDB overhead.) If import cache is disabled: nsslapd-import-cache-autosize: 0 get the nsslapd-import-cachesize. Calculate the memory size left after allocating the import cache size. If the size is less than the hard limit, it issues an error and quit. If the size is greater than the hard limit and less than the soft limit, it issues a warning, but continues the import task. Note: this function is called only if the import is executed as a stand alone command line (ldif2db).
commit d78de3617b6d6aa3928e3a88b2cba83fec4eaaab Author: Noriko Hosoi <[email protected]> Date: Wed May 12 15:48:42 2010 -0700 590931 - rhds81 import - hardcoded pages_limit for nsslapd-import-cache-autosize Fix Description: 1. Got rid of the old hardcoded limit 200MB. 2. Introduced the memory hard limit and soft limit. Standalone command line import ldif2db behaves as follows: If import cache autosize is enabled: nsslapd-import-cache-autosize: -1 or 1 ~ 99 (if the value is greater than or equal to 100, it's reset to 50 with a warning.) the import cache size is calculated as nsslapd-import-cache-autosize * pages / 125 (./125 instead of ./100 is for adjusting the BDB overhead.) If import cache is disabled: nsslapd-import-cache-autosize: 0 get the nsslapd-import-cachesize. Calculate the memory size left after allocating the import cache size. If the size is less than the hard limit, it issues an error and quit. If the size is greater than the hard limit and less than the soft limit, it issues a warning, but continues the import task. Note: this function is called only if the import is executed as a stand alone command line (ldif2db). 
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index b3d61db6d..e744321e7 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -869,7 +869,7 @@ void dblayer_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size_ #ifdef OS_solaris *pagesize = (int)sysconf(_SC_PAGESIZE); *pages = (int)sysconf(_SC_PHYS_PAGES); - *availpages = dblayer_getvirtualmemsize() / *pagesize; + *availpages = dblayer_getvirtualmemsize() / *pagesize; /* solaris has THE most annoying way to get this info */ if (procpages) { struct prpsinfo psi; @@ -1757,59 +1757,105 @@ int dblayer_start(struct ldbminfo *li, int dbmode) return 0; } -void -autosize_import_cache(struct ldbminfo *li) +/* + * If import cache autosize is enabled: + * nsslapd-import-cache-autosize: -1 or 1 ~ 99 + * calculate the import cache size. + * If import cache is disabled: + * nsslapd-import-cache-autosize: 0 + * get the nsslapd-import-cachesize. + * Calculate the memory size left after allocating the import cache size. + * If the size is less than the hard limit, it issues an error and quit. + * If the size is greater than the hard limit and less than the soft limit, + * it issues a warning, but continues the import task. + * + * Note: this function is called only if the import is executed as a stand + * alone command line (ldif2db). 
+ */ +int +check_and_set_import_cache(struct ldbminfo *li) { + size_t import_pages = 0; + size_t pagesize, pages, procpages, availpages; + size_t soft_limit = 0; + size_t hard_limit = 0; + size_t page_delta = 0; + char s[64]; /* big enough to hold %ld */ + + dblayer_sys_pages(&pagesize, &pages, &procpages, &availpages); + if (0 == pagesize || 0 == pages) { + LDAPDebug2Args(LDAP_DEBUG_ANY, "check_and_set_import_cache: " + "Failed to get pagesize: %ld or pages: %ld\n", + pagesize, pages); + return ENOENT; + } + LDAPDebug(LDAP_DEBUG_ANY, "check_and_set_import_cache: " + "pagesize: %ld, pages: %ld, procpages: %ld\n", + pagesize, pages, procpages); + + /* Soft limit: pages equivalent to 1GB (defined in dblayer.h) */ + soft_limit = (DBLAYER_IMPORTCACHESIZE_SL*1024) / (pagesize/1024); + /* Hard limit: pages equivalent to 100MB (defined in dblayer.h) */ + hard_limit = (DBLAYER_IMPORTCACHESIZE_HL*1024) / (pagesize/1024); /* * default behavior for ldif2db import cache, * nsslapd-import-cache-autosize==-1, * autosize 50% mem to import cache */ - if (li->li_import_cache_autosize == -1) { + if (li->li_import_cache_autosize < 0) { li->li_import_cache_autosize = 50; } /* sanity check */ - if (li->li_import_cache_autosize > 100) { - LDAPDebug(LDAP_DEBUG_ANY, - "cache autosizing: bad setting, " - "import cache autosizing value should not be larger than 100(%).\n" - "set: 100(%).\n", NULL, NULL, NULL); - li->li_import_cache_autosize = 100; + if (li->li_import_cache_autosize >= 100) { + LDAPDebug0Args(LDAP_DEBUG_ANY, + "check_and_set_import_cache: " + "import cache autosizing value " + "(nsslapd-import-cache-autosize) should not be " + "greater than or equal to 100(%). 
Reset to 50(%).\n"); + li->li_import_cache_autosize = 50; } - /* autosizing importCache */ - if (li->li_import_cache_autosize > 0) { - size_t pagesize, pages, procpages, availpages; + if (li->li_import_cache_autosize == 0) { + /* user specified importCache */ + import_pages = li->li_import_cachesize / pagesize; - dblayer_sys_pages(&pagesize, &pages, &procpages, &availpages); - LDAPDebug(LDAP_DEBUG_ANY, "autosize_import_cache: " - "pagesize: %d, pages: %d, procpages: %d\n", - pagesize, pages, procpages); - if (pagesize) { - char s[32]; /* big enough to hold %ld */ - int import_pages; - int pages_limit = (200 * 1024) / (pagesize/1024); - import_pages = (li->li_import_cache_autosize * pages) / 125; - /* We don't want to go wild with memory when auto-sizing, cap the - * cache size at 200 Megs to try to avoid situations where we - * attempt to allocate more memory than there is free page pool for, or - * where there's some system limit on the size of process memory - */ - if (import_pages > pages_limit) { - import_pages = pages_limit; - } - LDAPDebug(LDAP_DEBUG_ANY, "cache autosizing: import cache: %dk \n", - import_pages*(pagesize/1024), NULL, NULL); - LDAPDebug(LDAP_DEBUG_ANY, - "li_import_cache_autosize: %d, import_pages: %d, pagesize: %d\n", - li->li_import_cache_autosize, import_pages, - pagesize); + } else { + /* autosizing importCache */ + /* ./125 instead of ./100 is for adjusting the BDB overhead. */ + import_pages = (li->li_import_cache_autosize * pages) / 125; + } - sprintf(s, "%lu", (unsigned long)(import_pages * pagesize)); - ldbm_config_internal_set(li, CONFIG_IMPORT_CACHESIZE, s); - } + page_delta = pages - import_pages; + if (page_delta < hard_limit) { + LDAPDebug(LDAP_DEBUG_ANY, + "After allocating import cache %ldKB, " + "the available memory is %ldKB, " + "which is less than the hard limit %ldKB. 
" + "Please decrease the import cache size and rerun import.\n", + import_pages*(pagesize/1024), page_delta*(pagesize/1024), + hard_limit*(pagesize/1024)); + return ENOMEM; + } + if (page_delta < soft_limit) { + LDAPDebug(LDAP_DEBUG_ANY, + "WARNING: After allocating import cache %ldKB, " + "the available memory is %ldKB, " + "which is less than the soft limit %ldKB. " + "You may want to decrease the import cache size and " + "rerun import.\n", + import_pages*(pagesize/1024), page_delta*(pagesize/1024), + soft_limit*(pagesize/1024)); + } + + LDAPDebug1Arg(LDAP_DEBUG_ANY, "Import allocates %ldKB import cache.\n", + import_pages*(pagesize/1024)); + if (li->li_import_cache_autosize > 0) { /* import cache autosizing */ + /* set the calculated import cache size to the config */ + sprintf(s, "%lu", (unsigned long)(import_pages * pagesize)); + ldbm_config_internal_set(li, CONFIG_IMPORT_CACHESIZE, s); } + return 0; } /* mode is one of @@ -2011,15 +2057,13 @@ int dblayer_instance_start(backend *be, int mode) oflags |= DB_PRIVATE; } PR_Lock(li->li_config_mutex); - if ((li->li_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE) && - (li->li_import_cache_autosize)) /* Autosizing importCache - * Need to re-eval every time - * to guarantee the memory is - * really available - * (just for command line I/F) - */ - { - autosize_import_cache(li); + /* import cache checking and autosizing is available only + * for the command line */ + if (li->li_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE) { + return_value = check_and_set_import_cache(li); + if (return_value) { + goto out; + } } cachesize = li->li_import_cachesize; PR_Unlock(li->li_config_mutex); diff --git a/ldap/servers/slapd/back-ldbm/dblayer.h b/ldap/servers/slapd/back-ldbm/dblayer.h index c30fe434f..d0d05b4a1 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.h +++ b/ldap/servers/slapd/back-ldbm/dblayer.h @@ -97,6 +97,14 @@ #define DB_REGION_NAME 25 /* DB: named regions, no backing file. 
*/ #endif +/* Used in check_and_set_import_cache */ +/* After allocating the import cache, free memory must be left more than + * the hard limit to run import. */ +/* If the free memory size left is greater than hard limit and less than + * soft limit, the import utility issues a warning, but it runs */ +#define DBLAYER_IMPORTCACHESIZE_HL 100 /* import cache hard limit 100MB */ +#define DBLAYER_IMPORTCACHESIZE_SL 1024 /* import cache soft limit 1GB */ + struct dblayer_private_env { DB_ENV *dblayer_DB_ENV; PRRWLock * dblayer_env_lock; diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c index 60b72db26..76404d5a8 100644 --- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c @@ -632,7 +632,6 @@ int ldbm_back_ldif2ldbm( Slapi_PBlock *pb ) li->li_flags |= SLAPI_TASK_RUNNING_FROM_COMMANDLINE; ldbm_config_load_dse_info(li); - autosize_import_cache(li); } /* Find the instance that the ldif2db will be done on. */ @@ -2484,7 +2483,9 @@ int ldbm_back_upgradedb(Slapi_PBlock *pb) { ldbm_config_load_dse_info(li); } - autosize_import_cache(li); + if (check_and_set_import_cache(li) < 0) { + return -1; + } } else { diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h index f12d41d53..41fd54cf2 100644 --- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h @@ -164,7 +164,7 @@ void dblayer_set_recovery_required(struct ldbminfo *li); char *dblayer_get_home_dir(struct ldbminfo *li, int *dbhome); char *dblayer_get_full_inst_dir(struct ldbminfo *li, ldbm_instance *inst, char *buf, int buflen); -void autosize_import_cache(struct ldbminfo *li); +int check_and_set_import_cache(struct ldbminfo *li); int dblayer_db_uses_locking(DB_ENV *db_env); int dblayer_db_uses_transactions(DB_ENV *db_env);
0
e99c735d37b2cec59d46bd270f661b13385cfcdd
389ds/389-ds-base
Bug 690584 - #10690 #10689 attrcrypt_get_ssl_cert_name() - fix coverity resource leak issues https://bugzilla.redhat.com/show_bug.cgi?id=690584 Resolves: bug 690584 Bug Description: #10690 #10689 attrcrypt_get_ssl_cert_name() - fix coverity resource leak issues Reviewed by: nkinder (Thanks!) Branch: master Fix Description: always free token and personality at the end of the function - set personality to NULL if the memory was passed off to cert_name. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit e99c735d37b2cec59d46bd270f661b13385cfcdd Author: Rich Megginson <[email protected]> Date: Fri Mar 25 08:27:52 2011 -0600 Bug 690584 - #10690 #10689 attrcrypt_get_ssl_cert_name() - fix coverity resource leak issues https://bugzilla.redhat.com/show_bug.cgi?id=690584 Resolves: bug 690584 Bug Description: #10690 #10689 attrcrypt_get_ssl_cert_name() - fix coverity resource leak issues Reviewed by: nkinder (Thanks!) Branch: master Fix Description: always free token and personality at the end of the function - set personality to NULL if the memory was passed off to cert_name. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c index f700434a0..f0ef6923c 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c @@ -161,13 +161,14 @@ attrcrypt_get_ssl_cert_name(char **cert_name) if(!strcasecmp(token, "internal") || !strcasecmp(token, "internal (software)")) { *cert_name = personality; + personality = NULL; /* do not free below */ } else { /* external PKCS #11 token - attach token name */ *cert_name = slapi_ch_smprintf("%s:%s", token, personality); - slapi_ch_free_string(&personality); } - slapi_ch_free_string(&token); } + slapi_ch_free_string(&personality); + slapi_ch_free_string(&token); freeConfigEntry(&config_entry); return 0; }
0
063b36792ed883836ba9fdec7a578038992174ea
389ds/389-ds-base
Ticket 49248 - update eduPerson to 201602 Bug Description: Update the eduPerson schema (commonly used by high education institutes) to match the 201602 release found: http://software.internet2.edu/eduperson/internet2-mace-dir-eduperson-201602.html Fix Description: Update the schema, and add a test asserting the values work as expected. https://pagure.io/389-ds-base/issue/49248 Author: wibrown Review by: mreynolds (Thanks!)
commit 063b36792ed883836ba9fdec7a578038992174ea Author: William Brown <[email protected]> Date: Fri May 12 14:40:31 2017 +1000 Ticket 49248 - update eduPerson to 201602 Bug Description: Update the eduPerson schema (commonly used by high education institutes) to match the 201602 release found: http://software.internet2.edu/eduperson/internet2-mace-dir-eduperson-201602.html Fix Description: Update the schema, and add a test asserting the values work as expected. https://pagure.io/389-ds-base/issue/49248 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/dirsrvtests/tests/suites/schema/test_eduperson.py b/dirsrvtests/tests/suites/schema/test_eduperson.py new file mode 100644 index 000000000..0271b52e1 --- /dev/null +++ b/dirsrvtests/tests/suites/schema/test_eduperson.py @@ -0,0 +1,74 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + + +import os +import logging +import pytest +import ldap + +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_st as topology +from lib389._constants import DEFAULT_SUFFIX + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING is not False: + DEBUGGING = True + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def test_account_locking(topology): + """ + Test the eduperson schema works + """ + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + + user_properties = { + 'uid': 'testuser', + 'cn' : 'testuser', + 'sn' : 'user', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser', + } + testuser = users.create(properties=user_properties) + + # Extend the user with eduPerson + testuser.add('objectClass', 'eduPerson') + + # now add eduPerson attrs + testuser.add('eduPersonAffiliation', 'value') # From 2002 + testuser.add('eduPersonNickName', 'value') # From 2002 + testuser.add('eduPersonOrgDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonOrgUnitDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonPrimaryAffiliation', 'value') # From 2002 + testuser.add('eduPersonPrincipalName', 'value') # From 2002 + testuser.add('eduPersonEntitlement', 'value') # From 2002 + testuser.add('eduPersonPrimaryOrgUnitDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonScopedAffiliation', 'value') # From 2003 + testuser.add('eduPersonTargetedID', 'value') # From 2003 + testuser.add('eduPersonAssurance', 'value') # From 2008 + testuser.add('eduPersonPrincipalNamePrior', 'value') # From 2012 + testuser.add('eduPersonUniqueId', 'value') # From 2013 + testuser.add('eduPersonOrcid', 'value') # From 2016 + + log.info('Test PASSED') + + diff --git a/ldap/schema/60eduperson.ldif b/ldap/schema/60eduperson.ldif index a0ba3b7df..dacbc990a 100644 --- a/ldap/schema/60eduperson.ldif +++ b/ldap/schema/60eduperson.ldif @@ -1,4 +1,6 @@ # 60eduperson.ldif - See http://middleware.internet2.edu/eduperson/ +# This is the 201602 version of the eduperson schema. 
+# http://software.internet2.edu/eduperson/internet2-mace-dir-eduperson-201602.html ################################################################################ # dn: cn=schema @@ -83,6 +85,7 @@ attributeTypes: ( NAME 'eduPersonPrimaryOrgUnitDN' DESC 'Primary Organizational Unit' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 + SINGLE-VALUE X-ORIGIN 'http://middleware.internet2.edu/eduperson/' ) # @@ -98,11 +101,67 @@ attributeTypes: ( # ################################################################################ # +attributeTypes:( + 1.3.6.1.4.1.5923.1.1.1.10 + NAME 'eduPersonTargetedID' + DESC 'eduPerson per Internet2 and EDUCAUSE' + EQUALITY caseExactMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + X-ORIGIN 'http://middleware.internet2.edu/eduperson/' + ) +# +################################################################################ +# +attributeTypes:( + 1.3.6.1.4.1.5923.1.1.1.11 + NAME 'eduPersonAssurance' + DESC 'eduPerson per Internet2 and EDUCAUSE' + EQUALITY caseExactMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + X-ORIGIN 'http://middleware.internet2.edu/eduperson/' + ) +# +################################################################################ +# +attributeTypes:( + 1.3.6.1.4.1.5923.1.1.1.12 + NAME 'eduPersonPrincipalNamePrior' + DESC 'eduPersonPrincipalNamePrior per Internet2' + EQUALITY caseIgnoreMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + X-ORIGIN 'http://middleware.internet2.edu/eduperson/' + ) +# +################################################################################ +# +attributeTypes:( + 1.3.6.1.4.1.5923.1.1.1.13 + NAME 'eduPersonUniqueId' + DESC 'eduPersonUniqueId per Internet2' + EQUALITY caseIgnoreMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE + X-ORIGIN 'http://middleware.internet2.edu/eduperson/' + ) +# +################################################################################ +# +attributeTypes:( + 1.3.6.1.4.1.5923.1.1.1.16 + NAME 'eduPersonOrcid' + DESC 'ORCID researcher identifiers belonging to 
the principal' + EQUALITY caseIgnoreMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + X-ORIGIN 'http://middleware.internet2.edu/eduperson/' + ) +# +################################################################################ +# objectClasses: ( 1.3.6.1.4.1.5923.1.1.2 NAME 'eduPerson' AUXILIARY - MAY ( eduPersonAffiliation $ eduPersonNickName $ eduPersonOrgDN $ eduPersonOrgUnitDN $ eduPersonPrimaryAffiliation $ eduPersonPrincipalName $ eduPersonEntitlement $eduPersonPrimaryOrgUnitDN $ eduPersonScopedAffiliation ) + MAY ( eduPersonAffiliation $ eduPersonNickName $ eduPersonOrgDN $ eduPersonOrgUnitDN $ eduPersonPrimaryAffiliation $ eduPersonPrincipalName $ eduPersonEntitlement $eduPersonPrimaryOrgUnitDN $ eduPersonScopedAffiliation $ eduPersonTargetedID $ eduPersonAssurance $ eduPersonPrincipalNamePrior $ eduPersonUniqueId $ eduPersonOrcid) X-ORIGIN 'http://middleware.internet2.edu/eduperson/' ) #
0
470e2c70338440f69b0ab8fc02128fe5f204af3e
389ds/389-ds-base
Bug 630097 - (cov#11938) NULL dereference in mmldif There is a chance that a can be NULL, which we then dereference within the else block. We should not execute the else block if a is NULL.
commit 470e2c70338440f69b0ab8fc02128fe5f204af3e Author: Nathan Kinder <[email protected]> Date: Wed Sep 15 11:45:56 2010 -0700 Bug 630097 - (cov#11938) NULL dereference in mmldif There is a chance that a can be NULL, which we then dereference within the else block. We should not execute the else block if a is NULL. diff --git a/ldap/servers/slapd/tools/mmldif.c b/ldap/servers/slapd/tools/mmldif.c index 665452cb5..b364a19ce 100644 --- a/ldap/servers/slapd/tools/mmldif.c +++ b/ldap/servers/slapd/tools/mmldif.c @@ -1108,7 +1108,7 @@ addmodified(FILE * edf3, attrib1_t * attrib, record_t * first) } while (num_b <= tot_b && stricmp(attribname(b), attrname) == 0); fprintf(edf3, "-\n"); continue; - } else { + } else if (a != NULL) { /* a == b */ int nmods = 0; attrib_t *begin_b = b;
0
c6167e7b68e3983ff0922cbcba3e3ee2fe405174
389ds/389-ds-base
Ticket 47761 - Return all attributes in rootdse without explicit request Bug Description: a search for the rootdse: -s base -b "" only returns the user attributes not the operational attributes like "supportedControl" This is correct inLDAPv3, and behaviour was introduced with fix for #47634, but for backward compatibility the old behaviour should be configurable Fix Description: Introduce a multivalued attribute to specify the attribites which should be returned without specific request https://fedorahosted.org/389/ticket/47634 Reviewed by: richm, thanks
commit c6167e7b68e3983ff0922cbcba3e3ee2fe405174 Author: Ludwig Krispenz <[email protected]> Date: Wed Apr 16 14:35:36 2014 +0200 Ticket 47761 - Return all attributes in rootdse without explicit request Bug Description: a search for the rootdse: -s base -b "" only returns the user attributes not the operational attributes like "supportedControl" This is correct inLDAPv3, and behaviour was introduced with fix for #47634, but for backward compatibility the old behaviour should be configurable Fix Description: Introduce a multivalued attribute to specify the attribites which should be returned without specific request https://fedorahosted.org/389/ticket/47634 Reviewed by: richm, thanks diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif index c5e48779c..6a2d28c7d 100644 --- a/ldap/schema/01core389.ldif +++ b/ldap/schema/01core389.ldif @@ -297,6 +297,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2302 NAME 'nsslapd-listen-backlog-size' attributeTypes: ( 2.16.840.1.113730.3.1.2303 NAME 'nsslapd-ignore-time-skew' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.2304 NAME 'nsslapd-dynamic-plugins' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.2305 NAME 'nsslapd-moddn-aci' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2306 NAME 'nsslapd-return-default-opattr' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 USAGE directoryOperation X-ORIGIN 'Netscape Directory Server' ) # # objectclasses # diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c index a10dfaef6..7b45e75fd 100644 --- a/ldap/servers/slapd/result.c +++ b/ldap/servers/slapd/result.c @@ -1157,6 
+1157,8 @@ static int send_all_attrs(Slapi_Entry *e,char **attrs,Slapi_Operation *op,Slapi_ char *current_type_name = NULL; int rewrite_rfc1274 = 0; int vattr_flags = 0; + char *dn = NULL; + char **default_attrs = NULL; if(real_attrs_only == SLAPI_SEND_VATTR_FLAG_REALONLY) vattr_flags = SLAPI_REALATTRS_ONLY; @@ -1193,6 +1195,10 @@ static int send_all_attrs(Slapi_Entry *e,char **attrs,Slapi_Operation *op,Slapi_ rewrite_rfc1274 = config_get_rewrite_rfc1274(); + dn = slapi_entry_get_dn_const(e); + if (dn == NULL || *dn == '\0' ) { + default_attrs = slapi_entry_attr_get_charray(e, CONFIG_RETURN_DEFAULT_OPATTR); + } /* Send the attrs back to the client */ for (current_type = vattr_typethang_first(typelist); current_type; current_type = vattr_typethang_next(current_type) ) { @@ -1224,6 +1230,14 @@ static int send_all_attrs(Slapi_Entry *e,char **attrs,Slapi_Operation *op,Slapi_ break; } } + if (!sendit && default_attrs) { + for ( i = 0; default_attrs != NULL && default_attrs[i] != NULL; i++ ) { + if ( slapi_attr_type_cmp( default_attrs[i], current_type_name, SLAPI_TYPE_CMP_SUBTYPE ) == 0 ) { + sendit = 1; + break; + } + } + } } /* * it's a user attribute. send it. @@ -1324,6 +1338,9 @@ exit: if (NULL != typelist) { slapi_vattr_attrs_free(&typelist,typelist_flags); } + if (NULL != default_attrs) { + slapi_ch_free((void**)&default_attrs); + } return rc; } diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index f807d8de1..642f3b247 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -2150,6 +2150,7 @@ typedef struct _slapdEntryPoints { #define CONFIG_PLUGIN_LOGGING "nsslapd-plugin-logging" #define CONFIG_LISTEN_BACKLOG_SIZE "nsslapd-listen-backlog-size" #define CONFIG_DYNAMIC_PLUGINS "nsslapd-dynamic-plugins" +#define CONFIG_RETURN_DEFAULT_OPATTR "nsslapd-return-default-opattr" /* getenv alternative */ #define CONFIG_MALLOC_MXFAST "nsslapd-malloc-mxfast"
0
0762e393850f54ce8462c45321b3db084bd8a0e1
389ds/389-ds-base
Ticket 49075 - Adjust logging severity levels Description: There are places wherre we log a severity "ERR", when in fact it is a benign message. https://pagure.io/389-ds-base/issue/49075 Reviewed by: firstyear(Thanks!)
commit 0762e393850f54ce8462c45321b3db084bd8a0e1 Author: Mark Reynolds <[email protected]> Date: Wed May 3 14:37:11 2017 -0400 Ticket 49075 - Adjust logging severity levels Description: There are places wherre we log a severity "ERR", when in fact it is a benign message. https://pagure.io/389-ds-base/issue/49075 Reviewed by: firstyear(Thanks!) diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c index d7c3c0841..4ea7fa681 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c @@ -126,10 +126,12 @@ ldbm_instance_config_cachememsize_set(void *arg, if (sane == UTIL_CACHESIZE_ERROR){ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: unable to determine system memory limits."); - slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", "Enable to determine system memory limits.\n"); + slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", + "Enable to determine system memory limits.\n"); return LDAP_UNWILLING_TO_PERFORM; } else if (sane == UTIL_CACHESIZE_REDUCED) { - slapi_log_err(SLAPI_LOG_WARNING, "ldbm_instance_config_cachememsize_set", "delta +%"PRIu64" of request %"PRIu64" reduced to %"PRIu64"\n", delta_original, val, delta); + slapi_log_err(SLAPI_LOG_WARNING, "ldbm_instance_config_cachememsize_set", + "delta +%"PRIu64" of request %"PRIu64" reduced to %"PRIu64"\n", delta_original, val, delta); /* * This works as: value = 100 * delta_original to inst, 20; @@ -141,7 +143,8 @@ ldbm_instance_config_cachememsize_set(void *arg, } } if (inst->inst_cache.c_maxsize < MINCACHESIZE || val < MINCACHESIZE) { - slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_config_cachememsize_set", "force a minimal value %"PRIu64"\n", MINCACHESIZE); + slapi_log_err(SLAPI_LOG_INFO, "ldbm_instance_config_cachememsize_set", + "force a minimal value %"PRIu64"\n", MINCACHESIZE); /* This value will trigger an autotune next start 
up, but it should increase only */ val = MINCACHESIZE; } @@ -1187,7 +1190,7 @@ ldbm_instance_post_delete_instance_entry_callback(Slapi_PBlock *pb __attribute__ return SLAPI_DSE_CALLBACK_ERROR; } - slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_post_delete_instance_entry_callback", + slapi_log_err(SLAPI_LOG_INFO, "ldbm_instance_post_delete_instance_entry_callback", "Removing '%s'.\n", instance_name); cache_destroy_please(&inst->inst_cache, CACHE_TYPE_ENTRY); @@ -1224,9 +1227,9 @@ ldbm_instance_post_delete_instance_entry_callback(Slapi_PBlock *pb __attribute__ dbp = PR_smprintf("%s/%s", inst_dirp, direntry->name); if (NULL == dbp) { slapi_log_err(SLAPI_LOG_ERR, - "ldbm_instance_post_delete_instance_entry_callback", - "Failed to generate db path: %s/%s\n", - inst_dirp, direntry->name); + "ldbm_instance_post_delete_instance_entry_callback", + "Failed to generate db path: %s/%s\n", + inst_dirp, direntry->name); break; }
0
ecf3cc3436accbae478ebe0774e817ead91c6fdf
389ds/389-ds-base
Ticket 394 - modify-delete userpassword Bug Description: Attempting to delete a specific user password results in an error 16 - if you are not using clear-text password storage scheme. Fix Description: The error is caused because it can not find a userpassword attr with the clear-text password - as its usually encoded. If you know the correct userpassword encoded value to delete, then you won't get an error 16, but the unhashed userpassword will not be removed. This fix checks the scheme of the password value to delete, then compares it to all the userpassword attrs. Once we find a match, we change the "value to delete" to the encoded value. If you do supply an encoded password value to delete, we do the opposite. We grab all the clear-text unhashed userpasswords from the password entry extension. Then we compare each one to the hashed value. If we have a match, we know which unhashed userpassword to delete. Also, added a check to make sure we don't add encoded values to the unhashed_password extension. https://fedorahosted.org/389/ticket/394 Reviewed by: richm (Thank you)
commit ecf3cc3436accbae478ebe0774e817ead91c6fdf Author: Mark Reynolds <[email protected]> Date: Mon Nov 19 15:52:32 2012 -0500 Ticket 394 - modify-delete userpassword Bug Description: Attempting to delete a specific user password results in an error 16 - if you are not using clear-text password storage scheme. Fix Description: The error is caused because it can not find a userpassword attr with the clear-text password - as its usually encoded. If you know the correct userpassword encoded value to delete, then you won't get an error 16, but the unhashed userpassword will not be removed. This fix checks the scheme of the password value to delete, then compares it to all the userpassword attrs. Once we find a match, we change the "value to delete" to the encoded value. If you do supply an encoded password value to delete, we do the opposite. We grab all the clear-text unhashed userpasswords from the password entry extension. Then we compare each one to the hashed value. If we have a match, we know which unhashed userpassword to delete. Also, added a check to make sure we don't add encoded values to the unhashed_password extension. 
https://fedorahosted.org/389/ticket/394 Reviewed by: richm (Thank you) diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 424badb63..867e3915c 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -80,6 +80,7 @@ static void remove_mod (Slapi_Mods *smods, const char *type, Slapi_Mods *smod_un #endif static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_Mods *smods); static int hash_rootpw (LDAPMod **mods); +static int valuearray_init_bervalarray_unhashed_only(struct berval **bvals, Slapi_Value ***cvals); #ifdef LDAP_DEBUG static const char* @@ -836,19 +837,137 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) if (strcasecmp (pw_mod->mod_type, SLAPI_USERPWD_ATTR) != 0) continue; - if (LDAP_MOD_DELETE == pw_mod->mod_op) { + if (SLAPI_IS_MOD_DELETE(pw_mod->mod_op)) { Slapi_Attr *a = NULL; - /* delete pseudo password attribute if it exists in the entry */ - if (!slapi_entry_attr_find(e, unhashed_pw_attr, &a)) { - slapi_mods_add_mod_values(&smods, pw_mod->mod_op, - unhashed_pw_attr, va); + struct pw_scheme *pwsp = NULL; + int remove_unhashed_pw = 1; + char *password = NULL; + char *valpwd = NULL; + + /* if there are mod values, we need to delete a specific userpassword */ + for ( i = 0; pw_mod->mod_bvalues != NULL && pw_mod->mod_bvalues[i] != NULL; i++ ) { + password = slapi_ch_strdup(pw_mod->mod_bvalues[i]->bv_val); + pwsp = pw_val2scheme( password, &valpwd, 1 ); + if(strcmp(pwsp->pws_name, "CLEAR") == 0){ + /* + * CLEAR password + * + * Ok, so now we to check the entry's userpassword values. + * First, find out the password encoding of the entry's pw. + * Then compare our clear text password to the encoded userpassword + * using the proper scheme. If we have a match, we know which + * userpassword value to delete. 
+ */ + Slapi_Attr *pw = NULL; + struct berval bval, *bv[2]; + + if(slapi_entry_attr_find(e, SLAPI_USERPWD_ATTR, &pw) == 0 && pw){ + struct pw_scheme *pass_scheme = NULL; + Slapi_Value **present_values = NULL; + char *pval = NULL; + int ii; + + present_values = attr_get_present_values(pw); + for(ii = 0; present_values && present_values[ii]; ii++){ + const char *userpwd = slapi_value_get_string(present_values[ii]); + + pass_scheme = pw_val2scheme( (char *)userpwd, &pval, 1 ); + if(strcmp(pass_scheme->pws_name,"CLEAR")){ + /* its encoded, so compare it */ + if((*(pass_scheme->pws_cmp))( valpwd, pval ) == 0 ){ + /* + * Match, replace the mod value with the encoded password + */ + slapi_ch_free_string(&pw_mod->mod_bvalues[i]->bv_val); + pw_mod->mod_bvalues[i]->bv_val = strdup(userpwd); + pw_mod->mod_bvalues[i]->bv_len = strlen(userpwd); + free_pw_scheme( pass_scheme ); + break; + } + } else { + /* userpassword is already clear text, nothing to do */ + free_pw_scheme( pass_scheme ); + break; + } + free_pw_scheme( pass_scheme ); + } + } + /* + * Finally, delete the unhashed userpassword + * (this will update the password entry extension) + */ + bval.bv_val = password; + bval.bv_len = strlen(password); + bv[0] = &bval; + bv[1] = NULL; + valuearray_init_bervalarray(bv, &va); + slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); + valuearray_free(&va); + } else { + /* + * Password is encoded, try and find a matching unhashed_password to delete + */ + Slapi_Value **vals; + + /* + * Grab the current unhashed passwords from the password entry extension, + * as the "attribute" is no longer present in the entry. 
+ */ + if(slapi_pw_get_entry_ext(e, &vals) == LDAP_SUCCESS){ + int ii; + + for(ii = 0; vals && vals[ii]; ii++){ + const char *unhashed_pwd = slapi_value_get_string(vals[ii]); + struct pw_scheme *unhashed_pwsp = NULL; + struct berval bval, *bv[2]; + + /* prepare the value to delete from the list of unhashed userpasswords */ + bval.bv_val = (char *)unhashed_pwd; + bval.bv_len = strlen(unhashed_pwd); + bv[0] = &bval; + bv[1] = NULL; + /* + * Compare the clear text unhashed password, to the encoded password + * provided by the client. + */ + unhashed_pwsp = pw_val2scheme( (char *)unhashed_pwd, NULL, 1 ); + if(strcmp(unhashed_pwsp->pws_name, "CLEAR") == 0){ + if((*(pwsp->pws_cmp))((char *)unhashed_pwd , valpwd) == 0 ){ + /* match, add the delete mod for this particular unhashed userpassword */ + valuearray_init_bervalarray(bv, &va); + slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); + valuearray_free(&va); + free_pw_scheme( unhashed_pwsp ); + break; + } + } else { + /* + * We have a hashed unhashed_userpassword! We must delete it. 
+ */ + valuearray_init_bervalarray(bv, &va); + slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); + valuearray_free(&va); + } + free_pw_scheme( unhashed_pwsp ); + } + } else { + + } + } + remove_unhashed_pw = 0; /* mark that we already removed the unhashed userpassword */ + slapi_ch_free_string(&password); + free_pw_scheme( pwsp ); + } + if (remove_unhashed_pw && !slapi_entry_attr_find(e, unhashed_pw_attr, &a)){ + slapi_mods_add_mod_values(&smods, pw_mod->mod_op,unhashed_pw_attr, va); } } else { /* add pseudo password attribute */ - valuearray_init_bervalarray(pw_mod->mod_bvalues, &va); - slapi_mods_add_mod_values(&smods, pw_mod->mod_op, - unhashed_pw_attr, va); - valuearray_free(&va); + valuearray_init_bervalarray_unhashed_only(pw_mod->mod_bvalues, &va); + if(va){ + slapi_mods_add_mod_values(&smods, pw_mod->mod_op, unhashed_pw_attr, va); + valuearray_free(&va); + } } /* Init new value array for hashed value */ @@ -859,6 +978,7 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) /* remove current clear value of userpassword */ ber_bvecfree(pw_mod->mod_bvalues); + /* add the cipher in the structure */ valuearray_get_bervalarray(va, &pw_mod->mod_bvalues); @@ -1020,6 +1140,38 @@ free_and_return: } } +/* + * Only add password mods that are in clear text. 
The console likes to send two mods: + * - Already encoded password + * - Clear text password + * + * We don't want to add the encoded value to the unhashed_userpassword attr + */ +static int +valuearray_init_bervalarray_unhashed_only(struct berval **bvals, Slapi_Value ***cvals) +{ + int n; + + for(n=0; bvals != NULL && bvals[n] != NULL; n++); + if(n==0){ + *cvals = NULL; + } else { + struct pw_scheme *pwsp = NULL; + int i,p; + + *cvals = (Slapi_Value **) slapi_ch_malloc((n + 1) * sizeof(Slapi_Value *)); + for(i=0,p=0;i<n;i++){ + pwsp = pw_val2scheme( bvals[i]->bv_val, NULL, 1 ); + if(strcmp(pwsp->pws_name, "CLEAR") == 0){ + (*cvals)[p++] = slapi_value_new_berval(bvals[i]); + } + free_pw_scheme( pwsp ); + } + (*cvals)[p] = NULL; + } + return n; +} + #if 0 /* not used */ static void remove_mod (Slapi_Mods *smods, const char *type, Slapi_Mods *smod_unhashed) {
0
955510cd06cd6802699c332551f1e0a6c0879b2b
389ds/389-ds-base
Ticket 49453 - passwd.py to use pwdhash defaults. Bug Description: pwdhash now uses the defaults from libslapd, so we do not need to code this into passwd.py Fix Description: Change the default args for passwd.py to simplify the call. https://pagure.io/389-ds-base/issue/49453 Author: wibrown Review by: alisha17, spichugi (Thank you!)
commit 955510cd06cd6802699c332551f1e0a6c0879b2b Author: William Brown <[email protected]> Date: Wed Nov 15 15:41:06 2017 +1000 Ticket 49453 - passwd.py to use pwdhash defaults. Bug Description: pwdhash now uses the defaults from libslapd, so we do not need to code this into passwd.py Fix Description: Change the default args for passwd.py to simplify the call. https://pagure.io/389-ds-base/issue/49453 Author: wibrown Review by: alisha17, spichugi (Thank you!) diff --git a/src/lib389/lib389/passwd.py b/src/lib389/lib389/passwd.py index f36d73eab..e4227353a 100644 --- a/src/lib389/lib389/passwd.py +++ b/src/lib389/lib389/passwd.py @@ -15,37 +15,20 @@ import subprocess import random import string import os -import sys - -BESTSCHEME = 'SSHA512' -MAJOR, MINOR, _, _, _ = sys.version_info - -# We need a dict of the schemes I think .... -PWSCHEMES = [ - 'SHA1', - 'SHA256', - 'SHA512', - 'SSHA', - 'SSHA256', - 'SSHA512', -] - # How do we feed our prefix into this? -def password_hash(pw, scheme=BESTSCHEME, bin_dir='/bin'): +def password_hash(pw, scheme=None, bin_dir='/bin'): # Check that the binary exists - assert(scheme in PWSCHEMES) pwdhashbin = os.path.join(bin_dir, 'pwdhash') assert(os.path.isfile(pwdhashbin)) - h = subprocess.check_output([pwdhashbin, '-s', scheme, pw]).strip() + if scheme is None: + h = subprocess.check_output([pwdhashbin, pw]).strip() + else: + h = subprocess.check_output([pwdhashbin, '-s', scheme, pw]).strip() return h.decode('utf-8') def password_generate(length=64): - pw = None - if MAJOR >= 3: - pw = [random.choice(string.ascii_letters) for x in range(length - 1)] - else: - pw = [random.choice(string.letters) for x in xrange(length - 1)] + pw = [random.choice(string.ascii_letters) for x in range(length - 1)] pw.append('%s' % random.randint(0, 9)) return "".join(pw)
0
639535771d13de86446a361a515713ae15f65722
389ds/389-ds-base
Ticket 49231 - fix sasl mech handling Bug Description: In our sasl code we had two issues. One was that we did not correctly apply the list of sasl allowed mechs to our rootdse list in ids_sasl_listmech. The second was that on config reset, we did not correctly set null to the value. Fix Description: Fix the handling of the mech lists to allow reset, and allow the mech list to be updated properly. https://pagure.io/389-ds-base/issue/49231 Author: wibrown Review by: mreynolds (Thanks!)
commit 639535771d13de86446a361a515713ae15f65722 Author: William Brown <[email protected]> Date: Wed Apr 26 15:48:30 2017 +1000 Ticket 49231 - fix sasl mech handling Bug Description: In our sasl code we had two issues. One was that we did not correctly apply the list of sasl allowed mechs to our rootdse list in ids_sasl_listmech. The second was that on config reset, we did not correctly set null to the value. Fix Description: Fix the handling of the mech lists to allow reset, and allow the mech list to be updated properly. https://pagure.io/389-ds-base/issue/49231 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/dirsrvtests/tests/suites/sasl/allowed_mechs.py b/dirsrvtests/tests/suites/sasl/allowed_mechs.py new file mode 100644 index 000000000..a3e385e4d --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/allowed_mechs.py @@ -0,0 +1,43 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +import time + +from lib389.topologies import topology_st + +def test_sasl_allowed_mechs(topology_st): + standalone = topology_st.standalone + + # Get the supported mechs. This should contain PLAIN, GSSAPI, EXTERNAL at least + orig_mechs = standalone.rootdse.supported_sasl() + print(orig_mechs) + assert('GSSAPI' in orig_mechs) + assert('PLAIN' in orig_mechs) + assert('EXTERNAL' in orig_mechs) + + # Now edit the supported mechs. CHeck them again. + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'EXTERNAL, PLAIN') + + limit_mechs = standalone.rootdse.supported_sasl() + print(limit_mechs) + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + + # Do a config reset + standalone.config.reset('nsslapd-allowed-sasl-mechanisms') + + # check the supported list is the same as our first check. 
+ final_mechs = standalone.rootdse.supported_sasl() + print(final_mechs) + assert(set(final_mechs) == set(orig_mechs)) + diff --git a/ldap/servers/slapd/charray.c b/ldap/servers/slapd/charray.c index 5551dccb2..6b89714ee 100644 --- a/ldap/servers/slapd/charray.c +++ b/ldap/servers/slapd/charray.c @@ -348,8 +348,9 @@ slapi_str2charray_ext( char *str, char *brkstr, int allow_dups ) } } - if ( !dup_found ) + if ( !dup_found ) { res[i++] = slapi_ch_strdup( s ); + } } res[i] = NULL; @@ -413,10 +414,11 @@ charray_subtract(char **a, char **b, char ***c) char **bp, **cp, **tmp; char **p; - if (c) + if (c) { tmp = *c = cool_charray_dup(a); - else + } else { tmp = a; + } for (cp = tmp; cp && *cp; cp++) { for (bp = b; bp && *bp; bp++) { @@ -433,12 +435,48 @@ charray_subtract(char **a, char **b, char ***c) for (p = cp+1; *p && *p == (char *)SUBTRACT_DEL; p++) ; *cp = *p; - if (*p == NULL) + if (*p == NULL) { break; - else + } else { *p = SUBTRACT_DEL; + } + } + } +} + +/* + * Provides the intersection of two arrays. + * IE if you have: + * (A, B, C) + * (B, D, E) + * result is (B,) + * a and b are NOT consumed in the process. + */ +char ** +charray_intersection(char **a, char **b) { + char **result; + size_t rp = 0; + + if (a == NULL || b == NULL) { + return NULL; + } + + size_t a_len = 0; + /* Find how long A is. 
*/ + for (; a[a_len] != NULL; a_len++); + + /* Allocate our result, it can't be bigger than A */ + result = (char **)slapi_ch_calloc(1, sizeof(char *) * (a_len + 1)); + + /* For each in A, see if it's in b */ + for (size_t i = 0; a[i] != NULL; i++) { + if (charray_get_index(b, a[i]) != -1) { + result[rp] = slapi_ch_strdup(a[i]); + rp++; } } + + return result; } int diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 9c4a61b6a..e48888527 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -7091,9 +7091,30 @@ config_set_entryusn_import_init( const char *attrname, char *value, return retVal; } +char ** +config_get_allowed_sasl_mechs_array(void) +{ + /* + * array of mechs. If is null, returns NULL thanks to ch_array_dup. + * Caller must free! + */ + char **retVal; + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + + CFG_LOCK_READ(slapdFrontendConfig); + retVal = slapi_ch_array_dup(slapdFrontendConfig->allowed_sasl_mechs_array); + CFG_UNLOCK_READ(slapdFrontendConfig); + + return retVal; +} + char * -config_get_allowed_sasl_mechs() +config_get_allowed_sasl_mechs(void) { + /* + * Space seperated list of allowed mechs + * if this is NULL, means *all* mechs are allowed! + */ char *retVal; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -7114,22 +7135,35 @@ config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf return LDAP_SUCCESS; } - /* cyrus sasl doesn't like comma separated lists */ - remove_commas(value); + /* During a reset, the value is "", so we have to handle this case. */ + if (strcmp(value, "") != 0) { + /* cyrus sasl doesn't like comma separated lists */ + remove_commas(value); + + if(invalid_sasl_mech(value)){ + slapi_log_err(SLAPI_LOG_ERR,"config_set_allowed_sasl_mechs", + "Invalid value/character for sasl mechanism (%s). 
Use ASCII " + "characters, upto 20 characters, that are upper-case letters, " + "digits, hyphens, or underscores\n", value); + return LDAP_UNWILLING_TO_PERFORM; + } - if(invalid_sasl_mech(value)){ - slapi_log_err(SLAPI_LOG_ERR,"config_set_allowed_sasl_mechs", - "Invalid value/character for sasl mechanism (%s). Use ASCII " - "characters, upto 20 characters, that are upper-case letters, " - "digits, hyphens, or underscores\n", value); - return LDAP_UNWILLING_TO_PERFORM; + CFG_LOCK_WRITE(slapdFrontendConfig); + slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs); + slapi_ch_array_free(slapdFrontendConfig->allowed_sasl_mechs_array); + slapdFrontendConfig->allowed_sasl_mechs = slapi_ch_strdup(value); + slapdFrontendConfig->allowed_sasl_mechs_array = slapi_str2charray_ext(value, " ", 0); + CFG_UNLOCK_WRITE(slapdFrontendConfig); + } else { + /* If this value is "", we need to set the list to *all* possible mechs */ + CFG_LOCK_WRITE(slapdFrontendConfig); + slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs); + slapi_ch_array_free(slapdFrontendConfig->allowed_sasl_mechs_array); + slapdFrontendConfig->allowed_sasl_mechs = NULL; + slapdFrontendConfig->allowed_sasl_mechs_array = NULL; + CFG_UNLOCK_WRITE(slapdFrontendConfig); } - CFG_LOCK_WRITE(slapdFrontendConfig); - slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs); - slapdFrontendConfig->allowed_sasl_mechs = slapi_ch_strdup(value); - CFG_UNLOCK_WRITE(slapdFrontendConfig); - return LDAP_SUCCESS; } diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index fdb4bf0e7..9696ead44 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -553,6 +553,7 @@ size_t config_get_ndn_cache_size(void); int config_get_ndn_cache_enabled(void); int config_get_return_orig_type_switch(void); char *config_get_allowed_sasl_mechs(void); +char **config_get_allowed_sasl_mechs_array(void); int config_set_allowed_sasl_mechs(const char *attrname, char *value, char 
*errorbuf, int apply); int config_get_schemamod(void); int config_set_ignore_vattrs(const char *attrname, char *value, char *errorbuf, int apply); diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c index 328d91944..3c85b1c4d 100644 --- a/ldap/servers/slapd/saslbind.c +++ b/ldap/servers/slapd/saslbind.c @@ -753,7 +753,10 @@ void ids_sasl_server_new(Connection *conn) */ char **ids_sasl_listmech(Slapi_PBlock *pb) { - char **ret, **others; + char **ret; + char **config_ret; + char **sup_ret; + char **others; const char *str; char *dupstr; sasl_conn_t *sasl_conn; @@ -766,32 +769,43 @@ char **ids_sasl_listmech(Slapi_PBlock *pb) slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); /* hard-wired mechanisms and slapi plugin registered mechanisms */ - ret = slapi_get_supported_saslmechanisms_copy(); + sup_ret = slapi_get_supported_saslmechanisms_copy(); - if (pb_conn == NULL) { - return ret; + /* If we have a connection, get the provided list from SASL */ + if (pb_conn != NULL) { + sasl_conn = (sasl_conn_t*)pb_conn->c_sasl_conn; + if (sasl_conn != NULL) { + /* sasl library mechanisms are connection dependent */ + PR_EnterMonitor(pb_conn->c_mutex); + if (sasl_listmech(sasl_conn, + NULL, /* username */ + "", ",", "", + &str, NULL, NULL) == SASL_OK) { + slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "sasl library mechs: %s\n", str); + /* merge into result set */ + dupstr = slapi_ch_strdup(str); + others = slapi_str2charray_ext(dupstr, ",", 0 /* don't list duplicate mechanisms */); + charray_merge(&sup_ret, others, 1); + charray_free(others); + slapi_ch_free((void**)&dupstr); + } + PR_ExitMonitor(pb_conn->c_mutex); + } } - sasl_conn = (sasl_conn_t*)pb_conn->c_sasl_conn; - if (sasl_conn == NULL) { - return ret; - } + /* Get the servers "allowed" list */ + config_ret = config_get_allowed_sasl_mechs_array(); - /* sasl library mechanisms are connection dependent */ - PR_EnterMonitor(pb_conn->c_mutex); - if (sasl_listmech(sasl_conn, - NULL, /* username */ - 
"", ",", "", - &str, NULL, NULL) == SASL_OK) { - slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "sasl library mechs: %s\n", str); - /* merge into result set */ - dupstr = slapi_ch_strdup(str); - others = slapi_str2charray_ext(dupstr, ",", 0 /* don't list duplicate mechanisms */); - charray_merge(&ret, others, 1); - charray_free(others); - slapi_ch_free((void**)&dupstr); + /* Remove any content that isn't in the allowed list */ + if (config_ret != NULL) { + /* Get the set of supported mechs in the insection of the two */ + ret = charray_intersection(sup_ret, config_ret); + charray_free(sup_ret); + charray_free(config_ret); + } else { + /* The allowed list was empty, just take our supported list. */ + ret = sup_ret; } - PR_ExitMonitor(pb_conn->c_mutex); slapi_log_err(SLAPI_LOG_TRACE, "ids_sasl_listmech", "<=\n"); diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index 52c9c403e..5469b5104 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -2425,6 +2425,7 @@ typedef struct _slapdFrontendConfig { int pagedsizelimit; char *default_naming_context; /* Default naming context (normalized) */ char *allowed_sasl_mechs; /* comma/space separated list of allowed sasl mechs */ + char **allowed_sasl_mechs_array; /* Array of allow sasl mechs */ int sasl_max_bufsize; /* The max receive buffer size for SASL */ /* disk monitoring */ diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h index f7041bd99..5b76e892b 100644 --- a/ldap/servers/slapd/slapi-private.h +++ b/ldap/servers/slapd/slapi-private.h @@ -831,6 +831,7 @@ int charray_remove(char **a, const char *s, int freeit); char ** cool_charray_dup( char **a ); void cool_charray_free( char **array ); void charray_subtract( char **a, char **b, char ***c ); +char **charray_intersection(char **a, char **b); int charray_get_index(char **array, char *s); int charray_normdn_add(char ***chararray, char *dn, char *errstr);
0
9186bcec620233feb49716c92d463646afeb027d
389ds/389-ds-base
Ticket 49571 - perl subpackage and python installer by default Bug Description: With 1.4.0 fast coming, we need to split the legacy perl out to a subpackage, and coerce our tests to python installer by default. Fix Description: Add a legacy tools subpackage https://pagure.io/389-ds-base/issue/49571 Author: wibrown & vashirov(Thanks!!)
commit 9186bcec620233feb49716c92d463646afeb027d Author: Mark Reynolds <[email protected]> Date: Tue Jun 5 14:13:14 2018 -0400 Ticket 49571 - perl subpackage and python installer by default Bug Description: With 1.4.0 fast coming, we need to split the legacy perl out to a subpackage, and coerce our tests to python installer by default. Fix Description: Add a legacy tools subpackage https://pagure.io/389-ds-base/issue/49571 Author: wibrown & vashirov(Thanks!!) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index b773136c8..25ffb6300 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -63,7 +63,7 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) Obsoletes: %{name}-selinux Conflicts: selinux-policy-base < 3.9.8 # upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp -Obsoletes: %{name} <= 1.3.5.4 +Obsoletes: %{name} <= 1.4.0.9 Provides: ldif2ldbm # Attach the buildrequires to the top level package: @@ -140,8 +140,6 @@ Requires: policycoreutils-python Requires: libsemanage-python # the following are needed for some of our scripts Requires: openldap-clients -# use_openldap assumes perl-Mozilla-LDAP is built with openldap support -Requires: perl-Mozilla-LDAP # this is needed to setup SSL if you are not using the # administration server package Requires: nss-tools @@ -159,19 +157,8 @@ Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $ver # Needed by logconv.pl Requires: perl-DB_File Requires: perl-Archive-Tar -# for the init script -Requires(post): systemd-units -Requires(preun): systemd-units -Requires(postun): systemd-units -# for setup-ds.pl -Requires: bind-utils -# for setup-ds.pl to support ipv6 -%if %{use_Socket6} -Requires: perl-Socket6 -%else -Requires: perl-Socket -%endif -Requires: perl-NetAddr-IP +# Picks up our systemd deps. 
+%{?systemd_requires} Source0: http://www.port389.org/sources/%{name}-%{version}%{?prerel}.tar.bz2 # 389-ds-git.sh should be used to generate the source tarball from git @@ -229,6 +216,30 @@ Core libraries for the 389 Directory Server base package. These libraries are used by the main package and the -devel package. This allows the -devel package to be installed with just the -libs package and without the main package. +%package legacy-tools +Summary: Legacy utilities for 389 Directory Server (%{variant}) +Group: System Environment/Daemons +Obsoletes: %{name} <= 1.4.0.9 +Requires: %{name} = %{version}-%{release} +%if %{use_perl} +# for setup-ds.pl to support ipv6 +%if %{use_Socket6} +Requires: perl-Socket6 +%else +Requires: perl-Socket +%endif +Requires: perl-NetAddr-IP +# use_openldap assumes perl-Mozilla-LDAP is built with openldap support +Requires: perl-Mozilla-LDAP +# for setup-ds.pl +Requires: bind-utils +%endif +# End use perl + +%description legacy-tools +Legacy (and deprecated) utilities for 389 Directory Server. This includes +the old account management and task scripts. These are deprecated in favour of +the dscreate, dsctl, dsconf and dsidm tools. %package devel Summary: Development libraries for 389 Directory Server (%{variant}) @@ -450,8 +461,13 @@ if ! 
make DESTDIR="$RPM_BUILD_ROOT" check; then cat ./test-suite.log && false; f rm -rf $RPM_BUILD_ROOT %post -output=/dev/null -output2=/dev/null +if [ -n "$DEBUGPOSTTRANS" ] ; then + output=$DEBUGPOSTTRANS + output2=${DEBUGPOSTTRANS}.upgrade +else + output=/dev/null + output2=/dev/null +fi # reload to pick up any changes to systemd files /bin/systemctl daemon-reload >$output 2>&1 || : # reload to pick up any shared lib changes @@ -459,10 +475,6 @@ output2=/dev/null # find all instances instances="" # instances that require a restart after upgrade ninst=0 # number of instances found in total -if [ -n "$DEBUGPOSTTRANS" ] ; then - output=$DEBUGPOSTTRANS - output2=${DEBUGPOSTTRANS}.upgrade -fi # https://fedoraproject.org/wiki/Packaging:UsersAndGroups#Soft_static_allocation # Soft static allocation for UID and GID @@ -484,8 +496,41 @@ fi # Reload our sysctl before we restart (if we can) sysctl --system &> $output; true +%preun +if [ $1 -eq 0 ]; then # Final removal + # remove instance specific service files/links + rm -rf %{_sysconfdir}/systemd/system/%{groupname}.wants/* > /dev/null 2>&1 || : +fi + +%postun +/sbin/ldconfig +if [ $1 = 0 ]; then # Final removal + rm -rf /var/run/%{pkgname} +fi + +%post snmp +%systemd_post %{pkgname}-snmp.service + +%preun snmp +%systemd_preun %{pkgname}-snmp.service %{groupname} + +%postun snmp +%systemd_postun_with_restart %{pkgname}-snmp.service + +%post legacy-tools + + %if %{use_perl} # START UPGRADE SCRIPT + +if [ -n "$DEBUGPOSTTRANS" ] ; then + output=$DEBUGPOSTTRANS + output2=${DEBUGPOSTTRANS}.upgrade +else + output=/dev/null + output2=/dev/null +fi + echo looking for instances in %{_sysconfdir}/%{pkgname} > $output 2>&1 || : instbase="%{_sysconfdir}/%{pkgname}" for dir in $instbase/slapd-* ; do @@ -534,27 +579,6 @@ done exit 0 -%preun -if [ $1 -eq 0 ]; then # Final removal - # remove instance specific service files/links - rm -rf %{_sysconfdir}/systemd/system/%{groupname}.wants/* > /dev/null 2>&1 || : -fi - -%postun -/sbin/ldconfig 
-if [ $1 = 0 ]; then # Final removal - rm -rf /var/run/%{pkgname} -fi - -%post snmp -%systemd_post %{pkgname}-snmp.service - -%preun snmp -%systemd_preun %{pkgname}-snmp.service %{groupname} - -%postun snmp -%systemd_postun_with_restart %{pkgname}-snmp.service - %files %defattr(-,root,root,-) %if %{bundle_jemalloc} @@ -573,11 +597,30 @@ fi %config(noreplace)%{_sysconfdir}/sysconfig/%{pkgname} %config(noreplace)%{_sysconfdir}/sysconfig/%{pkgname}.systemd %{_datadir}/%{pkgname} +%exclude %{_datadir}/%{pkgname}/script-templates +%exclude %{_datadir}/%{pkgname}/updates +%exclude %{_datadir}/%{pkgname}/properties/*.res %{_datadir}/gdb/auto-load/* %{_unitdir} -%{_bindir}/* +%{_bindir}/dbscan +%{_mandir}/man1/dbscan.1.gz +%{_bindir}/ds-replcheck +%{_mandir}/man1/ds-replcheck.1.gz +%{_bindir}/ds-logpipe.py +%{_mandir}/man1/ds-logpipe.py.1.gz +%{_bindir}/ldclt +%{_mandir}/man1/ldclt.1.gz +%{_sbindir}/ldif2ldap +%{_mandir}/man8/ldif2ldap.8.gz +%{_bindir}/logconv.pl +%{_mandir}/man1/logconv.pl.1.gz +%{_bindir}/pwdhash +%{_mandir}/man1/pwdhash.1.gz +%{_bindir}/readnsstate +%{_mandir}/man1/readnsstate.1.gz # We have to seperate this from being a glob to ensure the caps are applied. 
%caps(CAP_NET_BIND_SERVICE=pe) %{_sbindir}/ns-slapd +%{_mandir}/man8/ns-slapd.8.gz %if 0%{?rhel} > 7 || 0%{?fedora} %{_sbindir}/dsconf %{_sbindir}/dscreate @@ -587,48 +630,30 @@ fi %{_libexecdir}/%{pkgname}/ds_selinux_enabled %{_libexecdir}/%{pkgname}/ds_selinux_port_query %{_libexecdir}/%{pkgname}/ds_systemd_ask_password_acl -%if %{use_perl} -%{_sbindir}/monitor %{_sbindir}/bak2db -%{_sbindir}/bak2db.pl -%{_sbindir}/cleanallruv.pl +%{_mandir}/man8/bak2db.8.gz %{_sbindir}/db2bak -%{_sbindir}/db2bak.pl +%{_mandir}/man8/db2bak.8.gz %{_sbindir}/db2index -%{_sbindir}/db2index.pl +%{_mandir}/man8/db2index.8.gz %{_sbindir}/db2ldif -%{_sbindir}/db2ldif.pl -%{_sbindir}/dbmon.sh +%{_mandir}/man8/db2ldif.8.gz %{_sbindir}/dbverify -%{_sbindir}/dn2rdn -%{_sbindir}/fixup-linkedattrs.pl -%{_sbindir}/fixup-memberof.pl +%{_mandir}/man8/dbverify.8.gz %{_sbindir}/ldif2db -%{_sbindir}/ldif2db.pl -%{_sbindir}/ldif2ldap -%{_sbindir}/migrate-ds.pl -%{_sbindir}/ns-accountstatus.pl -%{_sbindir}/ns-activate.pl -%{_sbindir}/ns-inactivate.pl -%{_sbindir}/ns-newpwpolicy.pl -%{_sbindir}/remove-ds.pl +%{_mandir}/man8/ldif2db.8.gz %{_sbindir}/restart-dirsrv -%{_sbindir}/restoreconfig -%{_sbindir}/saveconfig -%{_sbindir}/schema-reload.pl -%{_sbindir}/setup-ds.pl +%{_mandir}/man8/restart-dirsrv.8.gz %{_sbindir}/start-dirsrv +%{_mandir}/man8/start-dirsrv.8.gz %{_sbindir}/status-dirsrv +%{_mandir}/man8/status-dirsrv.8.gz %{_sbindir}/stop-dirsrv -%{_sbindir}/suffix2instance -%{_sbindir}/syntax-validate.pl +%{_mandir}/man8/stop-dirsrv.8.gz %{_sbindir}/upgradedb -%{_sbindir}/upgradednformat -%{_sbindir}/usn-tombstone-cleanup.pl -%{_sbindir}/verify-db.pl +%{_mandir}/man8/upgradedb.8.gz %{_sbindir}/vlvindex -%{_libdir}/%{pkgname}/perl -%endif +%{_mandir}/man8/vlvindex.8.gz %{_libdir}/%{pkgname}/python %dir %{_libdir}/%{pkgname}/plugins %{_libdir}/%{pkgname}/plugins/*.so @@ -638,8 +663,6 @@ fi %dir %{_localstatedir}/lib/%{pkgname} %dir %{_localstatedir}/log/%{pkgname} %ghost %dir 
%{_localstatedir}/lock/%{pkgname} -%{_mandir}/man1/* -%{_mandir}/man8/* %exclude %{_sbindir}/ldap-agent* %exclude %{_mandir}/man1/ldap-agent.1.gz %exclude %{_unitdir}/%{pkgname}-snmp.service @@ -685,6 +708,87 @@ fi %{_libdir}/%{pkgname}/librsds.so %endif +%files legacy-tools +%defattr(-,root,root,-) +%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel +%{_bindir}/infadd +%{_mandir}/man1/infadd.1.gz +%{_bindir}/ldif +%{_mandir}/man1/ldif.1.gz +%{_bindir}/migratecred +%{_mandir}/man1/migratecred.1.gz +%{_bindir}/mmldif +%{_mandir}/man1/mmldif.1.gz +%{_bindir}/rsearch +%{_mandir}/man1/rsearch.1.gz +%{_sbindir}/monitor +%{_mandir}/man8/monitor.8.gz +%{_sbindir}/dbmon.sh +%{_mandir}/man8/dbmon.sh.8.gz +%{_sbindir}/dn2rdn +%{_mandir}/man8/dn2rdn.8.gz +%{_sbindir}/restoreconfig +%{_mandir}/man8/restoreconfig.8.gz +%{_sbindir}/saveconfig +%{_mandir}/man8/saveconfig.8.gz +%{_sbindir}/stop-dirsrv +%{_sbindir}/suffix2instance +%{_mandir}/man8/suffix2instance.8.gz +%{_sbindir}/upgradednformat +%{_mandir}/man8/upgradednformat.8.gz +%if %{use_perl} +%{_datadir}/%{pkgname}/properties/*.res +%{_datadir}/%{pkgname}/script-templates +%{_datadir}/%{pkgname}/updates +%{_mandir}/man1/dbgen.pl.1.gz +%{_bindir}/repl-monitor +%{_mandir}/man1/repl-monitor.1.gz +%{_bindir}/repl-monitor.pl +%{_bindir}/cl-dump +%{_mandir}/man1/cl-dump.1.gz +%{_bindir}/cl-dump.pl +%{_bindir}/dbgen.pl +%{_mandir}/man8/bak2db.pl.8.gz +%{_sbindir}/bak2db.pl +%{_sbindir}/cleanallruv.pl +%{_mandir}/man8/cleanallruv.pl.8.gz +%{_sbindir}/db2bak.pl +%{_mandir}/man8/db2bak.pl.8.gz +%{_sbindir}/db2index.pl +%{_mandir}/man8/db2index.pl.8.gz +%{_sbindir}/db2ldif.pl +%{_mandir}/man8/db2ldif.pl.8.gz +%{_sbindir}/fixup-linkedattrs.pl +%{_mandir}/man8/fixup-linkedattrs.pl.8.gz +%{_sbindir}/fixup-memberof.pl +%{_mandir}/man8/fixup-memberof.pl.8.gz +%{_sbindir}/ldif2db.pl +%{_mandir}/man8/ldif2db.pl.8.gz +%{_sbindir}/migrate-ds.pl +%{_mandir}/man8/migrate-ds.pl.8.gz +%{_sbindir}/ns-accountstatus.pl 
+%{_mandir}/man8/ns-accountstatus.pl.8.gz +%{_sbindir}/ns-activate.pl +%{_mandir}/man8/ns-activate.pl.8.gz +%{_sbindir}/ns-inactivate.pl +%{_mandir}/man8/ns-inactivate.pl.8.gz +%{_sbindir}/ns-newpwpolicy.pl +%{_mandir}/man8/ns-newpwpolicy.pl.8.gz +%{_sbindir}/remove-ds.pl +%{_mandir}/man8/remove-ds.pl.8.gz +%{_sbindir}/schema-reload.pl +%{_mandir}/man8/schema-reload.pl.8.gz +%{_sbindir}/setup-ds.pl +%{_mandir}/man8/setup-ds.pl.8.gz +%{_sbindir}/syntax-validate.pl +%{_mandir}/man8/syntax-validate.pl.8.gz +%{_sbindir}/usn-tombstone-cleanup.pl +%{_mandir}/man8/usn-tombstone-cleanup.pl.8.gz +%{_sbindir}/verify-db.pl +%{_mandir}/man8/verify-db.pl.8.gz +%{_libdir}/%{pkgname}/perl +%endif + %files snmp %defattr(-,root,root,-) %doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel
0
73ea0816dabff27277f4b991535e2d9218a1f128
389ds/389-ds-base
Bug(s) fixed: 147585 Bug Description: The TARGET dn is normalized before the search plugins get the DN. We should provide the original DN. For all other operations we pass in the SLAPI_ORIGINAL_TARGET_DN in the pblock - we can do the same for search as well. This bug was reported by Verisign. Reviewed by: Noriko (Thanks!) Fix Description: Use the pblock param SLAPI_ORIGINAL_TARGET_DN for search operations. Platforms tested: RHEL3 Flag Day: no Doc impact: Yes. Need to document this in the SLAPI doc. QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none
commit 73ea0816dabff27277f4b991535e2d9218a1f128 Author: Rich Megginson <[email protected]> Date: Wed Feb 9 22:09:20 2005 +0000 Bug(s) fixed: 147585 Bug Description: The TARGET dn is normalized before the search plugins get the DN. We should provide the original DN. For all other operations we pass in the SLAPI_ORIGINAL_TARGET_DN in the pblock - we can do the same for search as well. This bug was reported by Verisign. Reviewed by: Noriko (Thanks!) Fix Description: Use the pblock param SLAPI_ORIGINAL_TARGET_DN for search operations. Platforms tested: RHEL3 Flag Day: no Doc impact: Yes. Need to document this in the SLAPI doc. QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c index 63a46e1bb..c73d0d798 100644 --- a/ldap/servers/slapd/plugin_internal_op.c +++ b/ldap/servers/slapd/plugin_internal_op.c @@ -458,6 +458,7 @@ slapi_free_search_results_internal(Slapi_PBlock *pb) For dn based search: SLAPI_TARGET_DN set to search base + SLAPI_ORIGINAL_TARGET_DN set to original un-normalized search base SLAPI_SEARCH_SCOPE set to search scope SLAPI_SEARCH_STRFILTER set to search filter SLAPI_CONTROLS_ARG set to request controls if present @@ -717,6 +718,7 @@ static int search_internal_callback_pb (Slapi_PBlock *pb, void *callback_data, * changed base search strings */ slapi_pblock_get(pb, SLAPI_SEARCH_TARGET, &original_base); + slapi_pblock_set(pb, SLAPI_ORIGINAL_TARGET_DN, slapi_ch_strdup(original_base)); op_shared_search (pb, 1); @@ -733,6 +735,10 @@ done: if(original_base != new_base) slapi_ch_free((void**)new_base); + /* we strdup'd this above - need to free */ + slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &original_base); + slapi_ch_free_string(&original_base); + return(rc); } diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c index f1df43a66..d7b894956 100644 --- a/ldap/servers/slapd/psearch.c +++ 
b/ldap/servers/slapd/psearch.c @@ -360,6 +360,11 @@ ps_send_results( void *arg ) slapi_pblock_set( ps->ps_pblock, SLAPI_SEARCH_TARGET, NULL ); slapi_ch_free_string(&base); + /* we strdup'd this in search.c - need to free */ + slapi_pblock_get( ps->ps_pblock, SLAPI_ORIGINAL_TARGET_DN, &base ); + slapi_pblock_set( ps->ps_pblock, SLAPI_ORIGINAL_TARGET_DN, NULL ); + slapi_ch_free_string(&base); + slapi_pblock_get( ps->ps_pblock, SLAPI_SEARCH_STRFILTER, &fstr ); slapi_pblock_set( ps->ps_pblock, SLAPI_SEARCH_STRFILTER, NULL ); slapi_ch_free_string(&fstr); diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c index 471d3157c..c82101589 100644 --- a/ldap/servers/slapd/search.c +++ b/ldap/servers/slapd/search.c @@ -224,6 +224,7 @@ do_search( Slapi_PBlock *pb ) } slapi_pblock_set( pb, SLAPI_SEARCH_TARGET, base ); + slapi_pblock_set( pb, SLAPI_ORIGINAL_TARGET_DN, slapi_ch_strdup(base) ); slapi_pblock_set( pb, SLAPI_SEARCH_SCOPE, &scope ); slapi_pblock_set( pb, SLAPI_SEARCH_DEREF, &deref ); slapi_pblock_set( pb, SLAPI_SEARCH_FILTER, filter ); @@ -269,6 +270,9 @@ free_and_return:; if (psearch){ operation->o_flags &= ~OP_FLAG_PS; } + /* we strdup'd this above - need to free */ + slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &base); + slapi_ch_free_string(&base); } }
0
8807440b4954fc810c50f67a10e1b928851b926c
389ds/389-ds-base
Ticket 49576 - ds-replcheck: fix certificate directory verification Description: The tool would crash if you attempted to use a certificate directory for conntacting replicas. https://pagure.io/389-ds-base/issue/49576 Reviewed by: ?
commit 8807440b4954fc810c50f67a10e1b928851b926c Author: Mark Reynolds <[email protected]> Date: Mon Jun 11 11:52:57 2018 -0400 Ticket 49576 - ds-replcheck: fix certificate directory verification Description: The tool would crash if you attempted to use a certificate directory for conntacting replicas. https://pagure.io/389-ds-base/issue/49576 Reviewed by: ? diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck index 661c9e0ce..62f911034 100755 --- a/ldap/admin/src/scripts/ds-replcheck +++ b/ldap/admin/src/scripts/ds-replcheck @@ -1225,7 +1225,7 @@ def main(): # Validate certdir opts['certdir'] = None if args.certdir: - if os.path.exists() and os.path.isdir(certdir): + if os.path.exists(args.certdir) and os.path.isdir(args.certdir): opts['certdir'] = args.certdir else: print("certificate directory ({}) does not exist or is not a directory".format(args.certdir))
0
b408ffcd5f101c73b6045eb72a5bef076071aea6
389ds/389-ds-base
Ticket 48145 - Allow merged logging of audit events Bug Description: The auditfail logging should be able to be directed to the same audit file, or to it's own seperate file. Fix Description: When nsslapd-auditfaillog is not specified the value of nsslapd-auditlog will be used for audit and auditfail events. If auditfaillog is specified, all results with RC != LDAP_SUCCESS (0) will go to the auditfail handler. https://fedorahosted.org/389/ticket/48145 Author: wibrown Review by: mreynolds (Thanks!)
commit b408ffcd5f101c73b6045eb72a5bef076071aea6 Author: William Brown <[email protected]> Date: Tue Nov 24 07:58:38 2015 +1000 Ticket 48145 - Allow merged logging of audit events Bug Description: The auditfail logging should be able to be directed to the same audit file, or to it's own seperate file. Fix Description: When nsslapd-auditfaillog is not specified the value of nsslapd-auditlog will be used for audit and auditfail events. If auditfaillog is specified, all results with RC != LDAP_SUCCESS (0) will go to the auditfail handler. https://fedorahosted.org/389/ticket/48145 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in index a25295b7b..1ec80099a 100644 --- a/ldap/ldif/template-dse.ldif.in +++ b/ldap/ldif/template-dse.ldif.in @@ -52,11 +52,8 @@ nsslapd-auditlog-mode: 600 nsslapd-auditlog-maxlogsize: 100 nsslapd-auditlog-logrotationtime: 1 nsslapd-auditlog-logrotationtimeunit: day -nsslapd-auditfaillog: %log_dir%/auditfail -nsslapd-auditfaillog-mode: 600 -nsslapd-auditfaillog-maxlogsize: 100 -nsslapd-auditfaillog-logrotationtime: 1 -nsslapd-auditfaillog-logrotationtimeunit: day +nsslapd-auditlog-logging-enabled: off +nsslapd-auditfaillog-logging-enabled: off nsslapd-rootdn: %rootdn% nsslapd-rootpw: %ds_passwd% nsslapd-maxdescriptors: 1024 diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c index 2ddfad078..45ef16e15 100644 --- a/ldap/servers/slapd/auditlog.c +++ b/ldap/servers/slapd/auditlog.c @@ -78,7 +78,7 @@ write_audit_log_entry( Slapi_PBlock *pb ) curtime = current_time(); /* log the raw, unnormalized DN */ dn = slapi_sdn_get_udn(sdn); - write_audit_file(SLAPD_AUDIT_LOG, operation_get_type(op), dn, change, flag, curtime, 0); + write_audit_file(SLAPD_AUDIT_LOG, operation_get_type(op), dn, change, flag, curtime, LDAP_SUCCESS); } void @@ -129,8 +129,13 @@ write_auditfail_log_entry( Slapi_PBlock *pb ) curtime = current_time(); /* log the raw, unnormalized DN */ dn 
= slapi_sdn_get_udn(sdn); - /* If we are combined */ - write_audit_file(SLAPD_AUDITFAIL_LOG, operation_get_type(op), dn, change, flag, curtime, pbrc); + if (config_get_auditfaillog() == NULL || strlen(config_get_auditfaillog()) == 0) { + /* If no auditfail log write to audit log */ + write_audit_file(SLAPD_AUDIT_LOG, operation_get_type(op), dn, change, flag, curtime, pbrc); + } else { + /* If we have our own auditfail log path */ + write_audit_file(SLAPD_AUDITFAIL_LOG, operation_get_type(op), dn, change, flag, curtime, pbrc); + } }
0
3e5e21c68afc5ff38d0d843fafaddd145e4d38f5
389ds/389-ds-base
bump version to 1.2.6.a2
commit 3e5e21c68afc5ff38d0d843fafaddd145e4d38f5 Author: Rich Megginson <[email protected]> Date: Mon Feb 8 08:53:44 2010 -0700 bump version to 1.2.6.a2 diff --git a/VERSION.sh b/VERSION.sh index 051b14bfc..00cdcdf86 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -14,7 +14,7 @@ VERSION_MAINT=6 # if this is a PRERELEASE, set VERSION_PREREL # otherwise, comment it out # be sure to include the dot prefix in the prerel -VERSION_PREREL=.a1 +VERSION_PREREL=.a2 # NOTES on VERSION_PREREL # use aN for an alpha release e.g. a1, a2, etc. # use rcN for a release candidate e.g. rc1, rc2, etc.
0
44c2d8f572229c386f57265b39413b43ad7a49aa
389ds/389-ds-base
Ticket 49520 - Cockpit UI - Add database chaining HTML Description: Add chaining pages/forms https://pagure.io/389-ds-base/issue/49520 Reviewed by: ?
commit 44c2d8f572229c386f57265b39413b43ad7a49aa Author: Mark Reynolds <[email protected]> Date: Tue Jan 9 16:13:51 2018 -0500 Ticket 49520 - Cockpit UI - Add database chaining HTML Description: Add chaining pages/forms https://pagure.io/389-ds-base/issue/49520 Reviewed by: ? diff --git a/Makefile.am b/Makefile.am index 4e76c1bbe..82b178263 100644 --- a/Makefile.am +++ b/Makefile.am @@ -290,6 +290,7 @@ gdbautoloaddir = $(prefixdir)/share/gdb/auto-load$(sbindir) cockpitdir = $(datadir)@cockpitdir@ cockpitstaticdir = $(datadir)@cockpitstaticdir@ cockpitimagesdir = $(datadir)@cockpitimagesdir@ +cockpitstaticimagesdir = $(datadir)@cockpitstaticimagesdir@ cockpitjsdir = $(datadir)@cockpitjsdir@/ cockpitcssdir = $(datadir)@cockpitcssdir@ @@ -900,13 +901,16 @@ cockpitstatic_DATA = src/cockpit/389-console/static/32px.png \ src/cockpit/389-console/static/style.min.css \ src/cockpit/389-console/static/throbber.gif -cockpitimages_DATA = src/cockpit/389-console/static/images/ui-icons_444444_256x240.png \ +cockpitstaticimages_DATA = src/cockpit/389-console/static/images/ui-icons_444444_256x240.png \ src/cockpit/389-console/static/images/ui-icons_777620_256x240.png \ src/cockpit/389-console/static/images/ui-icons_cc0000_256x240.png \ src/cockpit/389-console/static/images/ui-icons_555555_256x240.png \ src/cockpit/389-console/static/images/ui-icons_777777_256x240.png \ src/cockpit/389-console/static/images/ui-icons_ffffff_256x240.png +cockpitimages_DATA = src/cockpit/389-console/images/sort_asc.png \ + src/cockpit/389-console/images/sort_both.png + cockpitjs_DATA = src/cockpit/389-console/js/backend.js \ src/cockpit/389-console/js/ds.js \ src/cockpit/389-console/js/monitor.js \ diff --git a/configure.ac b/configure.ac index 46f13b9a2..981f8e21f 100644 --- a/configure.ac +++ b/configure.ac @@ -379,7 +379,8 @@ if test "$with_fhs_opt" = "yes"; then # relative to datadir cockpitdir=/389-console cockpitstaticdir=/389-console/static - cockpitimagesdir=/389-console/static/images + 
cockpitimagesdir=/389-console/images + cockpitstaticimagesdir=/389-console/static/images cockpitjsdir=/389-console/js cockpitcssdir=/389-console/css else @@ -421,7 +422,8 @@ else # relative to datadir cockpitdir=/$PACKAGE_NAME/389-console cockpitstaticdir=/$PACKAGE_NAME/389-console/static - cockpitimagesdir=/$PACKAGE_NAME/389-console/static/images + cockpitimagesdir=/$PACKAGE_NAME/389-console/images + cockpitstaticimagesdir=/$PACKAGE_NAME/389-console/static/images cockpitjsdir=/$PACKAGE_NAME/389-console/js cockpitcssdir=/$PACKAGE_NAME/389-console/css fi @@ -514,6 +516,7 @@ AC_SUBST(updatedir) AC_SUBST(cockpitdir) AC_SUBST(cockpitstaticdir) AC_SUBST(cockpitimagesdir) +AC_SUBST(cockpitstaticimagesdir) AC_SUBST(cockpitjsdir) AC_SUBST(cockpitcssdir) AC_SUBST(defaultuser) diff --git a/src/cockpit/389-console/backend.html b/src/cockpit/389-console/backend.html index 25158f11a..7b48f97fd 100644 --- a/src/cockpit/389-console/backend.html +++ b/src/cockpit/389-console/backend.html @@ -3,12 +3,15 @@ <li title="cn=config,cn=ldbm database,cn=plugins,cn=config " id="root" class="jstree-open ds-treenode" data-jstree='{"icon":"glyphicon glyphicon-tree-deciduous", "opened":true, "selected":true}'>Database <ul> - <li title="suffix" id="dc=example,dc=com" data-jstree='{"icon":"glyphicon glyphicon-tree-conifer"}'>dc=example,dc=com + <li title="suffix" id="suffix-dc=example,dc=com" data-jstree='{"icon":"glyphicon glyphicon-tree-conifer"}'>dc=example,dc=com <ul> - <li title="sub suffix" id="ou=people,dc=example,dc=com" data-jstree='{"icon":"glyphicon glyphicon-leaf"}'>ou=people,dc=example,dc=com</li> + <li title="sub suffix" id="suffix-ou=people-dc=example,dc=com" data-jstree='{"icon":"glyphicon glyphicon-leaf"}'>ou=people,dc=example,dc=com</li> </ul> </li> - <li title="suffix" id="o=ipaca" data-jstree='{"icon":"glyphicon glyphicon-tree-conifer"}'>o=ipaca.com + <li title="suffix" id="suffix-o=ipaca.com" data-jstree='{"icon":"glyphicon glyphicon-tree-conifer"}'>o=ipaca.com + <ul> + <li 
title="chained suffix" id="dblink-o=ipaca.com" data-jstree='{"icon":"glyphicon glyphicon-link"}'>MyChainingDBLink</li> + </ul> </li> </ul> </li> @@ -47,7 +50,7 @@ </div> <p></p> - <input type="button" class="accordion cache-accordion ds-agmt-wiz-button ds-accordion-spacing" id="cache-accordion" value="Cache Settings &#9660;"/> + <input type="button" class="accordion cache-accordion ds-accordion-button ds-accordion-spacing" id="cache-accordion" value="Cache Settings &#9660;"/> <div class="ds-accordion-panel"> <div class="ds-container"> <div class="ds-split"> @@ -98,23 +101,23 @@ <p></p> </div> - <input type="button" class="accordion db-accordion ds-agmt-wiz-button ds-accordion-spacing" id="db-accordion" value="Advanced Database Settings &#9660;"/> + <input type="button" class="accordion db-accordion ds-accordion-button ds-accordion-spacing" id="db-accordion" value="Advanced Database Settings &#9660;"/> <div class="ds-accordion-panel"> <div class="ds-container"> <div> - <label for="nsslapd-db-logdirectory" class="ds-config-label" title= + <label for="nsslapd-db-logdirectory" class="ds-config-label ds-cache-label" title= "Database Transaction Log Location (nsslapd-db-logdirectory)."><b >Transaction Logs Directory</b></label><input class="ds-input" type="text" id="nsslapd-db-logdirectory" size="15"/> - <label for="nsslapd-db-home-directory" class="ds-config-label" title= + <label for="nsslapd-db-home-directory" class="ds-config-label ds-cache-label" title= "Location for database memory mapped files. 
You must specify a subdirectory of a tempfs type filesystem (nsslapd-db-home-directory)."><b >Database Home Directory</b></label><input class="ds-input" type="text" id="nsslapd-db-home-directory" size="15"/> - <label for="nsslapd-db-locks" class="ds-config-label" title= + <label for="nsslapd-db-locks" class="ds-config-label ds-cache-label" title= "The number of database locks (nsslapd-db-locks)."><b >Database Locks</b></label><input class="ds-input" type="text" id="nsslapd-db-locks" size="15"/> - <label for="nsslapd-db-checkpoint-interval" class="ds-config-label" title= + <label for="nsslapd-db-checkpoint-interval" class="ds-config-label ds-cache-label" title= "Amount of time in seconds after which the Directory Server sends a checkpoint entry to the database transaction log (nsslapd-db-checkpoint-interval)."><b >Database Checkpoint Interval</b></label><input class="ds-input" type="text" id="nsslapd-db-checkpoint-interval" size="15"/> - <label for="nsslapd-db-checkpoint-interval" class="ds-config-label" title= + <label for="nsslapd-db-checkpoint-interval" class="ds-config-label ds-cache-label" title= "The interval in seconds when the database is compacted (nsslapd-db-compactdb-interval)."><b >Database Compact Interval</b></label><input class="ds-input" type="text" id="nsslapd-db-checkpoint-interval" size="15"/> <p></p> @@ -131,18 +134,211 @@ "sets whether database transaction log entries are immediately written to the disk. 
(nsslapd-db-durable-transactions)."> Enable Durable transactions</label> <p></p> </div> + <p></p><br> </div> + <p></p> </div> + + <!-- Database Chaining --> + <input type="button" class="accordion suffix-accordion ds-accordion-button ds-accordion-spacing" id="suffix-chaining-accordion" value="Database Chaining Settings &#9660;"/> + <div class="ds-accordion-panel"> + <div class="ds-accordian-div"> + <h3>Database Link Settings</h3> + <hr class="ds-hr"> + <p></p> + <div class="ds-container"> + <div class="ds-chaining-split"> + <form> + <label class="ds-config-label" for "chaining-oid-list" title= + "A list of LDAP control OIDs to be forwarded through chaining"><b>Forwarded LDAP Controls</b></label> + <select id="chaining-oid-list" class="ds-chaining-list" name="nstransmittedcontrols" multiple> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.344</option> + </select> + </form> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="chaining-oid-button" class="ds-button-left">Add</button> + </div> + <div class="ds-panel-right"> + <button type="button" id="delete-chaining-oid-button" class="ds-button-right">Delete</button> + </div> + </div> + </div> + <div class="ds-chaining-divider"></div> + <div class="ds-chaining-split"> + <form> + <label class="ds-config-label" for "chaining-comp-list" title= + "A list of components to go through chaining"><b>Components to Chain</b></label> + <select id="chaining-comp-list" class="ds-chaining-list" name="nsactivechainingcomponents" multiple> + <option value="1.5.6.5.5.6.88.4553.344">cn=roles,cn=components,cn=config</option> + </select> + </form> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="chaining-comp-button" class="ds-button-left">Add</button> + </div> + <div class="ds-panel-right"> + <button type="button" id="delete-chaining-comp-button" class="ds-button-right">Delete</button> + </div> + </div> + </div> + </div> + </div> + 
<p></p> + + <div class="ds-accordian-div"> + <h3>Default Database Link Creation Settings</h3> + <hr class="ds-hr"> + <div class="ds-container"> + <div class="ds-split"> + <label for="chaining-size-limit" class="ds-config-label" title= + "The size limit of entries returned over a database link (nsslapd-sizelimit)."><b + >Size Limit</b></label><input class="ds-input" type="text" id="chaining-size-limit" size="15"/> + <label for="chaining-time-limit" class="ds-config-label" title= + "The time limit of an operation over a database link (nsslapd-timelimit)."><b + >Time Limit</b></label><input class="ds-input" type="text" id="chaining-time-limit" size="15"/> + <label for="nsbindconnectionslimit" class="ds-config-label" title= + "The maximum number of TCP connections the database link establishes with the remote server. (nsbindconnectionslimit)."><b + >Max TCP Connections</b></label><input class="ds-input" type="text" id="nsbindconnectionslimit" size="15"/> + <label for="nsoperationconnectionslimit" class="ds-config-label" title= + "The maximum number of connections allowed over the database link. (nsoperationconnectionslimit)."><b + >Max LDAP Connections</b></label><input class="ds-input" type="text" id="nsoperationconnectionslimit" size="15"/> + <label for="nsconcurrentbindlimit" class="ds-config-label" title= + "The maximum number of concurrent bind operations per TCP connection. (nsconcurrentbindlimit)."><b + >Max Binds Per Connection</b></label><input class="ds-input" type="text" id="nsconcurrentbindlimit" size="15"/> + <label for="nsbindtimeout" class="ds-config-label" title= + "The amount of time before the bind attempt times out. 
(nsbindtimeout)."><b + >Bind Timeout</b></label><input class="ds-input" type="text" id="nsbindtimeout" size="15"/> + </div> + <div class="ds-divider"></div> + <div class="ds-split"> + <label for="nsbindretrylimit" class="ds-config-label" title= + "The number of times the database link tries to bind with the remote server after a connection failure. (nsbindretrylimit)."><b + >Bind Retry Limit</b></label><input class="ds-input" type="text" id="nsbindretrylimit" size="15"/> + <label for="nsconcurrentoperationslimit" class="ds-config-label" title= + "The maximum number of operations per connections. (nsconcurrentoperationslimit)."><b + >Max Operations Per Connection</b></label><input class="ds-input" type="text" id="nsconcurrentoperationslimit" size="15"/> + <label for="nsconnectionlife" class="ds-config-label" title= + "The life of a database link connection to the remote server. 0 is unlimited (nsconnectionlife)."><b + >Connection Lifetime (in seconds)</b></label><input class="ds-input" type="text" id="nsconnectionlife" size="15"/> + <label for="nsabandonedsearchcheckinterval" class="ds-config-label" title= + "The number of seconds that pass before the server checks for abandoned operations. (nsabandonedsearchcheckinterval)."><b + >Abandoned Op Check Interval</b></label><input class="ds-input" type="text" id="nsabandonedsearchcheckinterval" size="15"/> + <label for="nshoplimit" class="ds-config-label" title= + "The maximum number of times a request can be forwarded from one database link to another. 
(nshoplimit)."><b + >Database Link Hop Limit</b></label><input class="ds-input" type="text" id="nshoplimit" size="15"/> + <p></p> + <input type="checkbox" class="ds-config-checkbox" id="nschecklocalaci"><label + for="nschecklocalaci" class="ds-label" title= + "Sets whether ACIs are evaluated on the database link as well as the remote data server (nschecklocalaci)."> Check Local ACIs</label> + <input type="checkbox" class="ds-config-checkbox" id="nsreferralonscopedsearch"><label + for="nsreferralonscopedsearch" class="ds-label" title= + "Sets whether referrals are returned by scoped searches (meaning 'one-level' or 'subtree' scoped searches). (nsreferralonscopedsearch)."> Send Referral On Scoped Search</label> + </div> + </div> + </div> + </div> + <p></p> + <hr class="ds-hr"> <div class="ds-button-border"> <button id="server-config-save-btn" class="btn btn-default ds-button">Save</button> </div> </div> + + <!-- Database Link Page --> + <div id="chaining" hidden> + <h2 id="chaining-header">Database Chaining Configuration</h2> + <hr class="ds-hr"> + <div class="ds-container"> + <div> + <label for="nsfarmserverurl" class="ds-config-label" title= + "The URL for the remote server. Add additional failure servers URLs separated by a space. 
(nsfarmserverurl)"><b + >Remote Server URL(s)</b></label><input class="ds-dblink-input" type="text" id="nsfarmserverurl"/> + <label for="nsmultiplexorbinddn" class="ds-config-label" title="Bind DN used to authenticate against the remote server (nsmultiplexorbinddn)."><b>Remote Server Bind DN</b></label><input + class="ds-dblink-input" type="text" placeholder="Bind DN" id="nsmultiplexorbinddn" name="name"> + <label for="nsmultiplexorcredentials" class="ds-config-label" title="Replication Bind DN (nsDS5ReplicaCredentials)."><b>Bind DN Credentials</b></label><input + class="ds-dblink-input" type="password" placeholder="Enter password" id="nsmultiplexorcredentials" name="name"> + <label for="nsmultiplexorcredentials-confirm" class="ds-config-label" title="Confirm password"><b>Confirm Password</b></label><input + class="ds-dblink-input" type="password" placeholder="Confirm password" id="nsmultiplexorcredentials-confirm" name="name"> + <label for="dblink-conn" class="ds-config-label" title="The connection protocol for the remote server."><b>Connection Protocol</b></label><select + class="btn btn-default dropdown ds-dblink-dropdown" id="dblink-conn"> + <option>LDAP</option> + <option>LDAPS</option> + <option>Start TLS</option> + </select> + <label for="nsbindmechanism" class="ds-config-label" title="The bind method for contacting the remote server (nsbindmechanism)."><b>Bind Method</b></label><select + class="btn btn-default dropdown ds-dblink-dropdown" id="nsbindmechanism"> + <option>Simple</option> + <option>SASL/DIGEST-MD5</option> + <option>SASL/GSSAPI</option> + </select> + </div> + </div> + <p></p> + + <input type="button" class="accordion chaining-adv-accordion ds-accordion-button ds-accordion-spacing" id="chaining-adv-accordion" value="Advanced Database Link Settings &#9660;"/> + <div class="ds-accordion-panel"> + <div class="ds-accordian-div"> + <div class="ds-container"> + <div class="ds-split"> + <label for="chaining-size-limit" class="ds-config-label" title= + 
"The size limit of entries returned over a database link (nsslapd-sizelimit)."><b + >Size Limit</b></label><input class="ds-input" type="text" id="chaining-size-limit" size="15"/> + <label for="chaining-time-limit" class="ds-config-label" title= + "The time limit of an operation over a database link (nsslapd-timelimit)."><b + >Time Limit</b></label><input class="ds-input" type="text" id="chaining-time-limit" size="15"/> + <label for="nsbindconnectionslimit" class="ds-config-label" title= + "The maximum number of TCP connections the database link establishes with the remote server. (nsbindconnectionslimit)."><b + >Max TCP Connections</b></label><input class="ds-input" type="text" id="nsbindconnectionslimit" size="15"/> + <label for="nsoperationconnectionslimit" class="ds-config-label" title= + "The maximum number of connections allowed over the database link. (nsoperationconnectionslimit)."><b + >Max LDAP Connections</b></label><input class="ds-input" type="text" id="nsoperationconnectionslimit" size="15"/> + <label for="nsconcurrentbindlimit" class="ds-config-label" title= + "The maximum number of concurrent bind operations per TCP connection. (nsconcurrentbindlimit)."><b + >Max Binds Per Connection</b></label><input class="ds-input" type="text" id="nsconcurrentbindlimit" size="15"/> + <label for="nsbindtimeout" class="ds-config-label" title= + "The amount of time before the bind attempt times out. (nsbindtimeout)."><b + >Bind Timeout</b></label><input class="ds-input" type="text" id="nsbindtimeout" size="15"/> + </div> + <div class="ds-divider"></div> + <div class="ds-split"> + <label for="nsbindretrylimit" class="ds-config-label" title= + "The number of times the database link tries to bind with the remote server after a connection failure. 
(nsbindretrylimit)."><b + >Bind Retry Limit</b></label><input class="ds-input" type="text" id="nsbindretrylimit" size="15"/> + <label for="nsconcurrentoperationslimit" class="ds-config-label" title= + "The maximum number of operations per connections. (nsconcurrentoperationslimit)."><b + >Max Operations Per Connection</b></label><input class="ds-input" type="text" id="nsconcurrentoperationslimit" size="15"/> + <label for="nsconnectionlife" class="ds-config-label" title= + "The life of a database link connection to the remote server. 0 is unlimited (nsconnectionlife)."><b + >Connection Lifetime (in seconds)</b></label><input class="ds-input" type="text" id="nsconnectionlife" size="15"/> + <label for="nsabandonedsearchcheckinterval" class="ds-config-label" title= + "The number of seconds that pass before the server checks for abandoned operations. (nsabandonedsearchcheckinterval)."><b + >Abandoned Op Check Interval</b></label><input class="ds-input" type="text" id="nsabandonedsearchcheckinterval" size="15"/> + <label for="nshoplimit" class="ds-config-label" title= + "The maximum number of times a request can be forwarded from one database link to another. (nshoplimit)."><b + >Database Link Hop Limit</b></label><input class="ds-input" type="text" id="nshoplimit" size="15"/> + <p></p> + <input type="checkbox" class="ds-config-checkbox" id="nschecklocalaci"><label + for="nschecklocalaci" class="ds-label" title= + "Sets whether ACIs are evaluated on the database link as well as the remote data server (nschecklocalaci)."> Check Local ACIs</label> + <input type="checkbox" class="ds-config-checkbox" id="nsreferralonscopedsearch"><label + for="nsreferralonscopedsearch" class="ds-label" title= + "Sets whether referrals are returned by scoped searches (meaning 'one-level' or 'subtree' scoped searches). 
(nsreferralonscopedsearch)."> Send Referral On Scoped Search</label> + </div> + </div> + </div> + </div> + <p></p> + </div> + + + <!-- Suffix Configuration Page --> <div id="suffix" hidden=> <h2 id="suffix-header">Suffix Configuration</h2> <hr class="ds-hr"> - <div id="db" class="ds-container"> + <div class="ds-container"> <div> <label for="nsslapd-cachememsize" class="ds-config-label" title= "The size for the available memory space for the entry cache (nsslapd-cachememsize)."><b @@ -186,7 +382,7 @@ </div> <hr class="ds-hr"> - <input type="button" class="accordion ds-agmt-wiz-button ds-accordion-spacing suffix-accordion" id="suffix-index-accordion" value="Database Indexes &#9660;"/> + <input type="button" class="accordion ds-accordion-button ds-accordion-spacing suffix-accordion" id="suffix-index-accordion" value="Database Indexes &#9660;"/> <div class="ds-accordion-panel"> <h2>System Indexes</h2> <hr class="ds-hr"> @@ -286,7 +482,7 @@ <hr class="ds-hr"> </div> - <input type="button" class="accordion suffix-accordion ds-agmt-wiz-button ds-accordion-spacing" id="suffix-attrencrypt-accordion" value="Attribute Encryption &#9660;"/> + <input type="button" class="accordion suffix-accordion ds-accordion-button ds-accordion-spacing" id="suffix-attrencrypt-accordion" value="Attribute Encryption &#9660;"/> <div class="ds-accordion-panel"> <table id="attr-encrypt-table" class="display ds-repl-table" cellspacing="0" width="100%"> <thead> @@ -308,23 +504,223 @@ <p></p> <hr class="ds-hr"> </div> + </div> - <input type="button" class="accordion suffix-accordion ds-agmt-wiz-button ds-accordion-spacing" id="suffix-chaining-accordion" value="Database Chaining &#9660;"/> - <div class="ds-accordion-panel"> - <h3>Default Chaining Configuration Settings</h3> - <div id="db" class="ds-container"> - <div> - <label for="nsslapd-ZZZZZZZ" class="ds-config-label" title= - "HOVER TEXT."><b - >Some Chaining Setting</b></label><input class="ds-input" type="text" id="nsslapd-ZZZZZZZ" size="15"/> 
+ <!-- Modals/Popups/Wizards --> + + <div id="chaining-oids-form" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Select Forwarded Chaining Controls</b> <span class="close" id="chain-oid-close">&times;</span></h2> + <hr class="ds-hr"> + <p></p> + <form> + <label class="ds-config-label" for "avail-chaining-oid-list" title= + "A list of LDAP control OIDs to be forwarded through chaining"><b>Available LDAP Controls</b></label> + <select + id="avail-chaining-oid-list" class="ds-chaining-form-list" name="availcontrols" multiple> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.344</option> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.345</option> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.346</option> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.347</option> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.348</option> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.349</option> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.340</option> + <option value="1.5.6.5.5.6.88.4553.344">1.5.6.5.5.6.88.4553.351</option> + </select> + </form> + <p></p> + <hr class="ds-hr"> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="chaining-oid-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="chaining-oid-save" class="ds-button-right">Add Controls</button> + </div> </div> - <div class="ds-divider"></div> - <div class="ds-divider"></div> - <div class="ds-split"> + </div> + </form> + </div> + + <div id="chaining-comp-form" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Select Chaining Components</b> <span class="close" id="chain-comp-close">&times;</span></h2> + <hr class="ds-hr"> + <p></p> + <form> + <label class="ds-config-label" for "chaining-comp-list" title= + "A list of 
components to work with chaining"><b>Available LDAP Controls</b></label> + <select + id="chaining-comp-list" class="ds-chaining-form-list" name="availcomps" multiple> + <option value="cn=roles,cn=compnments,cn=config">cn=roles,cn=components,cn=config</option> + <option value="cn=Password Policy,cn=components,cn=config">cn=Password Policy,cn=components,cn=config</option> + </select> + </form> + <p></p> + <hr class="ds-hr"> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="chaining-comp-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="chaining-comp-save" class="ds-button-right">Add Components</button> + </div> </div> - <hr class="ds-hr"> - </div> + </div> + </form> + </div> + + <div id="create-db-link-form" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Create Database Link</b> <span class="close" id="create-chain-close">&times;</span></h2> + <hr class="ds-hr"> + <div class="ds-container"> + <div> + <label for="nsfarmserverurl" class="ds-config-label" title= + "The URL for the remote server. Add additional failure servers URLs separated by a space. 
(nsfarmserverurl)"><b + >Remote Server URL(s)</b></label><input class="ds-dblink-form-input" type="text" id="nsfarmserverurl"/> + <label for="nsmultiplexorbinddn" class="ds-config-label" title="Bind DN used to authenticate against the remote server (nsmultiplexorbinddn)."><b>Remote Server Bind DN</b></label><input + class="ds-dblink-form-input" type="text" placeholder="Bind DN" id="nsmultiplexorbinddn" name="name"> + <label for="nsmultiplexorcredentials" class="ds-config-label" title="Replication Bind DN (nsDS5ReplicaCredentials)."><b>Bind DN Credentials</b></label><input + class="ds-dblink-form-input" type="password" placeholder="Enter password" id="nsmultiplexorcredentials" name="name"> + <label for="nsmultiplexorcredentials-confirm" class="ds-config-label" title="Confirm password"><b>Confirm Password</b></label><input + class="ds-dblink-form-input" type="password" placeholder="Confirm password" id="nsmultiplexorcredentials-confirm" name="name"> + <label for="dblink-conn" class="ds-config-label" title="The connection protocol for the remote server."><b>Connection Protocol</b></label><select + class="btn btn-default dropdown ds-dblink-dropdown" id="dblink-conn"> + <option>LDAP</option> + <option>LDAPS</option> + <option>Start TLS</option> + </select> + <label for="nsbindmechanism" class="ds-config-label" title="The bind method for contacting the remote server (nsbindmechanism)."><b>Bind Method</b></label><select + class="btn btn-default dropdown ds-dblink-dropdown" id="nsbindmechanism"> + <option>Simple</option> + <option>SASL/DIGEST-MD5</option> + <option>SASL/GSSAPI</option> + </select> + </div> + </div> + <p></p> + <hr class="ds-hr"> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="chaining-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="chaining-save" class="ds-button-right">Create Link</button> + </div> + </div> + </div> + </form> + </div> + + + + 
+ + + <div id="create-index" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Create Index</b> <span class="close" id="create-index-close">&times;</span></h2> + <hr class="ds-hr"> + + <p></p> + <hr class="ds-hr"> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="create-index-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="create-index-save" class="ds-button-right">Create Index</button> + </div> + </div> + </div> + </form> + </div> + <div id="edit-index" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Edit Index</b> <span class="close" id="edit-index-close">&times;</span></h2> + <hr class="ds-hr"> + + <p></p> + <hr class="ds-hr"> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="edit-index-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="edit-index-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <div id="add-encrypted-attr" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Add Encrypted Attribute</b> <span class="close" id="encrypted-attr-close">&times;</span></h2> + <hr class="ds-hr"> + + <p></p> + <hr class="ds-hr"> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="add-encrypted-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="add-encrypted-save" class="ds-button-right">Add Attribute</button> + </div> + </div> + </div> + </form> + </div> + + <div id="create-suffix" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Create Suffix</b> <span class="close" id="create-suffix-close">&times;</span></h2> + <hr 
class="ds-hr"> + + <p></p> + <hr class="ds-hr"> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="add-encrypted-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="add-encrypted-save" class="ds-button-right">Create Suffix</button> + </div> + </div> + </div> + </form> + </div> + + <div id="create-sub-suffix" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Create Sub Suffix</b> <span class="close" id="create-sub-suffix-close">&times;</span></h2> + <hr class="ds-hr"> + + <p></p> + <hr class="ds-hr"> + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="create-sub-suffix-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="create-sub-suffix-save" class="ds-button-right">Create Sub-Suffix</button> + </div> + </div> + </div> + </form> </div> + +</div> diff --git a/src/cockpit/389-console/css/ds.css b/src/cockpit/389-console/css/ds.css index 71b8f4510..14c05d446 100644 --- a/src/cockpit/389-console/css/ds.css +++ b/src/cockpit/389-console/css/ds.css @@ -188,20 +188,31 @@ } .ds-input { + height: 37px !important; margin-top: 5px; padding-right: 10px; } +.ds-input-lrg { + width: 315px !important; /* <input size="40"> */ + height: 37px !important; + margin-top: 5px; + padding-right: 10px; + height: 37px !important; +} + .ds-wiz-input { - width: 180px !important; + width: 415px !important; /* 180px */ margin-top: 5px; padding-right: 10px; + height: 37px !important; } .ds-history-input { margin-top: !important; margin-right: 5px; margin-left: 40px; + height: 37px !important; } .ds-divider { width: 35px; @@ -303,6 +314,40 @@ text-align: left; } +.ds-dblink-dropdown { + color: black !important; + padding: 0px !important; + line-height: 0 !important; + height: 40px; + width: 175px !important; + text-align: left; + outline: 0 
!important; + margin-top: 5px; +} + +.ds-dblink-dropdown a { + padding: 0px !important; + padding-left: 10px !important; + line-height: 0 !important; + height: 40px; + width: 175px !important; + text-align: left; +} + +.ds-dblink-input { + width: 300px !important; + margin-top: 5px; + padding-right: 10px; + height: 37px !important; +} + +.ds-dblink-form-input { + width: 415px !important; + margin-top: 5px; + padding-right: 10px; + height: 37px !important; +} + .ds-agmt-init-dropdown { margin-top: 0px !important; margin-left: 10px; @@ -456,15 +501,25 @@ background-color: #f7f7f7; margin: 2px auto 15% auto; /* 5% from the top, 15% from the bottom and centered */ border: 1px solid black; - width: 650px; /* Could be more or less, depending on screen size */ + width: 650px; /* Could be more or less, depending on screen size 650px */ +} + +/* The Modal/Wizard Close Button */ +.close { + color: #aaa; + float: right; + font-size: 28px; + font-weight: bold; } .close:hover, .close:focus { - color: red; - cursor: pointer; + color: black; + text-decoration: none; + cursor: pointer; } + /* Clear floats */ .clearfix::after { content: ""; @@ -494,10 +549,15 @@ .ds-agmt-wiz-button { height: 40px; - width: 400px; + width: 630px; font-weight: bold; } +.ds-accordion-button { + height: 40px; + width: 400px; + font-weight: bold; +} .ds-agmt-wiz-dropdown { width: 180px !important; margin-top: 5px; @@ -705,12 +765,33 @@ line-height:0; } +.ds-accordian-div { + margin-left: 20px; +} + +.ds-chaining-list { + width: 350px; + height: 300px; +} + +.ds-chaining-split { + width: 350px; +} + +.ds-chaining-divider { + width: 85px; +} + +.ds-chaining-form-list { + width: 350px; + height: 400px; +} textarea { margin-top: 5px; padding-top: 5px; vertical-align: top; - width: 315px; + width: 315px !important; height: 80px; resize: none; word-wrap: break-word !important; @@ -727,4 +808,3 @@ select { outline: 0 !important; } - diff --git a/src/cockpit/389-console/js/backend.js 
b/src/cockpit/389-console/js/backend.js index 5350b98c1..00631a826 100644 --- a/src/cockpit/389-console/js/backend.js +++ b/src/cockpit/389-console/js/backend.js @@ -1,51 +1,63 @@ function customMenu (node) { - var root_items = { - "create_suffix": { - "label": "Create Suffix", - "icon": "glyphicon glyphicon-plus", - "action": function (data) { - // Create suffix - } + var root_items = { + "create_suffix": { + "label": "Create Suffix", + "icon": "glyphicon glyphicon-plus", + "action": function (data) { + // TODO Create suffix } - }; - - var suffix_items = { + } + }; + + var dblink_items = { + "delete_link": { + "label": "Delete DB Link", + "icon": "glyphicon glyphicon-trash", + "action": function (data) { + // TODO Delete db link + } + } + }; + + var suffix_items = { 'import': { "label": "Initialize Suffix", "icon": "glyphicon glyphicon-circle-arrow-right", "action": function (data) { - // Create suffix + // TODO Import suffix } }, 'export': { "label": "Export Suffix", "icon": "glyphicon glyphicon-circle-arrow-left", "action": function (data) { - // Create suffix + // TODO Export suffix } }, 'reindex': { "label": "Reindex Suffix", "icon": "glyphicon glyphicon-wrench", "action": function (data) { - // Create suffix + // TODO Reindex suffix } }, "create_db_link": { "label": "Create Database Link", "icon": "glyphicon glyphicon-link", "action": function (data) { - // Create suffix - + var suffix_id = $(node).attr('id'); + var parent_suffix = suffix_id.substring(suffix_id.indexOf('-')+1); + //clear_chaining_form(); //TODO + $("#create-db-link-form").css('display', 'block'); } }, "create_sub_suffix": { "label": "Create Sub-Suffix", "icon": "glyphicon glyphicon-triangle-bottom", "action": function (data) { - // Create suffix + // TODO reate suffix } }, @@ -53,15 +65,18 @@ function customMenu (node) { "label": "Delete Suffix", "icon": "glyphicon glyphicon-remove", "action": function (data) { - // Create suffix + // TODO Delete suffix } } }; if ( $(node).attr('id') == 
"root" ) { - return root_items; + return root_items; + } else if ( $(node).attr('id').startsWith('suffix') ){ + return suffix_items; } else { - return suffix_items; + // chaining + return dblink_items; } }; @@ -74,14 +89,25 @@ function load_jstree() { }); $('#tree').on("changed.jstree", function (e, data) { - console.log("The selected nodes are:"); - console.log(data.selected); - var suffix = data.selected; - if (suffix == "root"){ + var node_type = data.selected[0]; + var suffix = data.instance.get_node(data.selected[0]).text.replace(/(\r\n|\n|\r)/gm,""); + + console.log("The selected nodes are: " + node_type + " = " + suffix); + + if (node_type == "root"){ + $("#suffix").hide(); + $("#chaining").hide(); $("#db").show(); - $("#suffix").hide(); + } else if (node_type.startsWith("dblink")) { + var parent_suffix = node_type.substring(node_type.indexOf('-')+1); + $("#db").hide(); + $("#suffix").hide(); + $("#chaining-header").html("Database Chaining Configuration <font size=\"2\">(<b>" + parent_suffix + "</b>)</font>"); + $("#chaining").show(); } else { + // suffix $("#db").hide(); + $("#chaining").hide(); $("#suffix-header").html("Suffix Configuration <font size=\"2\">(<b>" + suffix + "</b>)</font>"); $("#suffix").show(); } @@ -109,7 +135,11 @@ $(document).ready( function() { "dom": '<"pull-left"f><"pull-right"l>tip', "language": { "emptyTable": "No Referrals" - } + }, + "columnDefs": [ { + "targets": 1, + "orderable": false + } ] }); $('#system-index-table').DataTable( { @@ -127,6 +157,10 @@ $(document).ready( function() { "language": { "emptyTable": "No Indexes" }, + "columnDefs": [ { + "targets": 6, + "orderable": false + } ] }); $('#attr-encrypt-table').DataTable( { "paging": true, @@ -135,6 +169,10 @@ $(document).ready( function() { "language": { "emptyTable": "No Encrypted Attributes" }, + "columnDefs": [ { + "targets": 2, + "orderable": false + } ] }); // Accordion opening/closings @@ -164,7 +202,7 @@ $(document).ready( function() { } } } - + var cache_acc = 
document.getElementsByClassName("cache-accordion"); for (var i = 0; i < cache_acc.length; i++) { cache_acc[i].onclick = function() { @@ -178,8 +216,22 @@ $(document).ready( function() { } } + var chain_adv_acc = document.getElementsByClassName("chaining-adv-accordion"); + for (var i = 0; i < chain_adv_acc.length; i++) { + chain_adv_acc[i].onclick = function() { + this.classList.toggle("active"); + var panel = this.nextElementSibling; + if (panel.style.display === "block") { + panel.style.display = "none"; + } else { + panel.style.display = "block"; + } + } + } + + $(".index-type").attr('readonly', 'readonly'); - + if ( $("#manual-cache").is(":checked") ){ $("#auto-cache-form").hide(); $("#manual-cache-form").show(); @@ -201,7 +253,7 @@ $(document).ready( function() { $("#manual-import-cache-form").hide(); $("#auto-import-cache-form").show(); } - + $(".cache-role").on("change", function() { var role = $("input[name=cache-role]:checked").val(); if (role == "manual-cache") { @@ -231,6 +283,95 @@ $(document).ready( function() { $("#auto-import-cache-form").show(); } }); + + // Based on the db-link connection type change the agmt-auth options + $("#dblink-conn").change(function() { + var ldap_opts = {"Simple": "Simple", + "SASL/DIGEST-MD5": "SASL/DIGEST-MD5", + "SASL/GSSAPI": "SASL/GSSAPI"}; + var ldaps_opts = {"Simple": "Simple", + "SSL Client Authentication": "SSL Client Authentication", + "SASL/DIGEST-MD5": "SASL/DIGEST-MD5"}; + var $auth = $("#nsbindmechanism"); + $auth.empty(); + var conn = $('#dblink-conn').val(); + if (conn == "LDAP"){ + $.each(ldap_opts, function(key, value) { + $auth.append($("<option></option>").attr("value", value).text(key)); + }); + } else { + // TLS options + $.each(ldaps_opts, function(key, value) { + $auth.append($("<option></option>").attr("value", value).text(key)); + }); + } + $("#nsmultiplexorbinddn").prop('disabled', false); + $("#nsmultiplexorcredentials").prop('disabled', false); + 
$("#nsmultiplexorcredentials-confirm").prop('disabled', false); + }); + + // Check for auth changes and disable/enable bind DN & password for db-links + $("#nsbindmechanism").change(function() { + var authtype = $('#nsbindmechanism').val(); + if (authtype == "SSL Client Authentication") { + $("#nsmultiplexorbinddn").prop('disabled', true); + $("#nsmultiplexorcredentials").prop('disabled', true); + $("#nsmultiplexorcredentials-confirm").prop('disabled', true); + } else { + $("#nsmultiplexorbinddn").prop('disabled', false); + $("#nsmultiplexorcredentials").prop('disabled', false); + $("#nsmultiplexorcredentials-confirm").prop('disabled', false); + } + }); + + // + // Modal Forms + // + + // Chaining OIDS + $("#chain-oid-close").on("click", function() { + $("#chaining-oids-form").css('display', 'none'); + }); + $("#chaining-oid-cancel").on("click", function() { + $("#chaining-oids-form").css('display', 'none'); + }); + $("#chaining-oid-button").on("click", function() { + // Update oids + $("#chaining-oids-form").css('display', 'block'); + }) + $("#chaining-oid-save").on("click", function() { + // Update oids + $("#chaining-oids-form").css('display', 'none'); + }); + + // Chaining Comps + $("#chain-comp-close").on("click", function() { + $("#chaining-comp-form").css('display', 'none'); + }); + $("#chaining-comp-cancel").on("click", function() { + $("#chaining-comp-form").css('display', 'none'); + }); + $("#chaining-comp-button").on("click", function() { + // Update Comps + $("#chaining-comp-form").css('display', 'block'); + }) + $("#chaining-comp-save").on("click", function() { + // Update comps + $("#chaining-comp-form").css('display', 'none'); + }); + + // Create DB Link + $("#create-chain-close").on("click", function() { + $("#create-db-link-form").css('display', 'none'); + }); + $("#chaining-cancel").on("click", function() { + $("#create-db-link-form").css('display', 'none'); + }); + $("#chaining-save").on("click", function() { + // Create DB link, if LDAPS is 
selected replace remotefarmUrl "ldap://" with "ldaps://", and visa versa to remove ldaps:// + $("#create-db-link-form").css('display', 'none'); + }); + }); }); diff --git a/src/cockpit/389-console/js/plugins.js b/src/cockpit/389-console/js/plugins.js index 456a2730f..75cb84331 100644 --- a/src/cockpit/389-console/js/plugins.js +++ b/src/cockpit/389-console/js/plugins.js @@ -4,7 +4,11 @@ $(document).ready( function() { $('#plugin-table').DataTable ( { "lengthMenu": [[50, 100, -1], [50, 100, "All"]], "bAutoWidth": false, - "dom": '<"pull-left"f><"pull-right"l>tip' + "dom": '<"pull-left"f><"pull-right"l>tip', + "columnDefs": [ { + "targets": 3, + "orderable": false + } ] }); }); }); diff --git a/src/cockpit/389-console/js/replication.js b/src/cockpit/389-console/js/replication.js index d3f6d41e0..efa6b3bc3 100644 --- a/src/cockpit/389-console/js/replication.js +++ b/src/cockpit/389-console/js/replication.js @@ -118,7 +118,11 @@ $(document).ready( function() { //"lengthMenu": [ 16, 32, 64, 128], "language": { "emptyTable": "No agreements configured" - } + }, + "columnDefs": [ { + "targets": 4, + "orderable": false + } ] }); // Set up CleanAllRUV Table @@ -128,7 +132,11 @@ $(document).ready( function() { "dom": '<"pull-left"f><"pull-right"l>tip', "language": { "emptyTable": "No agreements configured" - } + }, + "columnDefs": [ { + "targets": 3, + "orderable": false + } ] }); $('#repl-summary-table').DataTable( { @@ -144,14 +152,14 @@ $(document).ready( function() { // Repl Agreement Wizard $("#agmt-close").on("click", function() { - $("#agmt-wizard").css('display', 'none'); + $("#agmt-form").css('display', 'none'); }); $("#agmt-cancel").on("click", function() { - $("#agmt-wizard").css('display', 'none'); + $("#agmt-form").css('display', 'none'); }); $("#create-agmt").on("click", function() { clear_agmt_wizard(); - $("#agmt-wizard").css('display', 'block'); + $("#agmt-form").css('display', 'block'); }); // Handle disabling/enabling of agmt schedule panel diff --git 
a/src/cockpit/389-console/js/schema.js b/src/cockpit/389-console/js/schema.js index af95bb420..c6727c21c 100644 --- a/src/cockpit/389-console/js/schema.js +++ b/src/cockpit/389-console/js/schema.js @@ -23,6 +23,7 @@ $(document).ready( function() { "lengthMenu": [[25, 50, 100, -1], [25, 50, 100, "All"]] }); + $('#custom-oc-table').DataTable ({ "paging": true, "bAutoWidth": false, @@ -30,7 +31,11 @@ $(document).ready( function() { "language": { "emptyTable": "No custom objectclasses defined" }, - "lengthMenu": [[25, 50, 100, -1], [25, 50, 100, "All"]] + "lengthMenu": [[25, 50, 100, -1], [25, 50, 100, "All"]], + "columnDefs": [ { + "targets": 5, + "orderable": false + } ] }); $('#custom-attr-table').DataTable({ "paging": true, @@ -39,7 +44,11 @@ $(document).ready( function() { "language": { "emptyTable": "No custom attributes defined" }, - "lengthMenu": [[25, 50, 100, -1], [25, 50, 100, "All"]] + "lengthMenu": [[25, 50, 100, -1], [25, 50, 100, "All"]], + "columnDefs": [ { + "targets": 7, + "orderable": false + } ] }); $('#schema-mr-table').DataTable({ diff --git a/src/cockpit/389-console/js/security.js b/src/cockpit/389-console/js/security.js index 7c5aed879..f2c0856bf 100644 --- a/src/cockpit/389-console/js/security.js +++ b/src/cockpit/389-console/js/security.js @@ -58,6 +58,10 @@ $(document).ready( function() { "language": { "emptyTable": "No Certificates In Database" }, + "columnDefs": [ { + "targets": 2, + "orderable": false + } ] }); diff --git a/src/cockpit/389-console/js/servers.js b/src/cockpit/389-console/js/servers.js index 87770ec66..c9e5c3c34 100644 --- a/src/cockpit/389-console/js/servers.js +++ b/src/cockpit/389-console/js/servers.js @@ -80,7 +80,11 @@ $(document).ready( function() { "dom": '<"pull-left"f><"pull-right"l>tip', "language": { "emptyTable": "No SASL Mappings" - } + }, + "columnDefs": [ { + "targets": 5, + "orderable": false + } ] }); $("#passwordhistory").change(function() { @@ -201,7 +205,11 @@ $(document).ready( function() { 
//"lengthMenu": [ 16, 32, 64, 128], "language": { "emptyTable": "No local policies" - } + }, + "columnDefs": [ { + "targets": 2, + "orderable": false + } ] }); // Accordion opening/closings @@ -245,6 +253,19 @@ $(document).ready( function() { } } + var pwp_acc = document.getElementsByClassName("pwp-accordion"); + for (var i = 0; i < pwp_acc.length; i++) { + pwp_acc[i].onclick = function() { + this.classList.toggle("active"); + var panel = this.nextElementSibling; + if (panel.style.display === "block") { + panel.style.display = "none"; + } else { + panel.style.display = "block"; + } + } + } + var localpwp_acc = document.getElementsByClassName("localpwp-accordion"); for (var i = 0; i < localpwp_acc.length; i++) { localpwp_acc[i].onclick = function() { diff --git a/src/cockpit/389-console/plugins.html b/src/cockpit/389-console/plugins.html index 429447cc5..ed90bc8d7 100644 --- a/src/cockpit/389-console/plugins.html +++ b/src/cockpit/389-console/plugins.html @@ -1,26 +1,258 @@ <div class="ds-plugin-panel"> -<table id="plugin-table" class="display ds-plugin-table" cellspacing="0"> - <thead class="ds-plugin-header"> - <tr> - <th>Plugin Name</th> - <th>Plugin Type</th> - <th>Enabled</th> - <th></th> - </tr> - </thead> - <tbody id="plugin-body"> - <tr> - <td>MemberOf Plugin</td> - <td>betxnpostoperation</td> - <td>on</td> - <td><button class="btn btn-default ds-agmt-dropdown-button" type="button" id="edit-plugin-2">Edit Plugin</button></td> - </tr> - <tr> - <td>Replication Plugin</td> - <td>betxnpostoperation</td> - <td>on</td> - <td><button class="btn btn-default ds-agmt-dropdown-button" type="button" id="edit-plugin-1">Edit Plugin</button></td> - </tr> - </tbody> -</table> + <table id="plugin-table" class="display ds-plugin-table" cellspacing="0"> + <thead class="ds-plugin-header"> + <tr> + <th>Plugin Name</th> + <th>Plugin Type</th> + <th>Enabled</th> + <th></th> + </tr> + </thead> + <tbody id="plugin-body"> + <tr> + <td>MemberOf Plugin</td> + 
<td>betxnpostoperation</td> + <td>on</td> + <td><button class="btn btn-default ds-agmt-dropdown-button" type="button" id="edit-plugin-2">Edit Plugin</button></td> + </tr> + <tr> + <td>Replication Plugin</td> + <td>betxnpostoperation</td> + <td>on</td> + <td><button class="btn btn-default ds-agmt-dropdown-button" type="button" id="edit-plugin-1">Edit Plugin</button></td> + </tr> + </tbody> + </table> + + + + <!-- Modals/Popups/Wizards --> + + <div id="edit-generic-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Edit Plugin</b> <span class="close" id="generic-plugin-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="edit-generic-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="edit-generic-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- Automember --> + <div id="automember-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Automembership Plugin</b> <span class="close" id="automember-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="automember-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="automember-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- Acct Policy ? 
--> + <div id="acct-policy-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Account Policy Plugin</b> <span class="close" id="acct-policy-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="acct-policy-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="acct-policy-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- Acct Usability ? --> + <div id="acct-use-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Account Usability Plugin</b> <span class="close" id="acct-use-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="acct-use-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="acct-use-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- Attr Uniqueness --> + <div id="attr-uniq-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Attribute Uniqueness Plugin</b> <span class="close" id="attr-uniq-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="attr-uniq-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="attr-uniq-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- Content Sync ? 
--> + <div id="content-sync-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Content Synchronization Plugin</b> <span class="close" id="content-sync-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="content-sync-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="content-sync-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- Linked Attrs --> + <div id="linked-attrs-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Linked Attributes Plugin</b> <span class="close" id="linked-attrs-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="linked-attrs-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="linked-attrs-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- Managed Entries Plugin --> + <div id="managed-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Managed Entries Plugin</b> <span class="close" id="managed-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="managed-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="managed-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- MemberOf Plugin --> + <div id="memberof-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>MemberOf Plugin</b> <span class="close" 
id="memberof-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="memberof-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="memberof-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- RI Plugin --> + <div id="ri-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Referential Integrity Plugin</b> <span class="close" id="ri-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="ri-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="ri-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- RetroCL Plugin --> + <div id="retrocl-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Retro Changelog Plugin</b> <span class="close" id="retrocl-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="retrocl-plugin-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="retrocl-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <!-- Root DN Access Control plugin --> + <div id="rootdn-plugin" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Root DN Access Control Plugin</b> <span class="close" id="rootdn-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="rootdn-plugin-cancel" class="ds-button-left">Cancel</button> + 
</div> + <div class="ds-panel-right"> + <button type="submit" id="rootdn-plugin-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + </div> \ No newline at end of file diff --git a/src/cockpit/389-console/replication.html b/src/cockpit/389-console/replication.html index daa5891fe..189939537 100644 --- a/src/cockpit/389-console/replication.html +++ b/src/cockpit/389-console/replication.html @@ -103,7 +103,7 @@ <button class="btn btn-default ds-repl-managers-buttons" id="delete-repl-manager">Delete Replication Manager</button> <p></p> - <input type="button" class="accordion repl-config-accordion ds-agmt-wiz-button ds-accordion-spacing" id="repl-config-accordion" value="Advanced Replication Settings &#9660;"/> + <input type="button" class="accordion repl-config-accordion ds-accordion-button ds-accordion-spacing" id="repl-config-accordion" value="Advanced Replication Settings &#9660;"/> <div class="ds-accordion-panel"> <div class="ds-container"> <div> @@ -265,10 +265,10 @@ Replication Agreement wizard ----------------------------------------------------------- --> - <div id="agmt-wizard" class="modal"> - <form id="agmt-form" class="modal-content animate"> + <div id="agmt-form" class="modal"> + <form id="" class="modal-content animate"> <div class="container"> - <h2 id="agmt-wizard-title"><b>Create Replication Agreement</b></h2> + <h2 id="agmt-wizard-title"><b>Create Replication Agreement</b> <span class="close" id="agmt-close">&times;</span> </h2> <hr class="ds-hr"> <div class="ds-container"> <div> @@ -278,6 +278,12 @@ class="ds-wiz-input" type="text" placeholder="Consumer hostname" id="nsds5replicahost" name="port" required> <label for="nsds5replicaport" class="ds-config-label" title="Agreement name (nsDS5ReplicaPort)."><b>Consumer Port</b></label><input class="ds-wiz-input" type="text" placeholder="Consumer port number" id="nsds5replicaport" name="name" required> + <label for="nsds5replicabinddn" class="ds-config-label" 
title="Replication Bind DN (nsDS5ReplicaBindDN)."><b>Replication Bind DN</b></label><input + class="ds-wiz-input" type="text" placeholder="Bind DN" id="nsds5replicabinddn" name="name" required> + <label for="nsds5replicacredentials" class="ds-config-label" title="Replication Bind DN (nsDS5ReplicaCredentials)."><b>Replication Bind DN Credentials</b></label><input + class="ds-wiz-input" type="password" placeholder="Enter password" id="nsds5replicacredentials" name="name" required> + <label for="nsds5replicacredentials-confirm" class="ds-config-label" title="Confirm password"><b>Confirm Password</b></label><input + class="ds-wiz-input" type="password" placeholder="Confirm password" id="nsds5replicacredentials-confirm" name="name" required> <label for="nsds5replicatransportinfo" class="ds-config-label" title="Agreement name (nsDS5ReplicaTransportInfo)."><b>Connection Protocol</b></label><select class="btn btn-default dropdown ds-agmt-wiz-dropdown" id="nsds5replicatransportinfo"> <option>LDAP</option> @@ -290,12 +296,6 @@ <option>SASL/DIGEST-MD5</option> <option>SASL/GSSAPI</option> </select> - <label for="nsds5replicabinddn" class="ds-config-label" title="Replication Bind DN (nsDS5ReplicaBindDN)."><b>Replication Bind DN</b></label><input - class="ds-wiz-input" type="text" placeholder="Bind DN" id="nsds5replicabinddn" name="name" required> - <label for="nsds5replicacredentials" class="ds-config-label" title="Replication Bind DN (nsDS5ReplicaCredentials)."><b>Replication Bind DN Credentials</b></label><input - class="ds-wiz-input" type="password" placeholder="Enter password" id="nsds5replicacredentials" name="name" required> - <label for="nsds5replicacredentials-confirm" class="ds-config-label" title="Confirm password"><b>Confirm Password</b></label><input - class="ds-wiz-input" type="password" placeholder="Confirm password" id="nsds5replicacredentials-confirm" name="name" required> </div> </div> @@ -429,12 +429,31 @@ </select></p> </div> <hr class="ds-hr"> - <div 
class=" clearfix ds-container"> <!-- class=clearfix --> + <div class="clearfix ds-container"> <div class="ds-panel-left"> <button type="button" id="agmt-cancel" class="ds-button-left">Cancel</button> </div> <div class="ds-panel-right"> - <button type="submit" class="ds-button-right">Save</button> + <button type="submit" id="agmt-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + + <div id="cleanallruv" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Create CleanAllRUV Task</b> <span class="close" id="create-cleanallruv-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="cleanallruv-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="cleanallruv-save" class="ds-button-right">Create Task</button> </div> </div> </div> diff --git a/src/cockpit/389-console/schema.html b/src/cockpit/389-console/schema.html index e01e23e46..4bf6aba5d 100644 --- a/src/cockpit/389-console/schema.html +++ b/src/cockpit/389-console/schema.html @@ -1,132 +1,239 @@ - <div class="ds-fixed ds-buttons"> - <button id="schema-standard-btn" class="btn btn-default ds-button ds-button-control active">Standard Schema</button> - <button id="schema-custom-btn" class="btn btn-default ds-button ds-button-control active">Custom Schema</button> - <button id="schema-mr-btn" class="btn btn-default ds-button ds-button-control active">Matching Rules</button> - <hr> - <button id="schema-reload" class="btn btn-default ds-button">Reload Schema Files</button> +<div class="ds-fixed ds-buttons"> + <button id="schema-standard-btn" class="btn btn-default ds-button ds-button-control active">Standard Schema</button> + <button id="schema-custom-btn" class="btn btn-default ds-button ds-button-control active">Custom Schema</button> + <button id="schema-mr-btn" class="btn btn-default 
ds-button ds-button-control active">Matching Rules</button> + <hr> + <button id="schema-reload" class="btn btn-default ds-button">Reload Schema Files</button> +</div> + +<div class="ds-flex" id="schema-content"> + + <div id="schema-standard" class="schema-ctrl" hidden> + <div class="ds-border"> + <h2><b>Standard Objectclasses</b><hr class="ds-hr"></h2> + <table id="standard-oc-table" class="display ds-table" cellspacing="0" width="100%"> + <thead> + <tr class="ds-table-header"> + <th>Objectclass Name</th> + <th>OID</th> + <th>Parent</th> + <th>Required Attributes</th> + <th>Allowed Attributes</th> + </tr> + </thead> + <tbody id="standard-body"> + <tr> + <td>person</td> + <td>1.0.34.54.7.2.3.4.3.2</td> + <td>top</td> + <td>cn givenname sn</td> + <td>description uid</td> + </tr> + </tbody> + </table> + </div> + <div class="myborder"> + <h2><br><b>Standard Attributes</b><hr class="ds-hr"></h2> + <table id="standard-attr-table" class="display ds-table" cellspacing="0" width="100%"> + <thead> + <tr class="ds-table-header"> + <th>Attribute Name</th> + <th>OID</th> + <th>Syntax</th> + <th>Multivalued</th> + <th>Equality Matching Rules</th> + <th>Ordering Matching Rules</th> + <th>Substring Matching Rules</th> + </tr> + </thead> + <tbody id="standard-attr-body"> + </tbody> + </table> + </div> </div> - <div class="ds-flex" id="schema-content"> + <div id="schema-custom" class="schema-ctrl" hidden> + <div class="ds-border"> + <h2><b>Custom Objectclasses</b><hr class="ds-hr"></h2> + <table id="custom-oc-table" class="display ds-table" cellspacing="0" width="100%"> + <thead> + <tr class="ds-table-header"> + <th>Objectclass Name</th> + <th>OID</th> + <th>Parent</th> + <th>Required Attributes</th> + <th>Allowed Attributes</th> + <th>Action</th> + </tr> + </thead> + <tbody id="custom-body"> + <tr> + <td>InetOrgPerson</td> + <td>1.1.1.1.1.1.1.1.1.1</td> + <td>top</td> + <td>cn</td> + <td>sn uid</td> + <td> + <div class="dropdown"> + <button class="btn btn-default dropdown-toggle 
ds-agmt-dropdown-button" type="button" id="dropdownMenu1" data-toggle="dropdown"> + Choose Action... + <span class="caret"></span> + </button> + <ul class="dropdown-menu ds-agmt-dropdown" role="menu" aria-labelledby="dropdownMenu1"> + <li role=""><a role="menuitem" tabindex="0" href="#">Edit Objectclass</a></li> + <li role=""><a role="menuitem" tabindex="1" href="#">Delete Objectclass</a></li> + </ul> + </div> + </td> + </tr> + </tbody> + </table> + <button id="create-oc" name="create-oc" class="btn btn-default ds-button">Create Objectclass</button> + </div> + <div class="ds-border"> + <h2><br><b>Custom Attributes</b><hr class="ds-hr"></h2> + <table id="custom-attr-table" class="display ds-table" cellspacing="0" width="100%"> + <thead> + <tr class="ds-table-header"> + <th>Attribute Name</th> + <th>OID</th> + <th>Syntax</th> + <th>Multivalued</th> + <th>Equality Matching Rules</th> + <th>Ordering Matching Rules</th> + <th>Substring Matching Rules</th> + <th>Action</th> + </tr> + </thead> + <tbody id="custom-attr-body"> + <td>ssn</td> + <td>1.1.1.1.1.1.1.1</td> + <td>DirectoryString</td> + <td>no</td> + <td></td> + <td></td> + <td></td> + <td> + <div class="dropdown"> + <button class="btn btn-default dropdown-toggle ds-agmt-dropdown-button" type="button" id="dropdownMenu1" data-toggle="dropdown"> + Choose Action... 
+ <span class="caret"></span> + </button> + <ul class="dropdown-menu ds-agmt-dropdown" role="menu" aria-labelledby="dropdownMenu1"> + <li role=""><a role="menuitem" tabindex="0" href="#">Edit Attribute</a></li> + <li role=""><a role="menuitem" tabindex="1" href="#">Delete Attribute</a></li> + </ul> + </div> + </td> + </tbody> + </table> + <button id="create-at" name="create-at" class="btn btn-default ds-button">Create Attribute</button> + </div> + </div> - <div id="schema-standard" class="schema-ctrl" hidden> - <div class="ds-border"> - <h2><b>Standard Objectclasses</b><hr class="ds-hr"></h2> - <table id="standard-oc-table" class="display ds-table" cellspacing="0" width="100%"> - <thead> - <tr class="ds-table-header"> - <th>Objectclass Name</th> - <th>OID</th> - <th>Parent</th> - <th>Required Attributes</th> - <th>Allowed Attributes</th> - </tr> - </thead> - <tbody id="standard-body"> - <tr> - <td>person</td> - <td>1.0.34.54.7.2.3.4.3.2</td> - <td>top</td> - <td>cn givenname sn</td> - <td>description uid</td> - </tr> - </tbody> - </table> - </div> - <div class="myborder"> - <h2><br><b>Standard Attributes</b><hr class="ds-hr"></h2> - <table id="standard-attr-table" class="display ds-table" cellspacing="0" width="100%"> - <thead> - <tr class="ds-table-header"> - <th>Attribute Name</th> - <th>OID</th> - <th>Syntax</th> - <th>Multivalued</th> - <th>Equality Matching Rules</th> - <th>Ordering Matching Rules</th> - <th>Substring Matching Rules</th> - </tr> - </thead> - <tbody id="standard-attr-body"> - </tbody> - </table> - </div> + <div id="schema-mr" class="schema-ctrl" hidden> + <div class="ds-border"> + <h2><b>Matching Rules</b><hr class="ds-hr"></h2> + <table id="schema-mr-table" class="display ds-table" cellspacing="0" width="100%"> + <thead> + <tr class="ds-table-header"> + <th>Matching Rule Name</th> + <th>OID</th> + <th>Syntax</th> + <th>Description</th> + </tr> + </thead> + <tbody id="standard-body"> + <tr> + <td>equality</td> + <td>9.5.6.7.3.5.4.45.54.4</td> + 
<td>DirectoryString</td> + <td>My really long description for some reason</td> + </tr> + <tr> + <td>equality-2</td> + <td>999.5.6.7.3.5.4.45.54.4</td> + <td>DirectoryString</td> + <td>My really long description for some reason</td> + </tr> + </tbody> + </table> </div> + </div> - <div id="schema-custom" class="schema-ctrl" hidden> - <div class="ds-border"> - <h2><b>Custom Objectclasses</b><hr class="ds-hr"></h2> - <table id="custom-oc-table" class="display ds-table" cellspacing="0" width="100%"> - <thead> - <tr class="ds-table-header"> - <th>Objectclass Name</th> - <th>OID</th> - <th>Parent</th> - <th>Required Attributes</th> - <th>Allowed Attributes</th> - </tr> - </thead> - <tbody id="custom-body"> - <tr> - <td>InetOrgPerson</td> - <td>1.1.1.1.1.1.1.1.1.1</td> - <td>top</td> - <td>cn</td> - <td>sn uid</td> - </tr> - </tbody> - </table> - <button id="create-oc" name="create-oc" class="btn btn-default ds-button">Create Objectclass</button> + + <!-- Modals/Popups/Wizards --> + + <div id="add-oc" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Add Objectclass</b> <span class="close" id="add-oc-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="add-oc-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="add-oc-save" class="ds-button-right">Add Objectclass</button> + </div> + </div> </div> - <div class="ds-border"> - <h2><br><b>Custom Attributes</b><hr class="ds-hr"></h2> - <table id="custom-attr-table" class="display ds-table" cellspacing="0" width="100%"> - <thead> - <tr class="ds-table-header"> - <th>Attribute Name</th> - <th>OID</th> - <th>Syntax</th> - <th>Multivalued</th> - <th>Equality Matching Rules</th> - <th>Ordering Matching Rules</th> - <th>Substring Matching Rules</th> - </tr> - </thead> - <tbody id="custom-attr-body"> - </tbody> - </table> - <button 
id="create-at" name="create-at" class="btn btn-default ds-button">Create Attribute</button> + </form> + </div> + + <div id="edit-oc" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Edit Objectclass</b> <span class="close" id="edit-oc-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="edit-oc-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="edit-oc-save" class="ds-button-right">Save</button> + </div> + </div> </div> - </div> + </form> + </div> - <div id="schema-mr" class="schema-ctrl" hidden> - <div class="ds-border"> - <h2><b>Matching Rules</b><hr class="ds-hr"></h2> - <table id="schema-mr-table" class="display ds-table" cellspacing="0" width="100%"> - <thead> - <tr class="ds-table-header"> - <th>Matching Rule Name</th> - <th>OID</th> - <th>Syntax</th> - <th>Description</th> - </tr> - </thead> - <tbody id="standard-body"> - <tr> - <td>equality</td> - <td>9.5.6.7.3.5.4.45.54.4</td> - <td>DirectoryString</td> - <td>My really long description for some reason</td> - </tr> - <tr> - <td>equality-2</td> - <td>999.5.6.7.3.5.4.45.54.4</td> - <td>DirectoryString</td> - <td>My really long description for some reason</td> - </tr> - </tbody> - </table> + <div id="add-attr" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Add Attribute</b> <span class="close" id="add-attr-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="add-attr-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="add-attr-save" class="ds-button-right">Add Attribute</button> + </div> + </div> </div> - </div> + </form> + </div> + <div id="edit-attr" class="modal"> + <form class="modal-content animate"> + 
<div class="container"> + <h2 id=""><b>Edit Attribute</b> <span class="close" id="edit-attr-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="edit-attr-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="edit-attr-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> </div> +</div> diff --git a/src/cockpit/389-console/security.html b/src/cockpit/389-console/security.html index c6695b1a1..c2a5205df 100644 --- a/src/cockpit/389-console/security.html +++ b/src/cockpit/389-console/security.html @@ -12,9 +12,9 @@ <hr class="ds-hr"> <div class="ds-expired-div" id="cert-attrs"> <label for="nsslapd-secureport" class="ds-config-label" title="The server's secure port number (nsslapd-secureport)."><b>Server Secure Port</b></label><input - class="ds-input" type="text" id="nsslapd-secureport" size="40"/> + class="ds-input-lrg" type="text" id="nsslapd-secureport" size="40"/> <label for="nsslapd-securelistenhost" class="ds-config-label" title="This parameter can be used to restrict the Directory Server instance to a single IP interface (hostname, or IP address). This parameter specifically sets what interface to use for TLS traffic. Requires restart. 
(nsslapd-securelistenhost)."><b>Secure Listen Host Address</b></label><input - class="ds-input" type="text" id="nsslapd-securelistenhost" size="40"/> + class="ds-input-lrg" type="text" id="nsslapd-securelistenhost" size="40px"/> <label for="cipher-area" class="ds-sec-label"><b>Allowed Ciphers</b></label><textarea id="cipher-area" rows="5" cols="100">+all</textarea> <label for="sec-sslmin" class="ds-sec-label" title="The minimum SSL/TLS version the server will accept (sslversionmin).">Minimum SSL/TLS Version </label><select class="btn btn-default dropdown ds-agmt-wiz-dropdown" id="sec-sslmin"> @@ -160,6 +160,43 @@ </div> </div> + <!-- Modals/Popups/Wizards --> + + <div id="import-cert" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Import Certificate</b> <span class="close" id="import-cert-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="import-cert-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="import-cert-save" class="ds-button-right">Import Certificate</button> + </div> + </div> + </div> + </form> + </div> + + <div id="export-cert" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Export Certificate</b> <span class="close" id="export-cert-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="export-cert-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="export-cert-save" class="ds-button-right">Export Certificate</button> + </div> + </div> + </div> + </form> + </div> </div> diff --git a/src/cockpit/389-console/servers.html b/src/cockpit/389-console/servers.html index 99e44f264..b0be4246b 100644 --- a/src/cockpit/389-console/servers.html +++ 
b/src/cockpit/389-console/servers.html @@ -50,7 +50,7 @@ </div> <p></p> - <input type="button" class="accordion config-accordion ds-agmt-wiz-button ds-accordion-spacing" id="config-accordion" value="Advanced Settings &#9660;"/> + <input type="button" class="accordion config-accordion ds-accordion-button ds-accordion-spacing" id="config-accordion" value="Advanced Settings &#9660;"/> <div class="ds-accordion-panel"> <div class="ds-container"> <div class=""> @@ -93,7 +93,7 @@ </div> <p></p> - <input type="button" class="accordion rootdn-accordion ds-agmt-wiz-button ds-accordion-spacing" id="rootdn-accordion" value="Directory Manager Settings &#9660;"/> + <input type="button" class="accordion rootdn-accordion ds-accordion-button ds-accordion-spacing" id="rootdn-accordion" value="Directory Manager Settings &#9660;"/> <div class="ds-accordion-panel"> <div> <label for="nsslapd-rootdn" class="ds-config-label" title="The DN of the unrestricted directory manager (nsslapd-rootdn)."><b>Directory Manager DN</b></label><input @@ -192,152 +192,159 @@ <div id="server-pwpolicy" class="server-cfg-ctrl" hidden> <h2>Password Policy Management Settings</h2> <hr class="ds-hr"> - <div class="ds-container"> - <div class="ds-split"> - <h3><br>General Settings</h3> - <hr class="ds-hr"> - <label for="passwordstoragescheme" class="ds-server-label" title="Set the password storage scheme (passwordstoragescheme).">Password Storage Scheme</label><select - class="btn btn-default dropdown ds-passwd-dropdown" id="passwordstoragescheme"> - <option>PBKDF2_SHA256</option> - <option>SSHA512</option> - <option>SSHA384</option> - <option>SSHA256</option> - <option>SSHA</option> - <option>NS-MTA-MD5</option> - <option>MD5</option> - <option>SMD5</option> - <option>CRYPT-MD5</option> - <option>CRYPT-SHA512</option> - <option>CRYPT-SHA256</option> - <option>CRYPT</option> - <option>CLEAR</option> - </select> - <input type="checkbox" class="ds-config-checkbox" id="nsslapd-pwpolicy-local" checked><label - 
for="nsslapd-pwpolicy-local" class="ds-label" title="Allow subtree/user defined password policies (nsslapd-pwpolicy-local)."> Allow Local Password Policies</label> - <input type="checkbox" class="ds-config-checkbox" id="nsslapd-pwpolicy-inherit-global" checked><label - for="nsslapd-pwpolicy-inherit-global" class="ds-label" title= - "If a local password policy does not defined any syntax rules then inherit the local policy syntax (nsslapd-pwpolicy-inherit-global)."> Local Policy Inherits Global Policy</label> - <input type="checkbox" class="ds-config-checkbox" id="nsslapd-allow-hashed-passwords"><label - for="nsslapd-allow-hashed-passwords" class="ds-label" title="Allow anyone to add a prehashed password (nsslapd-allow-hashed-passwords)."> Allow Adding Pre-Hashed Passwords</label> - <input type="checkbox" class="ds-config-checkbox" id="passwordisglobalpolicy" checked><label - for="passwordisglobalpolicy" class="ds-label" title="Allow password policy state attributes to replicate (passwordIsGlobalPolicy)."> Replicate Password Policy State Attributes</label> - <input type="checkbox" class="ds-config-checkbox" id="passwordtrackupdatetime" checked><label - for="passwordtrackupdatetime" class="ds-label" title= - "Record a separate timestamp specifically for the last time that the password for an entry was changed. 
If this is enabled, then it adds the pwdUpdateTime operational attribute to the user account entry (passwordTrackUpdateTime)."> Track Password Update Time</label> - <label for="passwordAdminDN" class="ds-server-label" title="The DN for a password administrator or administrator group (passwordAdminDN)">Password Administrator</label><input - class="ds-input" type="text" id="passwordAdminDN" size="40"/> - </div> - <div class="ds-divider"></div> - <div class="ds-split"> - <h3><br>User Password Settings</h3> - <hr class="ds-hr"> - <input type="checkbox" class="ds-config-checkbox" id="passwordchange" checked><label - for="passwordchange" class="ds-label" title="Allow user's to change their passwords (passwordChange)."> Allow Users To Change Their Passwords</label> - <input type="checkbox" class="ds-config-checkbox" id="passwordmustchange" checked><label - for="passwordmustchange" class="ds-label" title="User must change its password after its been reset by an administrator (passwordMustChange).">User Must Change Password After Reset</label> - <input type="checkbox" class="ds-config-checkbox" id="passwordhistory" checked><label - for="passwordhistory" class="ds-label" title="Maintain a password history (passwordHistory).">Keep Password History</label> - <input class="ds-history-input" type="text" id="passwordinhistory" size="2"/>Passwords In History - <label for="passwordminage" class="ds-minage-label" title="Indicates the number of seconds that must pass before a user can change their password. 
(passwordMinAge)">Allow Password Changes (in seconds)</label><input - class="ds-input" type="text" id="passwordminage" size="10"/> - <p></p> - </div> - </div> - <div class="ds-container"> - <div class="ds-split"> - <h3><br>Password Expiration Settings</h3> - <hr class="ds-hr"> - <input type="checkbox" class="ds-config-checkbox" id="passwordexp" checked><label - for="passwordexp" class="ds-label" title="Enable a password expiration policy (passwordExp).">Enable Password Expiration</label> - <div class="ds-expired-div" id="expiration-attrs"> - <label for="passwordmaxage" class="ds-expire-label" title="The server's local hostname (passwordMaxAge)."><b>Password Expiration Time (in seconds)</b></label><input - class="ds-input" type="text" id="passwordmaxage" size="5"/> - <label for="passwordgracelimit" class="ds-expire-label" title="The server's local hostname (passwordGraceLimit)."><b>Allowed Logins After Password Expires</b></label><input - class="ds-input" type="text" id="passwordgracelimit" size="5"/> - <label for="passwordwarning" class="ds-expire-label" title="Set the time (in seconds), before a password is about to expire, to send a warning. 
(passwordWarning)."><b>Send Password Expiring Warning (in seconds)</b></label><input - class="ds-input" type="text" id="passwordwarning" size="5"/> - <input type="checkbox" class="ds-send-expiring-checkbox" id="passwordsendexpiringtime"><label - for="passwordsendexpiringtime" class="ds-label" title="Always return a password expiring control when requested (passwordSendExpiringTime).">Always Send <i>Password Expiring</i> Control</label> + <p></p> + + <input type="button" class="accordion pwp-accordion ds-accordion-button ds-accordion-spacing" id="pwd-accordion" value="Global Password Policy &#9660;" title= + "Configure the global password policy settings"/> + <div class="ds-accordion-panel"> + <div class="ds-container"> + <div class="ds-split"> + <h3>General Settings</h3> + <hr class="ds-hr"> + <label for="passwordstoragescheme" class="ds-server-label" title="Set the password storage scheme (passwordstoragescheme).">Password Storage Scheme</label><select + class="btn btn-default dropdown ds-passwd-dropdown" id="passwordstoragescheme"> + <option>PBKDF2_SHA256</option> + <option>SSHA512</option> + <option>SSHA384</option> + <option>SSHA256</option> + <option>SSHA</option> + <option>NS-MTA-MD5</option> + <option>MD5</option> + <option>SMD5</option> + <option>CRYPT-MD5</option> + <option>CRYPT-SHA512</option> + <option>CRYPT-SHA256</option> + <option>CRYPT</option> + <option>CLEAR</option> + </select> + <input type="checkbox" class="ds-config-checkbox" id="nsslapd-pwpolicy-local" checked><label + for="nsslapd-pwpolicy-local" class="ds-label" title="Allow subtree/user defined password policies (nsslapd-pwpolicy-local)."> Allow Local Password Policies</label> + <input type="checkbox" class="ds-config-checkbox" id="nsslapd-pwpolicy-inherit-global" checked><label + for="nsslapd-pwpolicy-inherit-global" class="ds-label" title= + "If a local password policy does not defined any syntax rules then inherit the local policy syntax (nsslapd-pwpolicy-inherit-global)."> Local Policy 
Inherits Global Policy</label> + <input type="checkbox" class="ds-config-checkbox" id="nsslapd-allow-hashed-passwords"><label + for="nsslapd-allow-hashed-passwords" class="ds-label" title="Allow anyone to add a prehashed password (nsslapd-allow-hashed-passwords)."> Allow Adding Pre-Hashed Passwords</label> + <input type="checkbox" class="ds-config-checkbox" id="passwordisglobalpolicy" checked><label + for="passwordisglobalpolicy" class="ds-label" title="Allow password policy state attributes to replicate (passwordIsGlobalPolicy)."> Replicate Password Policy State Attributes</label> + <input type="checkbox" class="ds-config-checkbox" id="passwordtrackupdatetime" checked><label + for="passwordtrackupdatetime" class="ds-label" title= + "Record a separate timestamp specifically for the last time that the password for an entry was changed. If this is enabled, then it adds the pwdUpdateTime operational attribute to the user account entry (passwordTrackUpdateTime)."> Track Password Update Time</label> + <label for="passwordAdminDN" class="ds-server-label" title="The DN for a password administrator or administrator group (passwordAdminDN)">Password Administrator</label><input + class="ds-input" type="text" id="passwordAdminDN" size="40"/> + </div> + <div class="ds-divider"></div> + <div class="ds-split"> + <h3>User Password Settings</h3> + <hr class="ds-hr"> + <input type="checkbox" class="ds-config-checkbox" id="passwordchange" checked><label + for="passwordchange" class="ds-label" title="Allow user's to change their passwords (passwordChange)."> Allow Users To Change Their Passwords</label> + <input type="checkbox" class="ds-config-checkbox" id="passwordmustchange" checked><label + for="passwordmustchange" class="ds-label" title="User must change its password after its been reset by an administrator (passwordMustChange).">User Must Change Password After Reset</label> + <input type="checkbox" class="ds-config-checkbox" id="passwordhistory" checked><label + 
for="passwordhistory" class="ds-label" title="Maintain a password history (passwordHistory).">Keep Password History</label> + <input class="ds-history-input" type="text" id="passwordinhistory" size="2"/>Passwords In History + <label for="passwordminage" class="ds-minage-label" title="Indicates the number of seconds that must pass before a user can change their password. (passwordMinAge)">Allow Password Changes (in seconds)</label><input + class="ds-input" type="text" id="passwordminage" size="10"/> + <p></p> </div> </div> - <div class="ds-divider"></div> - <div class="ds-split"> - <h3><br>Account Lockout Settings</h3> - <hr class="ds-hr"> - <input type="checkbox" class="ds-config-checkbox" id="passwordlockout" checked><label - for="passwordlockout" class="ds-label" title="Enable account lockout (passwordLockout).">Enable Account Lockout</label> - <div class="ds-expired-div" id="lockout-attrs"> - <label for="passwordmaxfailure" class="ds-expire-label" title= - "The maximum number of failed logins before account gets locked (passwordMaxFailure)."><b>Number of Failed Logins to Lockout Account</b></label><input - class="ds-input" type="text" id="passwordmaxfailure" size="5"/> - <label for="passwordresetfailurecount" class="ds-expire-label" title= - "The number of seconds until an accounts failure count is reset (passwordResetFailureCount)."><b>Time Before Failure Count Reset </b></label><input - class="ds-input" type="text" id="passwordresetfailurecount" size="5"/> - <div> - <label title="Lock out the account forever (passwordUnlock)."><input - class="ds-radio" type="radio" id="passwordunlock" value="passwordunlock" name="account-lockout" checked="checked"> Lockout Account Forever</label> - <label title="The number of seconds before account gets unlocked (passwordLockoutDuration)."><input - class="ds-radio" type="radio" name="account-lockout" value="passwordlockoutduration"> Time Until Account Unlocked <input - class="ds-input" type="text" id="passwordlockoutduration" 
size="5"/></label> + <div class="ds-container"> + <div class="ds-split"> + <h3><br>Password Expiration Settings</h3> + <hr class="ds-hr"> + <input type="checkbox" class="ds-config-checkbox" id="passwordexp" checked><label + for="passwordexp" class="ds-label" title="Enable a password expiration policy (passwordExp).">Enable Password Expiration</label> + <div class="ds-expired-div" id="expiration-attrs"> + <label for="passwordmaxage" class="ds-expire-label" title="The server's local hostname (passwordMaxAge)."><b>Password Expiration Time (in seconds)</b></label><input + class="ds-input" type="text" id="passwordmaxage" size="5"/> + <label for="passwordgracelimit" class="ds-expire-label" title="The server's local hostname (passwordGraceLimit)."><b>Allowed Logins After Password Expires</b></label><input + class="ds-input" type="text" id="passwordgracelimit" size="5"/> + <label for="passwordwarning" class="ds-expire-label" title="Set the time (in seconds), before a password is about to expire, to send a warning. 
(passwordWarning)."><b>Send Password Expiring Warning (in seconds)</b></label><input + class="ds-input" type="text" id="passwordwarning" size="5"/> + <input type="checkbox" class="ds-send-expiring-checkbox" id="passwordsendexpiringtime"><label + for="passwordsendexpiringtime" class="ds-label" title="Always return a password expiring control when requested (passwordSendExpiringTime).">Always Send <i>Password Expiring</i> Control</label> </div> </div> - <p></p> + <div class="ds-divider"></div> + <div class="ds-split"> + <h3><br>Account Lockout Settings</h3> + <hr class="ds-hr"> + <input type="checkbox" class="ds-config-checkbox" id="passwordlockout" checked><label + for="passwordlockout" class="ds-label" title="Enable account lockout (passwordLockout).">Enable Account Lockout</label> + <div class="ds-expired-div" id="lockout-attrs"> + <label for="passwordmaxfailure" class="ds-expire-label" title= + "The maximum number of failed logins before account gets locked (passwordMaxFailure)."><b>Number of Failed Logins to Lockout Account</b></label><input + class="ds-input" type="text" id="passwordmaxfailure" size="5"/> + <label for="passwordresetfailurecount" class="ds-expire-label" title= + "The number of seconds until an accounts failure count is reset (passwordResetFailureCount)."><b>Time Before Failure Count Reset </b></label><input + class="ds-input" type="text" id="passwordresetfailurecount" size="5"/> + <div> + <label title="Lock out the account forever (passwordUnlock)."><input + class="ds-radio" type="radio" id="passwordunlock" value="passwordunlock" name="account-lockout" checked="checked"> Lockout Account Forever</label> + <label title="The number of seconds before account gets unlocked (passwordLockoutDuration)."><input + class="ds-radio" type="radio" name="account-lockout" value="passwordlockoutduration"> Time Until Account Unlocked <input + class="ds-input" type="text" id="passwordlockoutduration" size="5"/></label> + </div> + </div> + <p></p> + </div> </div> - 
</div> - <h3><br>Password Syntax Settings</h3> - <hr class="ds-hr"> - <input type="checkbox" class="ds-config-checkbox" id="passwordchecksyntax" checked><label - for="passwordchecksyntax" class="ds-label" title="Enable account lockout (passwordCheckSyntax).">Check Password Syntax</label> - <div class="ds-container ds-expired-div" id="syntax-attrs"> - <div> - <label for="passwordminlength" class="ds-expire-label" title= - "The minimum number of characters in the password (passwordMinLength)."><b>Password Minimum Length </b></label><input - class="ds-input" type="text" id="passwordminlength" size="5"/> - <label for="passwordmindigits" class="ds-expire-label" title= - "Reject passwords with fewer than this many digit characters (0-9) (passwordMinDigits)."><b>Minimum Digit Characters </b></label><input - class="ds-input" type="text" id="passwordmindigits" size="5"/> - <label for="passwordmindigits" class="ds-expire-label" title= - "Reject passwords with fewer than this many alpha characters (passwordMinAlphas)."><b>Minimum Alpha Characters </b></label><input - class="ds-input" type="text" id="passwordminalphas" size="5"/> - <label for="passwordminuppers" class="ds-expire-label" title= - "Reject passwords with fewer than this many uppercase characters (passwordMinUppers)."><b>Minimum Uppercase Characters </b></label><input - class="ds-input" type="text" id="passwordminuppers" size="5"/> - <label for="passwordminlowers" class="ds-expire-label" title= - "Reject passwords with fewer than this many lowercase characters (passwordMinLowers)."><b>Minimum Lowercase Characters </b></label><input - class="ds-input" type="text" id="passwordminlowers" size="5"/> - </div> - <div class="ds-divider"></div> - <div class="ds-divider"></div> - <div class="ds-divider"></div> - <div> - <label for="passwordminspecials" class="ds-expire-label" title= - "Reject passwords with fewer than this many special non-alphanumeric characters (passwordMinSpecials)."><b>Minimum Special Characters 
</b></label><input - class="ds-input" type="text" id="passwordminspecials" size="5"/> - <label for="passwordmin8bit" class="ds-expire-label" title= - "Reject passwords with fewer than this many 8-bit or multi-byte characters (passwordMin8Bit)."><b>Minimum 8-bit Characters </b></label><input - class="ds-input" type="text" id="passwordmin8bit" size="5"/> - <label for="passwordmaxrepeats" class="ds-expire-label" title= - "The maximum number of times the same character can sequentially appear in a password (passwordMaxRepeats)."><b>Maximum Number Of Repeated Characters </b></label><input - class="ds-input" type="text" id="passwordmaxrepeats" size="5"/> - <label for="passwordmincategories" class="ds-expire-label" title= - "The minimum number of character categories that a password must contain (categories are upper, lower, digit, special, and 8-bit) (passwordMinCategories)."><b>Minimum Required Character Categories </b></label><input - class="ds-input" type="text" id="passwordmincategories" size="5"/> - <label for="passwordmintokenlength" class="ds-expire-label" title= - "The smallest attribute value used when checking if the password contains any of the user's account information (passwordMinTokenLength)."><b>Minimum Token Length </b></label><input - class="ds-input" type="text" id="passwordmintokenlength" size="5"/> + <h3><br>Password Syntax Settings</h3> + <hr class="ds-hr"> + <input type="checkbox" class="ds-config-checkbox" id="passwordchecksyntax" checked><label + for="passwordchecksyntax" class="ds-label" title="Enable account lockout (passwordCheckSyntax).">Check Password Syntax</label> + <div class="ds-container ds-expired-div" id="syntax-attrs"> + <div> + <label for="passwordminlength" class="ds-expire-label" title= + "The minimum number of characters in the password (passwordMinLength)."><b>Password Minimum Length </b></label><input + class="ds-input" type="text" id="passwordminlength" size="5"/> + <label for="passwordmindigits" class="ds-expire-label" title= 
+ "Reject passwords with fewer than this many digit characters (0-9) (passwordMinDigits)."><b>Minimum Digit Characters </b></label><input + class="ds-input" type="text" id="passwordmindigits" size="5"/> + <label for="passwordmindigits" class="ds-expire-label" title= + "Reject passwords with fewer than this many alpha characters (passwordMinAlphas)."><b>Minimum Alpha Characters </b></label><input + class="ds-input" type="text" id="passwordminalphas" size="5"/> + <label for="passwordminuppers" class="ds-expire-label" title= + "Reject passwords with fewer than this many uppercase characters (passwordMinUppers)."><b>Minimum Uppercase Characters </b></label><input + class="ds-input" type="text" id="passwordminuppers" size="5"/> + <label for="passwordminlowers" class="ds-expire-label" title= + "Reject passwords with fewer than this many lowercase characters (passwordMinLowers)."><b>Minimum Lowercase Characters </b></label><input + class="ds-input" type="text" id="passwordminlowers" size="5"/> + </div> + <div class="ds-divider"></div> + <div class="ds-divider"></div> + <div class="ds-divider"></div> + <div> + <label for="passwordminspecials" class="ds-expire-label" title= + "Reject passwords with fewer than this many special non-alphanumeric characters (passwordMinSpecials)."><b>Minimum Special Characters </b></label><input + class="ds-input" type="text" id="passwordminspecials" size="5"/> + <label for="passwordmin8bit" class="ds-expire-label" title= + "Reject passwords with fewer than this many 8-bit or multi-byte characters (passwordMin8Bit)."><b>Minimum 8-bit Characters </b></label><input + class="ds-input" type="text" id="passwordmin8bit" size="5"/> + <label for="passwordmaxrepeats" class="ds-expire-label" title= + "The maximum number of times the same character can sequentially appear in a password (passwordMaxRepeats)."><b>Maximum Number Of Repeated Characters </b></label><input + class="ds-input" type="text" id="passwordmaxrepeats" size="5"/> + <label 
for="passwordmincategories" class="ds-expire-label" title= + "The minimum number of character categories that a password must contain (categories are upper, lower, digit, special, and 8-bit) (passwordMinCategories)."><b>Minimum Required Character Categories </b></label><input + class="ds-input" type="text" id="passwordmincategories" size="5"/> + <label for="passwordmintokenlength" class="ds-expire-label" title= + "The smallest attribute value used when checking if the password contains any of the user's account information (passwordMinTokenLength)."><b>Minimum Token Length </b></label><input + class="ds-input" type="text" id="passwordmintokenlength" size="5"/> + </div> + <p></p> </div> <p></p> + <hr class="ds-hr"> + <button type="submit" class="ds-button" title="Save global password policy settings">Save</button> + <p></p> + <p></p> </div> - <p></p> - <hr class="ds-hr"> - <button type="submit" class="ds-button">Save</button> - <p></p> - <p></p> - <input type="button" class="accordion localpwp-accordion ds-agmt-wiz-button ds-accordion-spacing" id="localpwd-accordion" value="Local Password Policies &#9660;"/> + <input type="button" class="accordion localpwp-accordion ds-accordion-button ds-accordion-spacing" id="localpwd-accordion" value="Local Password Policies &#9660;" title= + "Configure User/Subtree local password policies"/> <div class="ds-accordion-panel"> - <div class="ds-container3"> + <div class=""> <table id="passwd-policy-table" class="display ds-repl-table" cellspacing="0" width="100%"> <thead> <tr class="ds-table-header"> @@ -379,7 +386,7 @@ <p></p> <!-- Access logging --> - <input type="button" class="accordion log-accordion ds-agmt-wiz-button ds-accordion-spacing" id="access-accordion" value="Access Log Settings &#9660;"/> + <input type="button" class="accordion log-accordion ds-accordion-button ds-accordion-spacing" id="access-accordion" value="Access Log Settings &#9660;"/> <div class="ds-accordion-panel"> <input type="checkbox" 
class="ds-config-checkbox" id="nsslapd-accesslog-logging-enabled" checked><label for="nsslapd-accesslog-logging-enabled" class="ds-label" title="Enable access logging (nsslapd-accesslog-logging-enabled)."> Enable Access Logging</label> @@ -439,7 +446,7 @@ </div> <!-- Error logging --> - <input type="button" class="accordion log-accordion ds-agmt-wiz-button ds-accordion-spacing" id="error-accordion" value="Errors Log Settings &#9660;"/> + <input type="button" class="accordion log-accordion ds-accordion-button ds-accordion-spacing" id="error-accordion" value="Errors Log Settings &#9660;"/> <div class="ds-accordion-panel"> <input type="checkbox" class="ds-config-checkbox" id="nsslapd-errorlog-logging-enabled" checked><label for="nsslapd-errorlog-logging-enabled" class="ds-label" title="Enable error logging (nsslapd-errorlog-logging-enabled)."> Enable Error Logging</label> @@ -510,7 +517,7 @@ </div> <!-- Audit logging --> - <input type="button" class="accordion log-accordion ds-agmt-wiz-button ds-accordion-spacing" id="audit-accordion" value="Audit Log Settings &#9660;"/> + <input type="button" class="accordion log-accordion ds-accordion-button ds-accordion-spacing" id="audit-accordion" value="Audit Log Settings &#9660;"/> <div class="ds-accordion-panel"> <input type="checkbox" class="ds-config-checkbox" id="nsslapd-auditlog-logging-enabled" checked><label for="nsslapd-auditlog-logging-enabled" class="ds-label" title="Enable audit logging (nsslapd-auditlog-logging-enabled)."> Enable Audit Logging</label> @@ -557,7 +564,7 @@ </div> <!-- Auditfail logging --> - <input type="button" class="accordion log-accordion ds-agmt-wiz-button ds-accordion-spacing" id="audit-accordion" value="Audit Failure Log Settings &#9660;"/> + <input type="button" class="accordion log-accordion ds-accordion-button ds-accordion-spacing" id="audit-accordion" value="Audit Failure Log Settings &#9660;"/> <div class="ds-accordion-panel"> <input type="checkbox" class="ds-config-checkbox" 
id="nsslapd-auditfaillog-logging-enabled" checked><label for="nsslapd-auditfaillog-logging-enabled" class="ds-label" title="Enable audit failure logging (nsslapd-auditfaillog-logging-enabled)."> Enable Audit Failure Logging</label> @@ -662,7 +669,7 @@ </div> <p></p> - <input type="button" class="accordion adv-config-accordion ds-agmt-wiz-button ds-accordion-spacing" id="adv-config-accordion" value="Advanced Settings &#9660;"/> + <input type="button" class="accordion adv-config-accordion ds-accordion-button ds-accordion-spacing" id="adv-config-accordion" value="Advanced Settings &#9660;"/> <div class="ds-accordion-panel"> <div class="ds-container"> <div> @@ -741,6 +748,97 @@ </div> </div> + <!-- Modals/Popups/Wizards --> + + <div id="create-inst" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Create Server Instance</b> <span class="close" id="create-inst-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="create-inst-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="create-inst-save" class="ds-button-right">Create Instance</button> + </div> + </div> + </div> + </form> + </div> + + <div id="create-sasl-map" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Create SASL Mapping</b> <span class="close" id="create-sasl-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="create-sasl-map-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="create-sasl-map-save" class="ds-button-right">Create Mapping</button> + </div> + </div> + </div> + </form> + </div> + + <div id="edit-sasl-map" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 
id=""><b>Create Sub Suffix</b> <span class="close" id="edit-sasl-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="edit-sasl-map-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="edit-sasl-map-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> + + <div id="create-local-pwp" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Create Local Password Policy</b> <span class="close" id="create-local-pwp-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="create-local-pwp-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="create-local-pwp-save" class="ds-button-right">Create Policy</button> + </div> + </div> + </div> + </form> + </div> + + <div id="edit-local-pwp" class="modal"> + <form class="modal-content animate"> + <div class="container"> + <h2 id=""><b>Edit Local Password Policy</b> <span class="close" id="edit-local-pwp-close">&times;</span></h2> + <hr class="ds-hr"> + + <div class="clearfix ds-container"> + <div class="ds-panel-left"> + <button type="button" id="edit-local-pwp-cancel" class="ds-button-left">Cancel</button> + </div> + <div class="ds-panel-right"> + <button type="submit" id="edit-local-pwp-save" class="ds-button-right">Save</button> + </div> + </div> + </div> + </form> + </div> </div>
0
f2ff28e027dabccc92b43e079f90f5f86c4d8332
389ds/389-ds-base
Ticket 50053 - improve testcase
commit f2ff28e027dabccc92b43e079f90f5f86c4d8332 Author: Thierry Bordaz <[email protected]> Date: Wed Nov 28 17:50:11 2018 +0100 Ticket 50053 - improve testcase diff --git a/dirsrvtests/tests/suites/plugins/cos_test.py b/dirsrvtests/tests/suites/plugins/cos_test.py index a0551b98a..c624e8fbb 100644 --- a/dirsrvtests/tests/suites/plugins/cos_test.py +++ b/dirsrvtests/tests/suites/plugins/cos_test.py @@ -29,7 +29,8 @@ def add_user(server, uid, testbase, locality=None, tel=None, title=None): 'uid': uid, 'l': locality, 'title': title, - 'telephoneNumber': tel}))) + 'telephoneNumber': tel, + 'description': 'description real'}))) @pytest.mark.ds50053 def test_cos_operational_default(topo): @@ -123,7 +124,9 @@ def test_cos_operational_default(topo): 'cosAttribute': 'description override'}))) # title cos override - TITLE_VIRT = "title is %s" % VIRTUAL + TITLE_VIRT = [] + for i in range(2): + TITLE_VIRT.append("title is %s %d" % (VIRTUAL, i)) TITLE_COS_TEMPLATE = "cn=title_template,%s" % PEOPLE TITLE_COS_DEFINITION = "cn=title_definition,%s" % PEOPLE inst.add_s(Entry((TITLE_COS_TEMPLATE, { @@ -142,52 +145,74 @@ def test_cos_operational_default(topo): assert len(ents) == 1 ent = ents[0] - # Check telephonenumber (specifier default) + # Check telephonenumber (specifier default) with real value => real assert ent.hasAttr('telephonenumber') value = ent.getValue('telephonenumber') - log.info('Returned telephonenumber: %s' % value) + log.info('Returned telephonenumber (exp. real): %s' % value) log.info('Returned telephonenumber: %d' % value.find(REAL.encode())) assert value.find(REAL.encode()) != -1 - # Check 'locality' (specifier operational-default) + # Check 'locality' (specifier operational-default) with real value => real assert ent.hasAttr('l') value = ent.getValue('l') - log.info('Returned l: %s' % value) + log.info('Returned l (exp. 
real): %s ' % value) log.info('Returned l: %d' % value.find(REAL.encode())) assert value.find(REAL.encode()) != -1 - # Check 'seealso' (specifier operational) + # Check 'seealso' (specifier operational) without real value => virtual assert not ent.hasAttr('seealso') ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0", ["seealso"]) assert len(ents) == 1 ent = ents[0] value = ent.getValue('seealso') - log.info('Returned seealso: %s' % value) + log.info('Returned seealso (exp. virtual): %s' % value) log.info('Returned seealso: %d' % value.find(VIRTUAL.encode())) assert value.find(VIRTUAL.encode()) != -1 - # Check 'description' (specifier override) + # Check 'description' (specifier override) with real value => virtual assert not ent.hasAttr('description') ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") assert len(ents) == 1 ent = ents[0] value = ent.getValue('description') - log.info('Returned description: %s' % value) + log.info('Returned description (exp. virtual): %s' % value) log.info('Returned description: %d' % value.find(VIRTUAL.encode())) assert value.find(VIRTUAL.encode()) != -1 - # Check 'title' (specifier merge-schemes) - # commented because it does not work need to open a new ticket -# ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") -# assert len(ents) == 1 -# ent = ents[0] -# found_real = False -# found_virtual = False -# for value in ent.getValues('title'): -# log.info('Returned title: %s' % value) -# if value.find(VIRTUAL.encode()) != -1: -# found_virtual = True -# if value.find(REAL.encode()) != -1: -# found_real = True -# assert found_virtual -# assert found_real \ No newline at end of file + # Check 'title' (specifier merge-schemes) with real value => real value returned + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + ent = ents[0] + found_real = False + found_virtual = False + for value in ent.getValues('title'): + log.info('Returned title (exp. 
real): %s' % value) + if value.find(VIRTUAL.encode()) != -1: + found_virtual = True + if value.find(REAL.encode()) != -1: + found_real = True + assert not found_virtual + assert found_real + + # Check 'title ((specifier merge-schemes) without real value => real value returned + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + inst.modify_s(ents[0].dn,[(ldap.MOD_DELETE, 'title', None)]) + + inst.restart() + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + ent = ents[0] + found_real = False + found_virtual = False + count = 0 + for value in ent.getValues('title'): + log.info('Returned title(exp. virt): %s' % value) + count = count + 1 + if value.find(VIRTUAL.encode()) != -1: + found_virtual = True + if value.find(REAL.encode()) != -1: + found_real = True + assert not found_real + assert found_virtual + assert count == 2
0
f4c6760ea5da7849e5b1f3b2f8d08bd7f6eedd1a
389ds/389-ds-base
Bug 644013 - uniqueness plugin segfault bug https://bugzilla.redhat.com/show_bug.cgi?id=644013 Resolves: bug 644013 Bug Description: uniqueness plugin segfault bug Reviewed by: self - one liner Branch: master Fix Description: Access the array pointer correctly Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no
commit f4c6760ea5da7849e5b1f3b2f8d08bd7f6eedd1a Author: Rich Megginson <[email protected]> Date: Mon Oct 18 12:47:14 2010 -0600 Bug 644013 - uniqueness plugin segfault bug https://bugzilla.redhat.com/show_bug.cgi?id=644013 Resolves: bug 644013 Bug Description: uniqueness plugin segfault bug Reviewed by: self - one liner Branch: master Fix Description: Access the array pointer correctly Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c index de7310fe2..01d635e19 100644 --- a/ldap/servers/plugins/uiduniq/uid.c +++ b/ldap/servers/plugins/uiduniq/uid.c @@ -637,7 +637,7 @@ addMod(LDAPMod ***modary, int *capacity, int *nmods, LDAPMod *toadd) *modary = (LDAPMod **)slapi_ch_malloc(*capacity * sizeof(LDAPMod *)); } } - *modary[*nmods] = toadd; + (*modary)[*nmods] = toadd; (*nmods)++; }
0
00a13794271527c7c474cd4d61f8ace5e7e7da32
389ds/389-ds-base
Bug 676053 - export task followed by import task causes cache assertion https://bugzilla.redhat.com/show_bug.cgi?id=676053 Description: When Simple Paged Results is requested and a page is returned, one entry is read ahead to check whether more entries exist or not. The read-ahead retrieves an entry (if any) and adds it into the entry cache. Simple Paged Results code puts the read- ahead entry back, but there was missing to call cache_return for the entry (that decrementing refcnt). If ldif2db.pl is called with the cache state, it finds out the entry which is still referred. This patch calls cache_return when the Simple Paged Results puts the read-ahead entry back. Plus, adding a debug function dump_hash.
commit 00a13794271527c7c474cd4d61f8ace5e7e7da32 Author: Noriko Hosoi <[email protected]> Date: Mon Feb 14 15:06:57 2011 -0800 Bug 676053 - export task followed by import task causes cache assertion https://bugzilla.redhat.com/show_bug.cgi?id=676053 Description: When Simple Paged Results is requested and a page is returned, one entry is read ahead to check whether more entries exist or not. The read-ahead retrieves an entry (if any) and adds it into the entry cache. Simple Paged Results code puts the read- ahead entry back, but there was missing to call cache_return for the entry (that decrementing refcnt). If ldif2db.pl is called with the cache state, it finds out the entry which is still referred. This patch calls cache_return when the Simple Paged Results puts the read-ahead entry back. Plus, adding a debug function dump_hash. diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c index 721a838bf..8e39c5136 100644 --- a/ldap/servers/slapd/back-ldbm/cache.c +++ b/ldap/servers/slapd/back-ldbm/cache.c @@ -275,6 +275,42 @@ int remove_hash(Hashtable *ht, const void *key, size_t keylen) return 0; } +#ifdef LDAP_CACHE_DEBUG +void +dump_hash(Hashtable *ht) +{ + u_long i; + void *e; + char ep_id[16]; + char ep_ids[80]; + char *p; + int ids_size = 80; + + LDAPDebug0Args(LDAP_DEBUG_ANY, "entry cache:\n"); + p = ep_ids; + for (i = 0; i < ht->size; i++) { + int len; + e = ht->slot[i]; + if (NULL == e) { + continue; + } + do { + PR_snprintf(ep_id, 16, "%u", ((struct backcommon *)e)->ep_id); + len = strlen(ep_id); + if (ids_size < len + 1) { + LDAPDebug1Arg(LDAP_DEBUG_ANY, "%s\n", ep_ids); + p = ep_ids; ids_size = 80; + } + PR_snprintf(p, ids_size, "%s", ep_id); + p += len; ids_size -= len + 1; + } while (e = HASH_NEXT(ht, e)); + } + if (p != ep_ids) { + LDAPDebug1Arg(LDAP_DEBUG_ANY, "%s\n", ep_ids); + } +} +#endif + /* hashtable distribution stats -- * slots: # of slots in the hashtable * total_entries: # of entries in the hashtable @@ -574,9 
+610,12 @@ static void entrycache_clear_int(struct cache *cache) } cache->c_maxsize = size; if (cache->c_curentries > 0) { - LDAPDebug1Arg(LDAP_DEBUG_ANY, + LDAPDebug1Arg(LDAP_DEBUG_ANY, "entrycache_clear_int: there are still %ld entries " - "in the entry cache. :/\n", cache->c_curentries); + "in the entry cache.\n", cache->c_curentries); +#ifdef LDAP_CACHE_DEBUG + dump_hash(cache->c_idtable); +#endif } } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c index 568d32c66..5565578b6 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_search.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c @@ -1458,9 +1458,32 @@ bail: void ldbm_back_prev_search_results( Slapi_PBlock *pb ) { + backend *be; + ldbm_instance *inst; back_search_result_set *sr; + + slapi_pblock_get( pb, SLAPI_BACKEND, &be ); + if (!be) { + LDAPDebug0Args(LDAP_DEBUG_ANY, + "ldbm_back_prev_search_results: no backend\n"); + return; + } + inst = (ldbm_instance *) be->be_instance_info; + if (!inst) { + LDAPDebug0Args(LDAP_DEBUG_ANY, + "ldbm_back_prev_search_results: no backend instance\n"); + return; + } slapi_pblock_get( pb, SLAPI_SEARCH_RESULT_SET, &sr ); if (sr) { + if (sr->sr_entry) { + /* The last entry should be returned to cache */ + LDAPDebug1Arg(LDAP_DEBUG_BACKLDBM, + "ldbm_back_prev_search_results: returning: %s\n", + slapi_entry_get_dn_const(sr->sr_entry->ep_entry)); + CACHE_RETURN (&inst->inst_cache, &(sr->sr_entry)); + sr->sr_entry = NULL; + } idl_iterator_decrement(&(sr->sr_current)); } return;
0
ca8ac8ec0261a40f2d1f3c457726dfc5483ab125
389ds/389-ds-base
Issue 4412 - Fix CLI repl-agmt requirement for parameters (#4422) Description: In dsconf CLI, make it possible to create SSLCLIENTAUTH bind method agreement without specifying bind dn (--bind-dn) and the password (--bind-passwd). Fixes: #4412 Reviewed by: @mreynolds389 (Thanks!)
commit ca8ac8ec0261a40f2d1f3c457726dfc5483ab125 Author: Simon Pichugin <[email protected]> Date: Mon Nov 9 11:43:04 2020 +0100 Issue 4412 - Fix CLI repl-agmt requirement for parameters (#4422) Description: In dsconf CLI, make it possible to create SSLCLIENTAUTH bind method agreement without specifying bind dn (--bind-dn) and the password (--bind-passwd). Fixes: #4412 Reviewed by: @mreynolds389 (Thanks!) diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py index 121cc40f0..439665731 100644 --- a/src/lib389/lib389/cli_conf/replication.py +++ b/src/lib389/lib389/cli_conf/replication.py @@ -756,8 +756,8 @@ def add_agmt(inst, basedn, log, args): raise ValueError('Bootstrap connection protocol can only be "LDAP", "LDAPS", or "STARTTLS"') properties['nsDS5ReplicaBootstrapTransportInfo'] = args.bootstrap_conn_protocol - # We do need the bind dn and credentials for none-sasl bind methods - if (bind_method in ('simple', 'sslclientauth')) and (args.bind_dn is None or args.bind_passwd is None): + # We do need the bind dn and credentials for 'simple' bind method + if (bind_method == 'simple') and (args.bind_dn is None or args.bind_passwd is None): raise ValueError("You need to set the bind dn (--bind-dn) and the password (--bind-passwd) for bind method ({})".format(bind_method)) # Create the agmt
0
e9ad5f5aca64f65fa2c9b2dc5132b0dacf131c99
389ds/389-ds-base
Ticket 49378 server init fails Bug Description: We used our own target for DS installation, but we should just use multi-user like anything else. Fix Description: Change service template to multi-user. This should be a seamless upgrade to most consumers. https://pagure.io/389-ds-base/issue/49378 Author: wibrown Review by: mreynolds (Thanks!)
commit e9ad5f5aca64f65fa2c9b2dc5132b0dacf131c99 Author: William Brown <[email protected]> Date: Thu Sep 28 09:11:00 2017 +1000 Ticket 49378 server init fails Bug Description: We used our own target for DS installation, but we should just use multi-user like anything else. Fix Description: Change service template to multi-user. This should be a seamless upgrade to most consumers. https://pagure.io/389-ds-base/issue/49378 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/wrappers/systemd.template.asan.service.in b/wrappers/systemd.template.asan.service.in index 1fe321ccb..52681f632 100644 --- a/wrappers/systemd.template.asan.service.in +++ b/wrappers/systemd.template.asan.service.in @@ -36,5 +36,5 @@ ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i @localstatedir@/run/ .include @initconfigdir@/@[email protected] [Install] -WantedBy=dirsrv.target +WantedBy=multi-user.target diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in index 30b9e4b78..0d88900b6 100644 --- a/wrappers/systemd.template.service.in +++ b/wrappers/systemd.template.service.in @@ -40,5 +40,5 @@ ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i @localstatedir@/run/ .include @initconfigdir@/@[email protected] [Install] -WantedBy=dirsrv.target +WantedBy=multi-user.target
0
145e7f4aa9d9cf400751a16768b077b533f428cc
389ds/389-ds-base
Issue 6356 - On LMDB, after an update the impact VLV index, the vlv recno cache is not systematically cleared (#6357) Bug description: The VLV manages/uses an index database. In LMDB in addition to the index there is one 'recno cache' database per VLV index. This recno cache, related to a given VLV index, is cleared when an update impacts the VLV index. The recno cache is not systematically cleared upon update of the VLV index. The way to clear the cache is to delete the record with key "OK". When the 'recno cache' does not contain this key, the next lookup to the cache, clears all entries and rebuild the cache from the vlv index. The deletion is done with mdb_del with both KEY and DATA refering to the same mdb_val. This means it deletes the records matching if both KEY/DATA. It should not match the DATA as the "OK" record is just a flag and all entries with that key should be removed. Fix description: The fix consist to call mdb_del only with key fixes: #6356 Reviewed by: Pierre Rogier (Thanks!!)
commit 145e7f4aa9d9cf400751a16768b077b533f428cc Author: tbordaz <[email protected]> Date: Wed Oct 9 15:20:48 2024 +0200 Issue 6356 - On LMDB, after an update the impact VLV index, the vlv recno cache is not systematically cleared (#6357) Bug description: The VLV manages/uses an index database. In LMDB in addition to the index there is one 'recno cache' database per VLV index. This recno cache, related to a given VLV index, is cleared when an update impacts the VLV index. The recno cache is not systematically cleared upon update of the VLV index. The way to clear the cache is to delete the record with key "OK". When the 'recno cache' does not contain this key, the next lookup to the cache, clears all entries and rebuild the cache from the vlv index. The deletion is done with mdb_del with both KEY and DATA refering to the same mdb_val. This means it deletes the records matching if both KEY/DATA. It should not match the DATA as the "OK" record is just a flag and all entries with that key should be removed. Fix description: The fix consist to call mdb_del only with key fixes: #6356 Reviewed by: Pierre Rogier (Thanks!!) diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c index 9c2db199c..313eb35ae 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c @@ -2882,7 +2882,7 @@ dbmdb_public_clear_vlv_cache(Slapi_Backend *be, dbi_txn_t *txn, dbi_db_t *db) ok.mv_size = 2; rc = dbmdb_open_dbi_from_filename(&rcdbi, be, rcdbname, NULL, 0); if (rc == 0) { - rc = MDB_DEL(TXN(txn), rcdbi->dbi, &ok, &ok); + rc = MDB_DEL(TXN(txn), rcdbi->dbi, &ok, NULL); } slapi_ch_free_string(&rcdbname); return rc;
0
8566e327d9589e5ace7996b1c0b35535aeeaa83f
389ds/389-ds-base
Remove __python3 from local specfile Description: Fix regression from previous commit
commit 8566e327d9589e5ace7996b1c0b35535aeeaa83f Author: Mark Reynolds <[email protected]> Date: Mon Nov 5 10:43:38 2018 -0500 Remove __python3 from local specfile Description: Fix regression from previous commit diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 62b1b95e2..8aee72336 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -117,7 +117,7 @@ BuildRequires: doxygen BuildRequires: libcmocka-devel BuildRequires: libevent-devel # For lib389 and related components. -BuildRequires: %{__python3} +BuildRequires: python%{python3_pkgversion} BuildRequires: python%{python3_pkgversion}-devel BuildRequires: python%{python3_pkgversion}-setuptools BuildRequires: python%{python3_pkgversion}-ldap @@ -289,7 +289,7 @@ Requires: openssl # This is for /usr/bin/c_rehash tool Requires: openssl-perl Requires: iproute -Requires: %{__python3} +Requires: python%{python3_pkgversion} Requires: python%{python3_pkgversion}-pytest Requires: python%{python3_pkgversion}-ldap Requires: python%{python3_pkgversion}-six @@ -308,7 +308,7 @@ This module contains tools and libraries for accessing, testing, Summary: Cockpit UI Plugin for configuring and administering the 389 Directory Server BuildArch: noarch Requires: cockpit -Requires: %{__python3} +Requires: python%{python3_pkgversion} Requires: python%{python3_pkgversion}-lib389 %description -n cockpit-389-ds @@ -323,9 +323,6 @@ A cockpit UI Plugin for configuring and administering the 389 Directory Server cp %{SOURCE2} README.devel -# Make sure python3 is used in shebangs -sed -r -i '1s|^#!\s*/usr/bin.*python.*|#!%{__python3}|' ldap/admin/src/scripts/{*.py,ds-replcheck} - %build %if %{use_clang}
0
74423d2eaef50949fd96ded8a2251834663e95e8
389ds/389-ds-base
Ticket #579 - Error messages encountered when using POSIX winsync Bug description: posix_group_fix_memberuid_callback registered by posixWinsyncCreateMemberOfTask calls an internal modify function even if there are no attributes to fix up. The attempt fails as expected, but it logs cryptic errors in the error log: - slapi_modify_internal_set_pb: NULL parameter - allow_operation: component identity is NULL Fix description: This patch skips calling the fix up internal modify if there is no attributes to fix up. https://fedorahosted.org/389/ticket/579 Reviewed by Rich (Thank you!!)
commit 74423d2eaef50949fd96ded8a2251834663e95e8 Author: Noriko Hosoi <[email protected]> Date: Mon Feb 11 14:49:14 2013 -0800 Ticket #579 - Error messages encountered when using POSIX winsync Bug description: posix_group_fix_memberuid_callback registered by posixWinsyncCreateMemberOfTask calls an internal modify function even if there are no attributes to fix up. The attempt fails as expected, but it logs cryptic errors in the error log: - slapi_modify_internal_set_pb: NULL parameter - allow_operation: component identity is NULL Fix description: This patch skips calling the fix up internal modify if there is no attributes to fix up. https://fedorahosted.org/389/ticket/579 Reviewed by Rich (Thank you!!) diff --git a/ldap/servers/plugins/posix-winsync/posix-group-task.c b/ldap/servers/plugins/posix-winsync/posix-group-task.c index 8d9d8acc2..2cb9a3174 100644 --- a/ldap/servers/plugins/posix-winsync/posix-group-task.c +++ b/ldap/servers/plugins/posix-winsync/posix-group-task.c @@ -249,6 +249,7 @@ posix_group_fix_memberuid_callback(Slapi_Entry *e, void *callback_data) char *dn = slapi_entry_get_dn(e); Slapi_DN *sdn = slapi_entry_get_sdn(e); + LDAPMod **mods = NULL; /* Clean out memberuids and dsonlymemberuids without a valid referant */ rc = slapi_entry_attr_find(e, "memberuid", &muid_attr); @@ -379,17 +380,19 @@ posix_group_fix_memberuid_callback(Slapi_Entry *e, void *callback_data) } } - Slapi_PBlock *mod_pb = slapi_pblock_new(); + mods = slapi_mods_get_ldapmods_passout(smods); + if (mods) { + Slapi_PBlock *mod_pb = NULL; + mod_pb = slapi_pblock_new(); + slapi_modify_internal_set_pb_ext(mod_pb, sdn, mods, 0, 0, + posix_winsync_get_plugin_identity(), 0); - slapi_modify_internal_set_pb_ext(mod_pb, sdn, slapi_mods_get_ldapmods_passout(smods), 0, 0, - posix_winsync_get_plugin_identity(), 0); - - slapi_pblock_set(mod_pb, SLAPI_TXN, the_cb_data->txn); - slapi_modify_internal_pb(mod_pb); - - slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); - 
slapi_pblock_destroy(mod_pb); + slapi_pblock_set(mod_pb, SLAPI_TXN, the_cb_data->txn); + slapi_modify_internal_pb(mod_pb); + slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + slapi_pblock_destroy(mod_pb); + } slapi_mods_free(&smods); slapi_log_error(SLAPI_LOG_PLUGIN, POSIX_WINSYNC_PLUGIN_NAME,
0
6fa30ec4b28db045e3bad00313f391811d1c53ad
389ds/389-ds-base
Ticket 48707 - Update rfc to accomodate that authid is mandatory Fix Description: authid in sasl is mandatory, so we should include this. https://pagure.io/389-ds-base/issue/48707 Author: wibrown Review by: mreynolds (Thanks!)
commit 6fa30ec4b28db045e3bad00313f391811d1c53ad Author: William Brown <[email protected]> Date: Tue Feb 28 11:17:20 2017 +1000 Ticket 48707 - Update rfc to accomodate that authid is mandatory Fix Description: authid in sasl is mandatory, so we should include this. https://pagure.io/389-ds-base/issue/48707 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/rfcs/Makefile b/rfcs/Makefile index e868d38fa..31db4d434 100644 --- a/rfcs/Makefile +++ b/rfcs/Makefile @@ -1,5 +1,5 @@ -allrfcs: folders examplerfcs draft-wibrown-ldapssotoken-00 +allrfcs: folders draft-wibrown-ldapssotoken-00 folders: mkdir -p txt diff --git a/rfcs/src/draft-wibrown-ldapssotoken-00.xml b/rfcs/src/draft-wibrown-ldapssotoken-00.xml index c503744d1..24cdf0fe0 100644 --- a/rfcs/src/draft-wibrown-ldapssotoken-00.xml +++ b/rfcs/src/draft-wibrown-ldapssotoken-00.xml @@ -1,6 +1,7 @@ <?xml version="1.0" encoding="US-ASCII"?> <!DOCTYPE rfc SYSTEM "rfc2629.dtd" [ +<!ENTITY RFC2078 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2078.xml"> <!ENTITY RFC2119 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2119.xml"> <!ENTITY RFC2222 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2222.xml"> <!ENTITY RFC4511 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4511.xml"> @@ -15,7 +16,7 @@ <?rfc sortrefs="yes" ?> <?rfc compact="yes" ?> <?rfc subcompact="no" ?> -<rfc category="std" docName="draft-wibrown-ldapssotoken-01" ipr="trust200902"> +<rfc category="std" docName="draft-wibrown-ldapssotoken-02" ipr="trust200902"> <front> @@ -90,7 +91,7 @@ </address> </author> - <date year="2016"></date> + <date year="2017"></date> <area>General</area> @@ -235,6 +236,9 @@ Date Time Until || User Unique Id The acquisition method for the token is discussed in section XXX.</t> <t>For authentication, the client MUST send the token as it was received. 
IE changes to formatting are not permitted.</t> + <t>The client MUST send the an appropriate authid in <xref target="RFC2078">RFC 2078</xref> + form. This authid MUST internally match the User Unique Id in the token. The server + is responsible for this validation.</t> <t>The client MAY transform the token if acting in a proxy fashion. However this transformation must be deterministic and able to be reversed to satisfy the previous requirement.</t> @@ -276,6 +280,8 @@ Date Time Until || User Unique Id <section title="SASL Authentication"> <t>The client issues a SASL bind request with the mechanism name LDAPSSOTOKEN.</t> + <t>The client sends an appropriate authid in <xref target="RFC2078">RFC 2078</xref> + form.</t> <t>The client provides the encrypted token that was provided in the LDAPSSOTokenResponse Token Field.</t> <t>The token is decrypted and authenticated based on the token @@ -296,6 +302,10 @@ Date Time Until || User Unique Id invalidCredentials MUST be returned.</t> <t>The User Unique Id is validated to exist on the server. If the User Unique Id does not exist, invalidCredentials MUST be returned.</t> + <t>The authid provided by the SASL client is verified with the User Unique Id. For example + if the authid is [email protected], the server maps this to an identity. Once this + identity is validated, the identity is check to match the User Unique Id. If they do not + match, the authentication MUST fail.</t> <t>The DateTimeIssued field is validated against the User Unique Id object's attribute or related attribute that contains "Valid Not Before". If the value of "Valid Not Before" exceeds or is equal to DateTimeIssued, @@ -414,6 +424,8 @@ LDAPSSOTokenResponse ::= SEQUENCE { </references> <references title="Informative References"> + &RFC2078; + &RFC2222; &RFC4511;
0
2309c38a2c510b011327354c0600e945ce2e53a5
389ds/389-ds-base
Ticket 48902 - Strdup pwdstoragescheme name to prevent misbehaving plugins Bug Description: Some plugins would set the pwdstorageschemename to a value from their stack. This would cause ns-slapd to segfault on shutdown due to attempting to free this value. Fix Description: pblock now strdups the pwdstorageschemename, so that plugins can behave however they want, and we always do the right thing. https://fedorahosted.org/389/ticket/48902 Author: wibrown Review by: nhosoi (Thanks!)
commit 2309c38a2c510b011327354c0600e945ce2e53a5 Author: William Brown <[email protected]> Date: Tue Jun 28 12:15:03 2016 +1000 Ticket 48902 - Strdup pwdstoragescheme name to prevent misbehaving plugins Bug Description: Some plugins would set the pwdstorageschemename to a value from their stack. This would cause ns-slapd to segfault on shutdown due to attempting to free this value. Fix Description: pblock now strdups the pwdstorageschemename, so that plugins can behave however they want, and we always do the right thing. https://fedorahosted.org/389/ticket/48902 Author: wibrown Review by: nhosoi (Thanks!) diff --git a/ldap/servers/plugins/pwdstorage/pwd_init.c b/ldap/servers/plugins/pwdstorage/pwd_init.c index d441d382b..5c14c954e 100644 --- a/ldap/servers/plugins/pwdstorage/pwd_init.c +++ b/ldap/servers/plugins/pwdstorage/pwd_init.c @@ -50,7 +50,6 @@ int sha_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> sha_pwd_storage_scheme_init\n" ); @@ -62,9 +61,8 @@ sha_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) sha1_pw_enc); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) sha1_pw_cmp ); - name = slapi_ch_strdup("SHA"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SHA" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= sha_pwd_storage_scheme_init %d\n\n", rc ); @@ -75,7 +73,6 @@ int ssha_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> ssha_pwd_storage_scheme_init\n" ); @@ -87,9 +84,8 @@ ssha_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) salted_sha1_pw_enc ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) sha1_pw_cmp ); - name = slapi_ch_strdup("SSHA"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SSHA" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= ssha_pwd_storage_scheme_init 
%d\n\n", rc ); return( rc ); @@ -99,7 +95,6 @@ int sha256_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> sha256_pwd_storage_scheme_init\n" ); @@ -111,9 +106,8 @@ sha256_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) sha256_pw_enc); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) sha256_pw_cmp ); - name = slapi_ch_strdup("SHA256"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SHA256" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= sha256_pwd_storage_scheme_init %d\n\n", rc ); @@ -124,7 +118,6 @@ int ssha256_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> ssha256_pwd_storage_scheme_init\n" ); @@ -136,9 +129,8 @@ ssha256_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) salted_sha256_pw_enc ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) sha256_pw_cmp ); - name = slapi_ch_strdup("SSHA256"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SSHA256" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= ssha256_pwd_storage_scheme_init %d\n\n", rc ); return( rc ); @@ -148,7 +140,6 @@ int sha384_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> sha384_pwd_storage_scheme_init\n" ); @@ -160,9 +151,8 @@ sha384_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) sha384_pw_enc); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) sha384_pw_cmp ); - name = slapi_ch_strdup("SHA384"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SHA384" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= sha384_pwd_storage_scheme_init %d\n\n", rc ); @@ -173,7 +163,6 @@ int ssha384_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, 
plugin_name, "=> ssha384_pwd_storage_scheme_init\n" ); @@ -185,9 +174,8 @@ ssha384_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) salted_sha384_pw_enc ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) sha384_pw_cmp ); - name = slapi_ch_strdup("SSHA384"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SSHA384" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= ssha384_pwd_storage_scheme_init %d\n\n", rc ); return( rc ); @@ -197,7 +185,6 @@ int sha512_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> sha512_pwd_storage_scheme_init\n" ); @@ -209,9 +196,8 @@ sha512_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) sha512_pw_enc); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) sha512_pw_cmp ); - name = slapi_ch_strdup("SHA512"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SHA512" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= sha512_pwd_storage_scheme_init %d\n\n", rc ); @@ -222,7 +208,6 @@ int ssha512_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> ssha512_pwd_storage_scheme_init\n" ); @@ -234,9 +219,8 @@ ssha512_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) salted_sha512_pw_enc ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) sha512_pw_cmp ); - name = slapi_ch_strdup("SSHA512"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SSHA512" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= ssha512_pwd_storage_scheme_init %d\n\n", rc ); return( rc ); @@ -246,7 +230,6 @@ int crypt_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> crypt_pwd_storage_scheme_init\n" ); @@ -259,9 +242,8 @@ crypt_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) 
crypt_pw_enc ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) crypt_pw_cmp ); - name = slapi_ch_strdup("CRYPT"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "CRYPT" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= crypt_pwd_storage_scheme_init %d\n\n", rc ); return( rc ); @@ -271,7 +253,6 @@ int clear_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> clear_pwd_storage_scheme_init\n" ); @@ -283,9 +264,8 @@ clear_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) clear_pw_enc ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) clear_pw_cmp ); - name = slapi_ch_strdup("CLEAR"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "CLEAR" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= clear_pwd_storage_scheme_init %d\n\n", rc ); return( rc ); @@ -295,7 +275,6 @@ int ns_mta_md5_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> ns_mta_md5_pwd_storage_scheme_init\n" ); @@ -307,9 +286,8 @@ ns_mta_md5_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) NULL ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) ns_mta_md5_pw_cmp ); - name = slapi_ch_strdup("NS-MTA-MD5"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "NS-MTA-MD5" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= ns_mta_md5_pwd_storage_scheme_init %d\n\n", rc ); return( rc ); @@ -319,7 +297,6 @@ int md5_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> md5_pwd_storage_scheme_init\n" ); @@ -331,9 +308,8 @@ md5_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) md5_pw_enc ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) md5_pw_cmp ); - name = slapi_ch_strdup("MD5"); rc |= 
slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "MD5" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= md5_pwd_storage_scheme_init %d\n\n", rc ); return( rc ); @@ -343,7 +319,6 @@ int smd5_pwd_storage_scheme_init( Slapi_PBlock *pb ) { int rc; - char *name; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> smd5_pwd_storage_scheme_init\n" ); @@ -355,9 +330,8 @@ smd5_pwd_storage_scheme_init( Slapi_PBlock *pb ) (void *) smd5_pw_enc ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) smd5_pw_cmp ); - name = slapi_ch_strdup("SMD5"); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, - name ); + "SMD5" ); slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "<= smd5_pwd_storage_scheme_init %d\n\n", rc ); return( rc ); diff --git a/ldap/servers/plugins/rever/rever.c b/ldap/servers/plugins/rever/rever.c index c49eeec45..719cbdea3 100644 --- a/ldap/servers/plugins/rever/rever.c +++ b/ldap/servers/plugins/rever/rever.c @@ -68,7 +68,6 @@ aes_dec( char *pwd, char *alg ) int aes_init( Slapi_PBlock *pb) { - char *name = slapi_ch_strdup(AES_REVER_SCHEME_NAME); int rc; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> aes_init\n" ); @@ -78,7 +77,7 @@ aes_init( Slapi_PBlock *pb) rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *) aes_enc); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) aes_cmp ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_DEC_FN, (void *) aes_dec ); - rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, name ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, AES_REVER_SCHEME_NAME ); init_pbe_plugin(); @@ -130,7 +129,6 @@ des_dec( char *pwd ) int des_init( Slapi_PBlock *pb ) { - char *name = slapi_ch_strdup(DES_REVER_SCHEME_NAME); int rc; slapi_log_error( SLAPI_LOG_PLUGIN, plugin_name, "=> des_init\n" ); @@ -140,7 +138,7 @@ des_init( Slapi_PBlock *pb ) rc |= slapi_pblock_set( pb, 
SLAPI_PLUGIN_PWD_STORAGE_SCHEME_ENC_FN, (void *) des_enc); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_CMP_FN, (void *) des_cmp ); rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_DEC_FN, (void *) des_dec ); - rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, name ); + rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME, DES_REVER_SCHEME_NAME ); init_pbe_plugin(); diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c index 72053378d..db5a9e7f2 100644 --- a/ldap/servers/slapd/pblock.c +++ b/ldap/servers/slapd/pblock.c @@ -3473,7 +3473,7 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value ) /* password storage scheme (kexcoff) */ case SLAPI_PLUGIN_PWD_STORAGE_SCHEME_NAME: - pblock->pb_plugin->plg_pwdstorageschemename = (char *)value; + pblock->pb_plugin->plg_pwdstorageschemename = slapi_ch_strdup((char *)value); break; case SLAPI_PLUGIN_PWD_STORAGE_SCHEME_USER_PWD: pblock->pb_pwd_storage_scheme_user_passwd = (char *)value;
0
ed5b13cab68f82ae6a1abe03bf41eb7369c30f2c
389ds/389-ds-base
Issue 51000 - Separate the BDB backend monitors Bug Description: While trying to remove duplicate code from the backend and BDB backend code, I found that we were not correctly separating the BDB monitors from the core backend code. Fix Description: Move all the monitor registering to the db_layer private structure. This way we have fully isolated the monitors for each backend implementation/library. This also removed some duplicate code from the core backend and BDB code. relates: https://pagure.io/389-ds-base/issue/51000 Reviewed by: spichugi(Thanks!)
commit ed5b13cab68f82ae6a1abe03bf41eb7369c30f2c Author: Mark Reynolds <[email protected]> Date: Mon Jul 6 15:18:21 2020 -0400 Issue 51000 - Separate the BDB backend monitors Bug Description: While trying to remove duplicate code from the backend and BDB backend code, I found that we were not correctly separating the BDB monitors from the core backend code. Fix Description: Move all the monitor registering to the db_layer private structure. This way we have fully isolated the monitors for each backend implementation/library. This also removed some duplicate code from the core backend and BDB code. relates: https://pagure.io/389-ds-base/issue/51000 Reviewed by: spichugi(Thanks!) diff --git a/dirsrvtests/tests/suites/monitor/monitor_test.py b/dirsrvtests/tests/suites/monitor/monitor_test.py index 5786903f3..98dd62dab 100644 --- a/dirsrvtests/tests/suites/monitor/monitor_test.py +++ b/dirsrvtests/tests/suites/monitor/monitor_test.py @@ -2,6 +2,7 @@ import logging import pytest import os from lib389.monitor import * +from lib389.backend import Backends, DatabaseConfig from lib389._constants import * from lib389.topologies import topology_st as topo @@ -63,6 +64,87 @@ def test_monitor(topo): log.info('dtablesize: {0[0]},readwaiters: {0[1]},entriessent: {0[2]},bytessent: {0[3]},currenttime: {0[4]},starttime: {0[5]}'.format(stats)) +pytestmark = pytest.mark.tier1 +def test_monitor_ldbm(topo): + """This test is to check if we are getting the correct monitor entry + + :id: e62ba369-32f5-4b03-8865-f597a5bb6a70 + :setup: Single instance + :steps: + 1. Get the backend library (bdb, ldbm, etc) + 2. Get the database monitor + 3. Check for expected attributes in output + 4. Check for expected DB library specific attributes + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + # Are we using BDB? 
+ db_config = DatabaseConfig(topo.standalone) + db_lib = db_config.get_db_lib() + + # Get the database monitor entry + monitor = MonitorLDBM(topo.standalone).get_status() + + # Check that known attributes exist (only NDN cache stats) + assert 'normalizeddncachehits' in monitor + + # Check for library specific attributes + if db_lib == 'bdb': + assert 'dbcachehits' in monitor + assert 'nsslapd-db-configured-locks' in monitor + elif db_lib == 'lmdb': + pass + else: + # Unknown - the server would probably fail to start but check it anyway + log.fatal(f'Unknown backend library: {db_lib}') + assert False + + +pytestmark = pytest.mark.tier1 +def test_monitor_backend(topo): + """This test is to check if we are getting the correct backend monitor entry + + :id: 27b0534f-a18c-4c95-aa2b-936bc1886a7b + :setup: Single instance + :steps: + 1. Get the backend library (bdb, ldbm, etc) + 2. Get the backend monitor + 3. Check for expected attributes in output + 4. Check for expected DB library specific attributes + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + # Are we using BDB? 
+ db_config = DatabaseConfig(topo.standalone) + db_lib = db_config.get_db_lib() + + # Get the backend monitor + be = Backends(topo.standalone).list()[0] + monitor = be.get_monitor().get_status() + + # Check for expected attributes + assert 'entrycachehits' in monitor + assert 'dncachehits' in monitor + + # Check for library specific attributes + if db_lib == 'bdb': + assert 'dbfilename-0' in monitor + elif db_lib == 'lmdb': + pass + else: + # Unknown - the server would probably fail to start but check it anyway + log.fatal(f'Unknown backend library: {db_lib}') + assert False + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c index 7c05d2f90..738b841aa 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c @@ -88,6 +88,7 @@ int bdb_init(struct ldbminfo *li, config_info *config_array) priv->instance_postdel_config_fn = &bdb_instance_post_delete_instance_entry_callback; priv->instance_cleanup_fn = &bdb_instance_cleanup; priv->instance_create_fn = &bdb_instance_create; + priv->instance_register_monitor_fn = &bdb_instance_register_monitor; priv->instance_search_callback_fn = &bdb_instance_search_callback; priv->dblayer_auto_tune_fn = &bdb_start_autotune; return 0; @@ -1530,6 +1531,7 @@ bail: } return rval; } + /* Reads in any config information held in the dse for the bdb * implementation of the ldbm plugin. 
* Creates dse entries used to configure the ldbm plugin and dblayer @@ -1640,7 +1642,7 @@ retry: * Now still using ldbm functions */ slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_back_monitor_search, + LDAP_SCOPE_BASE, "(objectclass=*)", bdb_monitor_search, (void *)li); slapi_ch_free_string(&dn); @@ -1656,7 +1658,7 @@ retry: goto bail; } slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_back_dbmonitor_search, + LDAP_SCOPE_BASE, "(objectclass=*)", bdb_dbmonitor_search, (void *)li); bail: @@ -1664,6 +1666,72 @@ bail: return rval; } +/* general-purpose callback to deny an operation */ +static int +bdb_deny_config(Slapi_PBlock *pb __attribute__((unused)), + Slapi_Entry *e __attribute__((unused)), + Slapi_Entry *entryAfter __attribute__((unused)), + int *returncode, + char *returntext __attribute__((unused)), + void *arg __attribute__((unused))) +{ + *returncode = LDAP_UNWILLING_TO_PERFORM; + return SLAPI_DSE_CALLBACK_ERROR; +} + +int +bdb_instance_register_monitor(ldbm_instance *inst) { + struct ldbminfo *li = inst->inst_li; + char *dn = NULL; + + dn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config", + inst->inst_name, li->li_plugin->plg_name); + if (NULL == dn) { + slapi_log_err(SLAPI_LOG_ERR, + "bdb_instance_register_monitor", + "failed create monitor instance dn for plugin %s, " + "instance %s\n", + inst->inst_li->li_plugin->plg_name, inst->inst_name); + return 1; + } + /* make callback on search; deny add/modify/delete */ + slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn, + LDAP_SCOPE_BASE, "(objectclass=*)", bdb_monitor_instance_search, + (void *)inst); + slapi_config_register_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, dn, + LDAP_SCOPE_SUBTREE, "(objectclass=*)", bdb_deny_config, + (void *)inst); + slapi_config_register_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, dn, + 
LDAP_SCOPE_BASE, "(objectclass=*)", bdb_deny_config, + (void *)inst); + slapi_ch_free_string(&dn); + + return 0; +} + +void +bdb_instance_unregister_monitor(ldbm_instance *inst) { + struct ldbminfo *li = inst->inst_li; + char *dn = NULL; + + dn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config", + inst->inst_name, li->li_plugin->plg_name); + if (NULL == dn) { + slapi_log_err(SLAPI_LOG_ERR, + "bdb_instance_unregister_monitor", + "Failed create monitor instance dn for plugin %s, " + "instance %s\n", + inst->inst_li->li_plugin->plg_name, inst->inst_name); + return; + } + slapi_config_remove_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn, + LDAP_SCOPE_BASE, "(objectclass=*)", bdb_monitor_instance_search); + slapi_config_remove_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, dn, + LDAP_SCOPE_SUBTREE, "(objectclass=*)", bdb_deny_config); + slapi_config_remove_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, dn, + LDAP_SCOPE_BASE, "(objectclass=*)", bdb_deny_config); + slapi_ch_free_string(&dn); +} /* Utility function used in creating config entries. 
Using the * config_info, this function gets info and formats in the correct diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c index 6a21c2242..0ac3694b6 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c @@ -197,6 +197,8 @@ bdb_instance_post_delete_instance_entry_callback(struct ldbminfo *li, struct ldb if (inst_dirp != inst_dir) { slapi_ch_free_string(&inst_dirp); } + /* unregister the monitor */ + bdb_instance_unregister_monitor(inst); } /* non-null pEnv */ return SLAPI_DSE_CALLBACK_OK; } diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h index d729327e8..bf00d2e9a 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h @@ -150,7 +150,6 @@ int bdb_back_ok_to_dump(const char *dn, char **include, char **exclude); int bdb_back_fetch_incl_excl(Slapi_PBlock *pb, char ***include, char ***exclude); PRUint64 bdb_get_id2entry_size(ldbm_instance *inst); - /* bdb version functions */ int bdb_version_write(struct ldbminfo *li, const char *directory, const char *dataversion, PRUint32 flags); int bdb_version_read(struct ldbminfo *li, const char *directory, char **ldbmversion, char **dataversion); @@ -162,3 +161,10 @@ int bdb_instance_post_delete_instance_entry_callback(struct ldbminfo *li, struct int bdb_instance_add_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst); int bdb_instance_postadd_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst); void bdb_config_setup_default(struct ldbminfo *li); + +/* monitor functions */ +int bdb_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg); +int bdb_monitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, 
char *returntext, void *arg); +int bdb_dbmonitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg); +int bdb_instance_register_monitor(ldbm_instance *inst); +void bdb_instance_unregister_monitor(ldbm_instance *inst); diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c index 9ffd877cb..506c285a3 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c @@ -3145,7 +3145,7 @@ bdb_upgradednformat(Slapi_PBlock *pb) run_from_cmdline = (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE); slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); if (run_from_cmdline) { - ldbm_config_load_dse_info(li); + bdb_config_load_dse_info(li); if (bdb_check_and_set_import_cache(li) < 0) { return -1; } diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c index ac3479d1c..8f954c474 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c @@ -33,12 +33,12 @@ /* DSE callback to monitor stats for a particular instance */ int -ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), - Slapi_Entry *e, - Slapi_Entry *entryAfter __attribute__((unused)), - int *returncode, - char *returntext __attribute__((unused)), - void *arg) +bdb_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), + Slapi_Entry *e, + Slapi_Entry *entryAfter __attribute__((unused)), + int *returncode, + char *returntext __attribute__((unused)), + void *arg) { ldbm_instance *inst = (ldbm_instance *)arg; struct ldbminfo *li = NULL; @@ -150,8 +150,8 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), continue; /* Since the filenames are now relative, we need to construct an absolute version - * for the purpose of stat() etc below... 
- */ + * for the purpose of stat() etc below... + */ slapi_ch_free_string(&absolute_pathname); absolute_pathname = slapi_ch_smprintf("%s%c%s", inst->inst_parent_dir_name, get_sep(inst->inst_parent_dir_name), mpfstat[i]->file_name); @@ -159,10 +159,10 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), if (stat(absolute_pathname, &astat)) continue; /* If the file has been re-created after been deleted - * We should show only statistics for the last instance - * Since SleepyCat returns the statistic of the last open file first, - * we should only display the first statistic record for a given file - */ + * We should show only statistics for the last instance + * Since SleepyCat returns the statistic of the last open file first, + * we should only display the first statistic record for a given file + */ for (j = 0; j < i; j++) if (!strcmp(mpfstat[i]->file_name, mpfstat[j]->file_name)) break; @@ -194,7 +194,7 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), /* monitor global ldbm stats */ int -ldbm_back_monitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg) +bdb_monitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg) { struct ldbminfo *li = (struct ldbminfo *)arg; struct berval val; @@ -288,7 +288,7 @@ ldbm_back_monitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAft /* monitor global ldbm database stats */ int -ldbm_back_dbmonitor_search(Slapi_PBlock *pb __attribute__((unused)), +bdb_dbmonitor_search(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *e, Slapi_Entry *entryAfter __attribute__((unused)), int *returncode, diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c index 32933a804..42d1cd145 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c +++ 
b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c @@ -171,7 +171,7 @@ bdb_verify(Slapi_PBlock *pb) slapi_pblock_get(pb, SLAPI_SEQ_TYPE, &verbose); slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); slapi_pblock_get(pb, SLAPI_DBVERIFY_DBDIR, &dbdir); - ldbm_config_load_dse_info(li); + bdb_config_load_dse_info(li); bdb_config_internal_set(li, CONFIG_DB_TRANSACTION_LOGGING, "off"); /* no write needed; choose EXPORT MODE */ diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index c9f43c712..05cc5b891 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -273,10 +273,9 @@ dblayer_setup(struct ldbminfo *li) } ldbm_config_load_dse_info(li); - priv = (dblayer_private *)li->li_dblayer_private; - rc = priv->dblayer_load_dse_fn(li); + return rc; } diff --git a/ldap/servers/slapd/back-ldbm/dblayer.h b/ldap/servers/slapd/back-ldbm/dblayer.h index 8e156dd28..cd53f25be 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.h +++ b/ldap/servers/slapd/back-ldbm/dblayer.h @@ -147,6 +147,7 @@ struct dblayer_private instance_config_entry_callback_fn_t *instance_postdel_config_fn; instance_cleanup_fn_t *instance_cleanup_fn; instance_create_fn_t *instance_create_fn; + instance_create_fn_t *instance_register_monitor_fn; instance_search_callback_fn_t *instance_search_callback_fn; dblayer_auto_tune_fn_t *dblayer_auto_tune_fn; }; diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c index 88c186359..3fe86d567 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c @@ -1113,38 +1113,6 @@ ldbm_config_load_dse_info(struct ldbminfo *li) (void *)li); slapi_ch_free_string(&dn); - /* setup the dse callback functions for the ldbm backend monitor entry */ - dn = slapi_create_dn_string("cn=monitor,cn=%s,cn=plugins,cn=config", - li->li_plugin->plg_name); - if (NULL == dn) { - slapi_log_err(SLAPI_LOG_ERR, - 
"ldbm_config_load_dse_info", - "failed to create monitor dn for %s\n", - li->li_plugin->plg_name); - rval = 1; - goto bail; - } - slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_back_monitor_search, - (void *)li); - slapi_ch_free_string(&dn); - - /* And the ldbm backend database monitor entry */ - dn = slapi_create_dn_string("cn=database,cn=monitor,cn=%s,cn=plugins,cn=config", - li->li_plugin->plg_name); - if (NULL == dn) { - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_config_load_dse_info", - "failed create monitor database dn for %s\n", - li->li_plugin->plg_name); - rval = 1; - goto bail; - } - slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_back_dbmonitor_search, - (void *)li); - slapi_ch_free_string(&dn); - /* setup the dse callback functions for the ldbm backend instance * entries */ dn = slapi_create_dn_string("cn=%s,cn=plugins,cn=config", diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c index 36ec11202..56bf35f4a 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c @@ -675,32 +675,6 @@ ldbm_instance_config_load_dse_info(ldbm_instance *inst) LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_instance_deny_config, (void *)inst); /* delete is handled by a callback set in ldbm_config.c */ - - slapi_ch_free_string(&dn); - - /* don't forget the monitor! 
*/ - dn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config", - inst->inst_name, li->li_plugin->plg_name); - if (NULL == dn) { - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_instance_config_load_dse_info", - "failed create monitor instance dn for plugin %s, " - "instance %s\n", - inst->inst_li->li_plugin->plg_name, inst->inst_name); - rval = 1; - goto bail; - } - /* make callback on search; deny add/modify/delete */ - slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_back_monitor_instance_search, - (void *)inst); - slapi_config_register_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_SUBTREE, "(objectclass=*)", ldbm_instance_deny_config, - (void *)inst); - slapi_config_register_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_instance_deny_config, - (void *)inst); - /* delete is okay */ slapi_ch_free_string(&dn); /* Callbacks to handle indexes */ @@ -908,6 +882,7 @@ static int ldbm_instance_generate(struct ldbminfo *li, char *instance_name, Slapi_Backend **ret_be) { Slapi_Backend *new_be = NULL; + dblayer_private *priv = (dblayer_private *)li->li_dblayer_private; int rc = 0; /* Create a new instance, process config info for it, @@ -922,6 +897,8 @@ ldbm_instance_generate(struct ldbminfo *li, char *instance_name, Slapi_Backend * } ldbm_instance_config_load_dse_info(new_be->be_instance_info); + priv->instance_register_monitor_fn(new_be->be_instance_info); + ldbm_instance_create_default_indexes(new_be); /* if USN plugin is enabled, set slapi_counter */ @@ -984,24 +961,6 @@ ldbm_instance_unregister_callbacks(ldbm_instance *inst) ldbm_instance_deny_config); slapi_ch_free_string(&dn); - /* now the cn=monitor entry */ - dn = slapi_create_dn_string("cn=monitor,cn=%s,cn=%s,cn=plugins,cn=config", - inst->inst_name, li->li_plugin->plg_name); - if (NULL == dn) { - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_instance_unregister_callbacks", - 
"Failed create monitor instance dn for plugin %s, " - "instance %s\n", - inst->inst_li->li_plugin->plg_name, inst->inst_name); - goto bail; - } - slapi_config_remove_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_back_monitor_instance_search); - slapi_config_remove_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_SUBTREE, "(objectclass=*)", ldbm_instance_deny_config); - slapi_config_remove_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, dn, - LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_instance_deny_config); - slapi_ch_free_string(&dn); /* now the cn=index entries */ dn = slapi_create_dn_string("cn=index,cn=%s,cn=%s,cn=plugins,cn=config", diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h index 43793f66a..5d618a89c 100644 --- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h @@ -479,14 +479,6 @@ int ldbm_back_init(Slapi_PBlock *pb); void ldbm_back_prev_search_results(Slapi_PBlock *pb); int ldbm_back_isinitialized(void); -/* - * monitor.c - */ - -int ldbm_back_monitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg); -int ldbm_back_monitor_instance_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg); -int ldbm_back_dbmonitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg); - /* * vlv.c */ diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py index e4938b2ff..13177ca33 100644 --- a/src/lib389/lib389/backend.py +++ b/src/lib389/lib389/backend.py @@ -1035,6 +1035,10 @@ class DatabaseConfig(DSLdapObject): vo = vo[0] self._instance.log.info(f'{k}: {vo}') + def get_db_lib(self): + """Return the backend library, bdb, lmdb, etc""" + return self._db_lib + def set(self, value_pairs): for attr, val in 
value_pairs: attr = attr.lower()
0
0ad289c99e8a5595117c53514e2f79bc71bafb84
389ds/389-ds-base
Ticket 48978 - error log refactoring error Description: Fix refectoring errors https://fedorahosted.org/389/ticket/48978 Reviewed by: mreynolds(oneline commit rule)
commit 0ad289c99e8a5595117c53514e2f79bc71bafb84 Author: Mark Reynolds <[email protected]> Date: Tue Nov 1 11:57:52 2016 -0400 Ticket 48978 - error log refactoring error Description: Fix refectoring errors https://fedorahosted.org/389/ticket/48978 Reviewed by: mreynolds(oneline commit rule) diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c index 6b8db10ca..f9de439de 100644 --- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c @@ -1599,9 +1599,9 @@ ldbm_back_ldbm2ldif( Slapi_PBlock *pb ) "%s: Processed %d entries (100%%).", inst->inst_name, cnt); } - slapi_log_err(SLAPI_LOG_INFO, + slapi_log_err(SLAPI_LOG_INFO, "ldbm_back_ldbm2ldif", "export %s: Processed %d entries (100%%).\n", - inst->inst_name, cnt, 0); + inst->inst_name, cnt); } bye: if (idl) { diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c index 9765779a7..836e67f9a 100644 --- a/ldap/servers/slapd/back-ldbm/start.c +++ b/ldap/servers/slapd/back-ldbm/start.c @@ -150,10 +150,9 @@ ldbm_back_start( Slapi_PBlock *pb ) cache_size = (PRUint64)cache_get_max_size(&(inst->inst_cache)); db_size = dblayer_get_id2entry_size(inst); if (cache_size < db_size) { - slapi_log_err(SLAPI_LOG_NOTICE, - "ldbm_back_start - " - "%s: entry cache size %llu B is " - "less than db size %llu B; " + slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", + "%s: entry cache size %lu B is " + "less than db size %lu B; " "We recommend to increase the entry cache size " "nsslapd-cachememsize.\n", inst->inst_name, cache_size, db_size); @@ -267,10 +266,8 @@ ldbm_back_start( Slapi_PBlock *pb ) "It is highly likely your memory configuration of all backends will EXCEED your systems memory.\n"); slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "In a future release this WILL prevent server start up. 
You MUST alter your configuration.\n"); - slapi_log_err(SLAPI_LOG_CRIT, - "ldbm_back_start", "Total entry cache size: %lu B; " - "dbcache size: %lu B; " - "available memory size: %lu B; \n", + slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Total entry cache size: %lu B; " + "dbcache size: %lu B; available memory size: %lu B; \n", #ifdef LINUX (PRUint64)total_cache_size, (PRUint64)li->li_dbcachesize, availpages * pagesize #else
0
90dd9bb3c1411daca353d055d90618e67aa1fa7e
389ds/389-ds-base
Trac Ticket #443 - Deleting attribute present in nsslapd-allowed-to-delete-attrs returns Operations error https://fedorahosted.org/389/ticket/443 Bug Description: Even if setting a config parameter to nsslapd- allowed-to-delete-attrs, the value failed to delete if the type was on|off or integer. Fix Description: Store all the initial config param values in ConfigList. If the attribute value is deleted, reset the initial value.
commit 90dd9bb3c1411daca353d055d90618e67aa1fa7e Author: Noriko Hosoi <[email protected]> Date: Tue Nov 6 18:15:46 2012 -0800 Trac Ticket #443 - Deleting attribute present in nsslapd-allowed-to-delete-attrs returns Operations error https://fedorahosted.org/389/ticket/443 Bug Description: Even if setting a config parameter to nsslapd- allowed-to-delete-attrs, the value failed to delete if the type was on|off or integer. Fix Description: Store all the initial config param values in ConfigList. If the attribute value is deleted, reset the initial value. diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index dc8452aa1..bd1062def 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -125,6 +125,139 @@ static int config_set_onoff( const char *attrname, char *value, static int config_set_schemareplace ( const char *attrname, char *value, char *errorbuf, int apply ); +/* Keeping the initial values */ +/* CONFIG_INT/CONFIG_LONG */ +#define DEFAULT_LOG_ROTATIONSYNCHOUR "0" +#define DEFAULT_LOG_ROTATIONSYNCMIN "0" +#define DEFAULT_LOG_ROTATIONTIME "1" +#define DEFAULT_LOG_ACCESS_MAXNUMLOGS "10" +#define DEFAULT_LOG_MAXNUMLOGS "1" +#define DEFAULT_LOG_EXPTIME "1" +#define DEFAULT_LOG_ACCESS_MAXDISKSPACE "500" +#define DEFAULT_LOG_MAXDISKSPACE "100" +#define DEFAULT_LOG_MAXLOGSIZE "100" +#define DEFAULT_LOG_MINFREESPACE "5" +#define DEFAULT_ACCESSLOGLEVEL "256" +#define DEFAULT_SIZELIMIT "2000" +#define DEFAULT_TIMELIMIT "3600" +#define DEFAULT_PAGEDSIZELIMIT "0" +#define DEFAULT_IDLE_TIMEOUT "0" +#define DEFAULT_MAXDESCRIPTORS "1024" +#define DEFAULT_RESERVE_FDS "64" +#define DEFAULT_MAX_BERSIZE "0" +#define DEFAULT_MAX_THREADS "30" +#define DEFAULT_MAX_THREADS_PER_CONN "5" +#define DEFAULT_IOBLOCK_TIMEOUT "1800000" +#define DEFAULT_OUTBOUND_LDAP_IO_TIMEOUT "300000" +#define DEFAULT_MAX_FILTER_NEST_LEVEL "40" +#define DEFAULT_GROUPEVALNESTLEVEL "0" +#define DEFAULT_MAX_SASLIO_SIZE "2097152" +#define DEFAULT_DISK_THRESHOLD 
"2097152" +#define DEFAULT_DISK_GRACE_PERIOD "60" +#define DEFAULT_LOCAL_SSF "71" +#define DEFAULT_MIN_SSF "0" +#define DEFAULT_PW_INHISTORY "6" +#define DEFAULT_PW_GRACELIMIT "0" +#define DEFAULT_PW_MINLENGTH "0" +#define DEFAULT_PW_MINDIGITS "0" +#define DEFAULT_PW_MINALPHAS "0" +#define DEFAULT_PW_MINUPPERS "0" +#define DEFAULT_PW_MINLOWERS "0" +#define DEFAULT_PW_MINSPECIALS "0" +#define DEFAULT_PW_MIN8BIT "0" +#define DEFAULT_PW_MAXREPEATS "0" +#define DEFAULT_PW_MINCATEGORIES "3" +#define DEFAULT_PW_MINTOKENLENGTH "3" +#define DEFAULT_PW_MAXAGE "8640000" +#define DEFAULT_PW_MINAGE "0" +#define DEFAULT_PW_WARNING "86400" +#define DEFAULT_PW_MAXFAILURE "3" +#define DEFAULT_PW_RESETFAILURECOUNT "600" +#define DEFAULT_PW_LOCKDURATION "3600" +#define DEFAULT_NDN_SIZE "20971520" +#ifdef MEMPOOL_EXPERIMENTAL +#define DEFAULT_MEMPOOL_MAXFREELIST "1024" +#endif + +/* CONFIG_STRING... */ +#define INIT_ACCESSLOG_MODE "600" +#define INIT_ERRORLOG_MODE "600" +#define INIT_AUDITLOG_MODE "600" +#define INIT_ACCESSLOG_ROTATIONUNIT "day" +#define INIT_ERRORLOG_ROTATIONUNIT "week" +#define INIT_AUDITLOG_ROTATIONUNIT "week" +#define INIT_ACCESSLOG_EXPTIMEUNIT "month" +#define INIT_ERRORLOG_EXPTIMEUNIT "month" +#define INIT_AUDITLOG_EXPTIMEUNIT "month" +#define DEFAULT_DIRECTORY_MANAGER "cn=Directory Manager" +#define DEFAULT_UIDNUM_TYPE "uidNumber" +#define DEFAULT_GIDNUM_TYPE "gidNumber" +#define DEFAULT_LDAPI_SEARCH_BASE "dc=example,dc=com" +#define DEFAULT_LDAPI_AUTO_DN "cn=peercred,cn=external,cn=auth" +#define ENTRYUSN_IMPORT_INIT "0" +#define DEFAULT_ALLOWED_TO_DELETE_ATTRS "nsslapd-listenhost nsslapd-securelistenhost nsslapd-defaultnamingcontext" +#define SALTED_SHA1_SCHEME_NAME "SSHA" + +/* CONFIG_ON_OFF */ +int init_accesslog_rotationsync_enabled; +int init_errorlog_rotationsync_enabled; +int init_auditlog_rotationsync_enabled; +int init_accesslog_logging_enabled; +int init_accesslogbuffering; +int init_errorlog_logging_enabled; +int init_auditlog_logging_enabled; +int 
init_auditlog_logging_hide_unhashed_pw; +int init_csnlogging; +int init_pw_unlock; +int init_pw_must_change; +int init_pwpolicy_local; +int init_pw_lockout; +int init_pw_history; +int init_pw_is_global_policy; +int init_pw_is_legacy; +int init_pw_track_update_time; +int init_pw_change; +int init_pw_exp; +int init_pw_syntax; +int init_schemacheck; +int init_ds4_compatible_schema; +int init_schema_ignore_trailing_spaces; +int init_enquote_sup_oc; +int init_rewrite_rfc1274; +int init_syntaxcheck; +int init_syntaxlogging; +int init_dn_validate_strict; +int init_attrname_exceptions; +int init_return_exact_case; +int init_result_tweak; +int init_plugin_track; +int init_lastmod; +int init_readonly; +int init_accesscontrol; +int init_nagle; +int init_security; +int init_ssl_check_hostname; +int init_ldapi_switch; +int init_ldapi_bind_switch; +int init_ldapi_map_entries; +int init_allow_unauth_binds; +int init_require_secure_binds; +int init_minssf_exclude_rootdse; +int init_force_sasl_external; +int init_slapi_counters; +int init_entryusn_global; +int init_disk_monitoring; +int init_disk_logging_critical; +int init_disk_preserve_logging; +int init_ndn_cache_enabled; +#ifdef MEMPOOL_EXPERIMENTAL +int init_mempool_switch; +#endif + +#define DEFAULT_SSLCLIENTAPTH "off" +#define DEFAULT_ALLOW_ANON_ACCESS "on" +#define DEFAULT_VALIDATE_CERT "warn" + static int isInt(ConfigVarType type) { @@ -145,567 +278,740 @@ static struct config_get_and_set { void** config_var_addr; /* address of member of slapdFrontendConfig struct */ ConfigVarType config_var_type; /* cast to this type when getting */ ConfigGetFunc getfunc; /* for special handling */ + void *initvalue; } ConfigList[] = { {CONFIG_AUDITLOG_MODE_ATTRIBUTE, NULL, log_set_mode, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_mode, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_mode, + CONFIG_STRING, NULL, INIT_AUDITLOG_MODE}, {CONFIG_AUDITLOG_LOGROTATIONSYNCENABLED_ATTRIBUTE, NULL, 
log_set_rotationsync_enabled, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_rotationsync_enabled, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_rotationsync_enabled, + CONFIG_ON_OFF, NULL, &init_auditlog_rotationsync_enabled}, {CONFIG_AUDITLOG_LOGROTATIONSYNCHOUR_ATTRIBUTE, NULL, log_set_rotationsynchour, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_rotationsynchour, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_rotationsynchour, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONSYNCHOUR}, {CONFIG_AUDITLOG_LOGROTATIONSYNCMIN_ATTRIBUTE, NULL, log_set_rotationsyncmin, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_rotationsyncmin, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_rotationsyncmin, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONSYNCMIN}, {CONFIG_AUDITLOG_LOGROTATIONTIME_ATTRIBUTE, NULL, log_set_rotationtime, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_rotationtime, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_rotationtime, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONTIME}, {CONFIG_ACCESSLOG_MODE_ATTRIBUTE, NULL, log_set_mode, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_mode, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_mode, + CONFIG_STRING, NULL, INIT_ACCESSLOG_MODE}, {CONFIG_ACCESSLOG_MAXNUMOFLOGSPERDIR_ATTRIBUTE, NULL, log_set_numlogsperdir, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_maxnumlogs, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_maxnumlogs, + CONFIG_INT, NULL, DEFAULT_LOG_ACCESS_MAXNUMLOGS}, {CONFIG_LOGLEVEL_ATTRIBUTE, config_set_errorlog_level, NULL, 0, (void**)&global_slapdFrontendConfig.errorloglevel, - CONFIG_SPECIAL_ERRORLOGLEVEL, NULL}, + CONFIG_SPECIAL_ERRORLOGLEVEL, NULL, NULL}, {CONFIG_ERRORLOG_LOGGING_ENABLED_ATTRIBUTE, NULL, log_set_logging, SLAPD_ERROR_LOG, - 
(void**)&global_slapdFrontendConfig.errorlog_logging_enabled, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_logging_enabled, + CONFIG_ON_OFF, NULL, &init_errorlog_logging_enabled}, {CONFIG_ERRORLOG_MODE_ATTRIBUTE, NULL, log_set_mode, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_mode, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_mode, + CONFIG_STRING, NULL, INIT_ERRORLOG_MODE}, {CONFIG_ERRORLOG_LOGEXPIRATIONTIME_ATTRIBUTE, NULL, log_set_expirationtime, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_exptime, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_exptime, + CONFIG_INT, NULL, DEFAULT_LOG_EXPTIME}, {CONFIG_ACCESSLOG_LOGGING_ENABLED_ATTRIBUTE, NULL, log_set_logging, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_logging_enabled, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_logging_enabled, + CONFIG_ON_OFF, NULL, &init_accesslog_logging_enabled}, {CONFIG_PORT_ATTRIBUTE, config_set_port, NULL, 0, - (void**)&global_slapdFrontendConfig.port, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.port, + CONFIG_INT, NULL, NULL/* deletion is not allowed */}, {CONFIG_WORKINGDIR_ATTRIBUTE, config_set_workingdir, NULL, 0, - (void**)&global_slapdFrontendConfig.workingdir, CONFIG_STRING_OR_EMPTY, NULL}, + (void**)&global_slapdFrontendConfig.workingdir, + CONFIG_STRING_OR_EMPTY, NULL, NULL/* deletion is not allowed */}, {CONFIG_MAXTHREADSPERCONN_ATTRIBUTE, config_set_maxthreadsperconn, NULL, 0, - (void**)&global_slapdFrontendConfig.maxthreadsperconn, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.maxthreadsperconn, + CONFIG_INT, NULL, DEFAULT_MAX_THREADS_PER_CONN}, {CONFIG_ACCESSLOG_LOGEXPIRATIONTIME_ATTRIBUTE, NULL, log_set_expirationtime, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_exptime, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_exptime, + CONFIG_INT, NULL, 
DEFAULT_LOG_EXPTIME}, #ifndef _WIN32 {CONFIG_LOCALUSER_ATTRIBUTE, config_set_localuser, NULL, 0, - (void**)&global_slapdFrontendConfig.localuser, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.localuser, + CONFIG_STRING, NULL, NULL/* deletion is not allowed */}, #endif {CONFIG_ERRORLOG_LOGROTATIONSYNCENABLED_ATTRIBUTE, NULL, log_set_rotationsync_enabled, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_rotationsync_enabled, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_rotationsync_enabled, + CONFIG_ON_OFF, NULL, &init_errorlog_rotationsync_enabled}, {CONFIG_ERRORLOG_LOGROTATIONSYNCHOUR_ATTRIBUTE, NULL, log_set_rotationsynchour, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_rotationsynchour, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_rotationsynchour, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONSYNCHOUR}, {CONFIG_ERRORLOG_LOGROTATIONSYNCMIN_ATTRIBUTE, NULL, log_set_rotationsyncmin, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_rotationsyncmin, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_rotationsyncmin, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONSYNCMIN}, {CONFIG_ERRORLOG_LOGROTATIONTIME_ATTRIBUTE, NULL, log_set_rotationtime, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_rotationtime, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_rotationtime, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONTIME}, {CONFIG_PW_INHISTORY_ATTRIBUTE, config_set_pw_inhistory, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_inhistory, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_inhistory, + CONFIG_INT, NULL, DEFAULT_PW_INHISTORY}, {CONFIG_PW_STORAGESCHEME_ATTRIBUTE, config_set_pw_storagescheme, - NULL, 0, NULL, CONFIG_STRING, (ConfigGetFunc)config_get_pw_storagescheme}, + NULL, 0, NULL, + CONFIG_STRING, (ConfigGetFunc)config_get_pw_storagescheme, + SALTED_SHA1_SCHEME_NAME}, 
{CONFIG_PW_UNLOCK_ATTRIBUTE, config_set_pw_unlock, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_unlock, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_unlock, + CONFIG_ON_OFF, NULL, &init_pw_unlock}, {CONFIG_PW_GRACELIMIT_ATTRIBUTE, config_set_pw_gracelimit, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_gracelimit, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_gracelimit, + CONFIG_INT, NULL, DEFAULT_PW_GRACELIMIT}, {CONFIG_ACCESSLOG_LOGROTATIONSYNCENABLED_ATTRIBUTE, NULL, log_set_rotationsync_enabled, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_rotationsync_enabled, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_rotationsync_enabled, + CONFIG_ON_OFF, NULL, &init_accesslog_rotationsync_enabled}, {CONFIG_ACCESSLOG_LOGROTATIONSYNCHOUR_ATTRIBUTE, NULL, log_set_rotationsynchour, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_rotationsynchour, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_rotationsynchour, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONSYNCHOUR}, {CONFIG_ACCESSLOG_LOGROTATIONSYNCMIN_ATTRIBUTE, NULL, log_set_rotationsyncmin, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_rotationsyncmin, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_rotationsyncmin, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONSYNCMIN}, {CONFIG_ACCESSLOG_LOGROTATIONTIME_ATTRIBUTE, NULL, log_set_rotationtime, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_rotationtime, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_rotationtime, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONTIME}, {CONFIG_PW_MUSTCHANGE_ATTRIBUTE, config_set_pw_must_change, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_must_change, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_must_change, + CONFIG_ON_OFF, NULL, &init_pw_must_change}, 
{CONFIG_PWPOLICY_LOCAL_ATTRIBUTE, config_set_pwpolicy_local, NULL, 0, - (void**)&global_slapdFrontendConfig.pwpolicy_local, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pwpolicy_local, + CONFIG_ON_OFF, NULL, &init_pwpolicy_local}, {CONFIG_AUDITLOG_MAXLOGDISKSPACE_ATTRIBUTE, NULL, log_set_maxdiskspace, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_maxdiskspace, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_maxdiskspace, + CONFIG_INT, NULL, DEFAULT_LOG_MAXDISKSPACE}, {CONFIG_SIZELIMIT_ATTRIBUTE, config_set_sizelimit, NULL, 0, - (void**)&global_slapdFrontendConfig.sizelimit, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.sizelimit, + CONFIG_INT, NULL, DEFAULT_SIZELIMIT}, {CONFIG_AUDITLOG_MAXLOGSIZE_ATTRIBUTE, NULL, log_set_logsize, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_maxlogsize, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_maxlogsize, + CONFIG_INT, NULL, DEFAULT_LOG_MAXLOGSIZE}, {CONFIG_PW_WARNING_ATTRIBUTE, config_set_pw_warning, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_warning, CONFIG_LONG, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_warning, + CONFIG_LONG, NULL, DEFAULT_PW_WARNING}, {CONFIG_READONLY_ATTRIBUTE, config_set_readonly, NULL, 0, - (void**)&global_slapdFrontendConfig.readonly, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.readonly, + CONFIG_ON_OFF, NULL, &init_readonly}, {CONFIG_THREADNUMBER_ATTRIBUTE, config_set_threadnumber, NULL, 0, - (void**)&global_slapdFrontendConfig.threadnumber, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.threadnumber, + CONFIG_INT, NULL, DEFAULT_MAX_THREADS}, {CONFIG_PW_LOCKOUT_ATTRIBUTE, config_set_pw_lockout, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_lockout, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_lockout, + CONFIG_ON_OFF, NULL, &init_pw_lockout}, {CONFIG_ENQUOTE_SUP_OC_ATTRIBUTE, config_set_enquote_sup_oc, 
NULL, 0, - (void**)&global_slapdFrontendConfig.enquote_sup_oc, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.enquote_sup_oc, + CONFIG_ON_OFF, NULL, &init_enquote_sup_oc}, {CONFIG_LOCALHOST_ATTRIBUTE, config_set_localhost, NULL, 0, - (void**)&global_slapdFrontendConfig.localhost, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.localhost, + CONFIG_STRING, NULL, NULL/* deletion is not allowed */}, {CONFIG_IOBLOCKTIMEOUT_ATTRIBUTE, config_set_ioblocktimeout, NULL, 0, - (void**)&global_slapdFrontendConfig.ioblocktimeout, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.ioblocktimeout, + CONFIG_INT, NULL, DEFAULT_IOBLOCK_TIMEOUT}, {CONFIG_MAX_FILTER_NEST_LEVEL_ATTRIBUTE, config_set_max_filter_nest_level, - NULL, 0, (void**)&global_slapdFrontendConfig.max_filter_nest_level, - CONFIG_INT, NULL}, + NULL, 0, + (void**)&global_slapdFrontendConfig.max_filter_nest_level, + CONFIG_INT, NULL, DEFAULT_MAX_FILTER_NEST_LEVEL}, {CONFIG_ERRORLOG_MAXLOGDISKSPACE_ATTRIBUTE, NULL, log_set_maxdiskspace, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_maxdiskspace, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_maxdiskspace, + CONFIG_INT, NULL, DEFAULT_LOG_MAXDISKSPACE}, {CONFIG_PW_MINLENGTH_ATTRIBUTE, config_set_pw_minlength, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_minlength, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_minlength, + CONFIG_INT, NULL, DEFAULT_PW_MINLENGTH}, {CONFIG_PW_MINDIGITS_ATTRIBUTE, config_set_pw_mindigits, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_mindigits, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_mindigits, + CONFIG_INT, NULL, DEFAULT_PW_MINDIGITS}, {CONFIG_PW_MINALPHAS_ATTRIBUTE, config_set_pw_minalphas, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_minalphas, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_minalphas, + CONFIG_INT, NULL, DEFAULT_PW_MINALPHAS}, 
{CONFIG_PW_MINUPPERS_ATTRIBUTE, config_set_pw_minuppers, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_minuppers, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_minuppers, + CONFIG_INT, NULL, DEFAULT_PW_MINUPPERS}, {CONFIG_PW_MINLOWERS_ATTRIBUTE, config_set_pw_minlowers, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_minlowers, CONFIG_INT, NULL}, - {CONFIG_PW_MINSPECIALS_ATTRIBUTE, config_set_pw_minspecials, - NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_minspecials, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_minlowers, + CONFIG_INT, NULL, DEFAULT_PW_MINLOWERS}, + {CONFIG_PW_MINSPECIALS_ATTRIBUTE, config_set_pw_minspecials, + NULL, 0, + (void**)&global_slapdFrontendConfig.pw_policy.pw_minspecials, + CONFIG_INT, NULL, DEFAULT_PW_MINSPECIALS}, {CONFIG_PW_MIN8BIT_ATTRIBUTE, config_set_pw_min8bit, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_min8bit, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_min8bit, + CONFIG_INT, NULL, DEFAULT_PW_MIN8BIT}, {CONFIG_PW_MAXREPEATS_ATTRIBUTE, config_set_pw_maxrepeats, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_maxrepeats, CONFIG_INT, NULL}, - {CONFIG_PW_MINCATEGORIES_ATTRIBUTE, config_set_pw_mincategories, - NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_mincategories, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_maxrepeats, + CONFIG_INT, NULL, DEFAULT_PW_MAXREPEATS}, + {CONFIG_PW_MINCATEGORIES_ATTRIBUTE, config_set_pw_mincategories, + NULL, 0, + (void**)&global_slapdFrontendConfig.pw_policy.pw_mincategories, + CONFIG_INT, NULL, DEFAULT_PW_MINCATEGORIES}, {CONFIG_PW_MINTOKENLENGTH_ATTRIBUTE, config_set_pw_mintokenlength, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_mintokenlength, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_mintokenlength, + CONFIG_INT, NULL, DEFAULT_PW_MINTOKENLENGTH}, 
{CONFIG_ERRORLOG_ATTRIBUTE, config_set_errorlog, NULL, 0, - (void**)&global_slapdFrontendConfig.errorlog, CONFIG_STRING_OR_EMPTY, NULL}, + (void**)&global_slapdFrontendConfig.errorlog, + CONFIG_STRING_OR_EMPTY, NULL, NULL/* deletion is not allowed */}, {CONFIG_AUDITLOG_LOGEXPIRATIONTIME_ATTRIBUTE, NULL, log_set_expirationtime, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_exptime, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_exptime, + CONFIG_INT, NULL, DEFAULT_LOG_EXPTIME}, {CONFIG_SCHEMACHECK_ATTRIBUTE, config_set_schemacheck, NULL, 0, - (void**)&global_slapdFrontendConfig.schemacheck, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.schemacheck, + CONFIG_ON_OFF, NULL, &init_schemacheck}, {CONFIG_SYNTAXCHECK_ATTRIBUTE, config_set_syntaxcheck, NULL, 0, - (void**)&global_slapdFrontendConfig.syntaxcheck, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.syntaxcheck, + CONFIG_ON_OFF, NULL, &init_syntaxcheck}, {CONFIG_SYNTAXLOGGING_ATTRIBUTE, config_set_syntaxlogging, NULL, 0, - (void**)&global_slapdFrontendConfig.syntaxlogging, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.syntaxlogging, + CONFIG_ON_OFF, NULL, &init_syntaxlogging}, {CONFIG_DN_VALIDATE_STRICT_ATTRIBUTE, config_set_dn_validate_strict, NULL, 0, - (void**)&global_slapdFrontendConfig.dn_validate_strict, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.dn_validate_strict, + CONFIG_ON_OFF, NULL, &init_dn_validate_strict}, {CONFIG_DS4_COMPATIBLE_SCHEMA_ATTRIBUTE, config_set_ds4_compatible_schema, NULL, 0, (void**)&global_slapdFrontendConfig.ds4_compatible_schema, - CONFIG_ON_OFF, NULL}, + CONFIG_ON_OFF, NULL, &init_ds4_compatible_schema}, {CONFIG_SCHEMA_IGNORE_TRAILING_SPACES, config_set_schema_ignore_trailing_spaces, NULL, 0, (void**)&global_slapdFrontendConfig.schema_ignore_trailing_spaces, - CONFIG_ON_OFF, NULL}, + CONFIG_ON_OFF, NULL, &init_schema_ignore_trailing_spaces}, {CONFIG_SCHEMAREPLACE_ATTRIBUTE, 
config_set_schemareplace, NULL, 0, (void**)&global_slapdFrontendConfig.schemareplace, - CONFIG_STRING_OR_OFF, NULL}, + CONFIG_STRING_OR_OFF, NULL, CONFIG_SCHEMAREPLACE_STR_REPLICATION_ONLY}, {CONFIG_ACCESSLOG_MAXLOGDISKSPACE_ATTRIBUTE, NULL, log_set_maxdiskspace, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_maxdiskspace, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_maxdiskspace, + CONFIG_INT, NULL, DEFAULT_LOG_ACCESS_MAXDISKSPACE}, {CONFIG_REFERRAL_ATTRIBUTE, (ConfigSetFunc)config_set_defaultreferral, NULL, 0, (void**)&global_slapdFrontendConfig.defaultreferral, - CONFIG_SPECIAL_REFERRALLIST, NULL}, + CONFIG_SPECIAL_REFERRALLIST, NULL, NULL/* deletion is not allowed */}, {CONFIG_PW_MAXFAILURE_ATTRIBUTE, config_set_pw_maxfailure, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_maxfailure, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_maxfailure, + CONFIG_INT, NULL, DEFAULT_PW_MAXFAILURE}, {CONFIG_ACCESSLOG_ATTRIBUTE, config_set_accesslog, NULL, 0, - (void**)&global_slapdFrontendConfig.accesslog, CONFIG_STRING_OR_EMPTY, NULL}, + (void**)&global_slapdFrontendConfig.accesslog, + CONFIG_STRING_OR_EMPTY, NULL, NULL/* deletion is not allowed */}, {CONFIG_LASTMOD_ATTRIBUTE, config_set_lastmod, NULL, 0, - (void**)&global_slapdFrontendConfig.lastmod, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.lastmod, + CONFIG_ON_OFF, NULL, &init_lastmod}, {CONFIG_ROOTPWSTORAGESCHEME_ATTRIBUTE, config_set_rootpwstoragescheme, - NULL, 0, NULL, CONFIG_STRING, (ConfigGetFunc)config_get_rootpwstoragescheme}, + NULL, 0, NULL, + CONFIG_STRING, (ConfigGetFunc)config_get_rootpwstoragescheme, + SALTED_SHA1_SCHEME_NAME}, {CONFIG_PW_HISTORY_ATTRIBUTE, config_set_pw_history, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_history, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_history, + CONFIG_ON_OFF, NULL, &init_pw_history}, {CONFIG_SECURITY_ATTRIBUTE, 
config_set_security, NULL, 0, - (void**)&global_slapdFrontendConfig.security, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.security, + CONFIG_ON_OFF, NULL, &init_security}, {CONFIG_PW_MAXAGE_ATTRIBUTE, config_set_pw_maxage, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_maxage, CONFIG_LONG, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_maxage, + CONFIG_LONG, NULL, DEFAULT_PW_MAXAGE}, {CONFIG_AUDITLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE, NULL, log_set_rotationtimeunit, SLAPD_AUDIT_LOG, (void**)&global_slapdFrontendConfig.auditlog_rotationunit, - CONFIG_STRING_OR_UNKNOWN, NULL}, + CONFIG_STRING_OR_UNKNOWN, NULL, INIT_AUDITLOG_ROTATIONUNIT}, {CONFIG_PW_RESETFAILURECOUNT_ATTRIBUTE, config_set_pw_resetfailurecount, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_resetfailurecount, CONFIG_LONG, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_resetfailurecount, + CONFIG_LONG, NULL, DEFAULT_PW_RESETFAILURECOUNT}, {CONFIG_PW_ISGLOBAL_ATTRIBUTE, config_set_pw_is_global_policy, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_is_global_policy, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_is_global_policy, + CONFIG_ON_OFF, NULL, &init_pw_is_global_policy}, {CONFIG_PW_IS_LEGACY, config_set_pw_is_legacy_policy, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_is_legacy, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_is_legacy, + CONFIG_ON_OFF, NULL, &init_pw_is_legacy}, {CONFIG_PW_TRACK_LAST_UPDATE_TIME, config_set_pw_track_last_update_time, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_track_update_time, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_track_update_time, + CONFIG_ON_OFF, NULL, &init_pw_track_update_time}, {CONFIG_AUDITLOG_MAXNUMOFLOGSPERDIR_ATTRIBUTE, NULL, log_set_numlogsperdir, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_maxnumlogs, CONFIG_INT, NULL}, +
(void**)&global_slapdFrontendConfig.auditlog_maxnumlogs, + CONFIG_INT, NULL, DEFAULT_LOG_MAXNUMLOGS}, {CONFIG_ERRORLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE, NULL, log_set_expirationtimeunit, SLAPD_ERROR_LOG, (void**)&global_slapdFrontendConfig.errorlog_exptimeunit, - CONFIG_STRING_OR_UNKNOWN, NULL}, + CONFIG_STRING_OR_UNKNOWN, NULL, INIT_ERRORLOG_EXPTIMEUNIT}, /* errorlog list is read only, so no set func and no config var addr */ - {CONFIG_ERRORLOG_LIST_ATTRIBUTE, NULL, NULL, 0, NULL, - CONFIG_CHARRAY, (ConfigGetFunc)config_get_errorlog_list}, + {CONFIG_ERRORLOG_LIST_ATTRIBUTE, NULL, + NULL, 0, NULL, + CONFIG_CHARRAY, (ConfigGetFunc)config_get_errorlog_list, NULL}, {CONFIG_GROUPEVALNESTLEVEL_ATTRIBUTE, config_set_groupevalnestlevel, NULL, 0, - (void**)&global_slapdFrontendConfig.groupevalnestlevel, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.groupevalnestlevel, + CONFIG_INT, NULL, DEFAULT_GROUPEVALNESTLEVEL}, {CONFIG_ACCESSLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE, NULL, log_set_expirationtimeunit, SLAPD_ACCESS_LOG, (void**)&global_slapdFrontendConfig.accesslog_exptimeunit, - CONFIG_STRING_OR_UNKNOWN, NULL}, + CONFIG_STRING_OR_UNKNOWN, NULL, INIT_ACCESSLOG_EXPTIMEUNIT}, {CONFIG_ROOTPW_ATTRIBUTE, config_set_rootpw, NULL, 0, - (void**)&global_slapdFrontendConfig.rootpw, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.rootpw, + CONFIG_STRING, NULL, NULL/* deletion is not allowed */}, {CONFIG_PW_CHANGE_ATTRIBUTE, config_set_pw_change, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_change, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_change, + CONFIG_ON_OFF, NULL, &init_pw_change}, {CONFIG_ACCESSLOGLEVEL_ATTRIBUTE, config_set_accesslog_level, NULL, 0, - (void**)&global_slapdFrontendConfig.accessloglevel, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accessloglevel, + CONFIG_INT, NULL, DEFAULT_ACCESSLOGLEVEL}, {CONFIG_ERRORLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE, NULL, log_set_rotationtimeunit, SLAPD_ERROR_LOG, 
(void**)&global_slapdFrontendConfig.errorlog_rotationunit, - CONFIG_STRING_OR_UNKNOWN, NULL}, + CONFIG_STRING_OR_UNKNOWN, NULL, INIT_ERRORLOG_ROTATIONUNIT}, {CONFIG_SECUREPORT_ATTRIBUTE, config_set_secureport, NULL, 0, - (void**)&global_slapdFrontendConfig.secureport, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.secureport, + CONFIG_INT, NULL, NULL/* deletion is not allowed */}, {CONFIG_BASEDN_ATTRIBUTE, config_set_basedn, NULL, 0, - (void**)&global_slapdFrontendConfig.certmap_basedn, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.certmap_basedn, + CONFIG_STRING, NULL, NULL/* deletion is not allowed */}, {CONFIG_TIMELIMIT_ATTRIBUTE, config_set_timelimit, NULL, 0, - (void**)&global_slapdFrontendConfig.timelimit, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.timelimit, + CONFIG_INT, NULL, DEFAULT_TIMELIMIT}, {CONFIG_ERRORLOG_MAXLOGSIZE_ATTRIBUTE, NULL, log_set_logsize, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_maxlogsize, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_maxlogsize, + CONFIG_INT, NULL, DEFAULT_LOG_MAXLOGSIZE}, {CONFIG_RESERVEDESCRIPTORS_ATTRIBUTE, config_set_reservedescriptors, NULL, 0, - (void**)&global_slapdFrontendConfig.reservedescriptors, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.reservedescriptors, + CONFIG_INT, NULL, DEFAULT_RESERVE_FDS}, /* access log list is read only, no set func, no config var addr */ - {CONFIG_ACCESSLOG_LIST_ATTRIBUTE, NULL, NULL, 0, - NULL, CONFIG_CHARRAY, (ConfigGetFunc)config_get_accesslog_list}, + {CONFIG_ACCESSLOG_LIST_ATTRIBUTE, NULL, + NULL, 0, NULL, + CONFIG_CHARRAY, (ConfigGetFunc)config_get_accesslog_list, NULL}, {CONFIG_SVRTAB_ATTRIBUTE, config_set_srvtab, NULL, 0, - (void**)&global_slapdFrontendConfig.srvtab, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.srvtab, + CONFIG_STRING, NULL, ""}, {CONFIG_PW_EXP_ATTRIBUTE, config_set_pw_exp, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_exp, 
CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_exp, + CONFIG_ON_OFF, NULL, &init_pw_exp}, {CONFIG_ACCESSCONTROL_ATTRIBUTE, config_set_accesscontrol, NULL, 0, - (void**)&global_slapdFrontendConfig.accesscontrol, CONFIG_ON_OFF, NULL}, - {CONFIG_AUDITLOG_LIST_ATTRIBUTE, NULL, NULL, 0, - NULL, CONFIG_CHARRAY, (ConfigGetFunc)config_get_auditlog_list}, + (void**)&global_slapdFrontendConfig.accesscontrol, + CONFIG_ON_OFF, NULL, &init_accesscontrol}, + {CONFIG_AUDITLOG_LIST_ATTRIBUTE, NULL, + NULL, 0, NULL, + CONFIG_CHARRAY, (ConfigGetFunc)config_get_auditlog_list, NULL}, {CONFIG_ACCESSLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE, NULL, log_set_rotationtimeunit, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_rotationunit, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_rotationunit, + CONFIG_STRING, NULL, INIT_ACCESSLOG_ROTATIONUNIT}, {CONFIG_PW_LOCKDURATION_ATTRIBUTE, config_set_pw_lockduration, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_lockduration, CONFIG_LONG, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_lockduration, + CONFIG_LONG, NULL, DEFAULT_PW_LOCKDURATION}, {CONFIG_ACCESSLOG_MAXLOGSIZE_ATTRIBUTE, NULL, log_set_logsize, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_maxlogsize, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_maxlogsize, + CONFIG_INT, NULL, DEFAULT_LOG_MAXLOGSIZE}, {CONFIG_IDLETIMEOUT_ATTRIBUTE, config_set_idletimeout, NULL, 0, - (void**)&global_slapdFrontendConfig.idletimeout, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.idletimeout, + CONFIG_INT, NULL, DEFAULT_IDLE_TIMEOUT}, {CONFIG_NAGLE_ATTRIBUTE, config_set_nagle, NULL, 0, - (void**)&global_slapdFrontendConfig.nagle, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.nagle, + CONFIG_ON_OFF, NULL, &init_nagle}, {CONFIG_ERRORLOG_MINFREEDISKSPACE_ATTRIBUTE, NULL, log_set_mindiskspace, SLAPD_ERROR_LOG, - 
(void**)&global_slapdFrontendConfig.errorlog_minfreespace, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_minfreespace, + CONFIG_INT, NULL, DEFAULT_LOG_MINFREESPACE}, {CONFIG_AUDITLOG_LOGGING_ENABLED_ATTRIBUTE, NULL, log_set_logging, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_logging_enabled, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_logging_enabled, + CONFIG_ON_OFF, NULL, &init_auditlog_logging_enabled}, {CONFIG_AUDITLOG_LOGGING_HIDE_UNHASHED_PW, config_set_auditlog_unhashed_pw, NULL, 0, - (void**)&global_slapdFrontendConfig.auditlog_logging_hide_unhashed_pw, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_logging_hide_unhashed_pw, + CONFIG_ON_OFF, NULL, &init_auditlog_logging_hide_unhashed_pw}, {CONFIG_ACCESSLOG_BUFFERING_ATTRIBUTE, config_set_accesslogbuffering, NULL, 0, - (void**)&global_slapdFrontendConfig.accesslogbuffering, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.accesslogbuffering, + CONFIG_ON_OFF, NULL, &init_accesslogbuffering}, {CONFIG_CSNLOGGING_ATTRIBUTE, config_set_csnlogging, NULL, 0, - (void**)&global_slapdFrontendConfig.csnlogging, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.csnlogging, + CONFIG_ON_OFF, NULL, &init_csnlogging}, {CONFIG_AUDITLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE, NULL, log_set_expirationtimeunit, SLAPD_AUDIT_LOG, (void**)&global_slapdFrontendConfig.auditlog_exptimeunit, - CONFIG_STRING_OR_UNKNOWN, NULL}, + CONFIG_STRING_OR_UNKNOWN, NULL, INIT_AUDITLOG_EXPTIMEUNIT}, {CONFIG_PW_SYNTAX_ATTRIBUTE, config_set_pw_syntax, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_syntax, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_syntax, + CONFIG_ON_OFF, NULL, &init_pw_syntax}, {CONFIG_LISTENHOST_ATTRIBUTE, config_set_listenhost, NULL, 0, - (void**)&global_slapdFrontendConfig.listenhost, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.listenhost, + CONFIG_STRING, NULL, NULL/* 
NULL value is allowed */}, {CONFIG_LDAPI_FILENAME_ATTRIBUTE, config_set_ldapi_filename, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_filename, CONFIG_STRING, NULL}, - {CONFIG_LDAPI_SWITCH_ATTRIBUTE, config_set_ldapi_switch, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_switch, CONFIG_ON_OFF, NULL}, - {CONFIG_LDAPI_BIND_SWITCH_ATTRIBUTE, config_set_ldapi_bind_switch, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_bind_switch, CONFIG_ON_OFF, NULL}, - {CONFIG_LDAPI_ROOT_DN_ATTRIBUTE, config_set_ldapi_root_dn, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_root_dn, CONFIG_STRING, NULL}, - {CONFIG_LDAPI_MAP_ENTRIES_ATTRIBUTE, config_set_ldapi_map_entries, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_map_entries, CONFIG_ON_OFF, NULL}, - {CONFIG_LDAPI_UIDNUMBER_TYPE_ATTRIBUTE, config_set_ldapi_uidnumber_type, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_uidnumber_type, CONFIG_STRING, NULL}, - {CONFIG_LDAPI_GIDNUMBER_TYPE_ATTRIBUTE, config_set_ldapi_gidnumber_type, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_gidnumber_type, CONFIG_STRING, NULL}, - {CONFIG_LDAPI_SEARCH_BASE_DN_ATTRIBUTE, config_set_ldapi_search_base_dn, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_search_base_dn, CONFIG_STRING, NULL}, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_filename, + CONFIG_STRING, NULL, SLAPD_LDAPI_DEFAULT_FILENAME}, + {CONFIG_LDAPI_SWITCH_ATTRIBUTE, config_set_ldapi_switch, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_switch, + CONFIG_ON_OFF, NULL, &init_ldapi_switch}, + {CONFIG_LDAPI_BIND_SWITCH_ATTRIBUTE, config_set_ldapi_bind_switch, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_bind_switch, + CONFIG_ON_OFF, NULL, &init_ldapi_bind_switch}, + {CONFIG_LDAPI_ROOT_DN_ATTRIBUTE, config_set_ldapi_root_dn, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_root_dn, + CONFIG_STRING, NULL, DEFAULT_DIRECTORY_MANAGER}, + {CONFIG_LDAPI_MAP_ENTRIES_ATTRIBUTE, 
config_set_ldapi_map_entries, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_map_entries, + CONFIG_ON_OFF, NULL, &init_ldapi_map_entries}, + {CONFIG_LDAPI_UIDNUMBER_TYPE_ATTRIBUTE, config_set_ldapi_uidnumber_type, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_uidnumber_type, + CONFIG_STRING, NULL, DEFAULT_UIDNUM_TYPE}, + {CONFIG_LDAPI_GIDNUMBER_TYPE_ATTRIBUTE, config_set_ldapi_gidnumber_type, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_gidnumber_type, + CONFIG_STRING, NULL, DEFAULT_GIDNUM_TYPE}, + {CONFIG_LDAPI_SEARCH_BASE_DN_ATTRIBUTE, config_set_ldapi_search_base_dn, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_search_base_dn, + CONFIG_STRING, NULL, DEFAULT_LDAPI_SEARCH_BASE}, #if defined(ENABLE_AUTO_DN_SUFFIX) - {CONFIG_LDAPI_AUTO_DN_SUFFIX_ATTRIBUTE, config_set_ldapi_auto_dn_suffix, - NULL, 0, - (void**)&global_slapdFrontendConfig.ldapi_auto_dn_suffix, CONFIG_STRING, NULL}, + {CONFIG_LDAPI_AUTO_DN_SUFFIX_ATTRIBUTE, config_set_ldapi_auto_dn_suffix, + NULL, 0, + (void**)&global_slapdFrontendConfig.ldapi_auto_dn_suffix, + CONFIG_STRING, NULL, DEFAULT_LDAPI_AUTO_DN}, #endif {CONFIG_ANON_LIMITS_DN_ATTRIBUTE, config_set_anon_limits_dn, - NULL, 0, - (void**)&global_slapdFrontendConfig.anon_limits_dn, CONFIG_STRING, NULL}, + NULL, 0, + (void**)&global_slapdFrontendConfig.anon_limits_dn, + CONFIG_STRING, NULL, ""}, {CONFIG_SLAPI_COUNTER_ATTRIBUTE, config_set_slapi_counters, NULL, 0, - (void**)&global_slapdFrontendConfig.slapi_counters, CONFIG_ON_OFF, - (ConfigGetFunc)config_get_slapi_counters}, + (void**)&global_slapdFrontendConfig.slapi_counters, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_slapi_counters, + &init_slapi_counters}, {CONFIG_ACCESSLOG_MINFREEDISKSPACE_ATTRIBUTE, NULL, log_set_mindiskspace, SLAPD_ACCESS_LOG, - (void**)&global_slapdFrontendConfig.accesslog_minfreespace, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.accesslog_minfreespace, + CONFIG_INT, NULL, DEFAULT_LOG_MINFREESPACE}, 
{CONFIG_ERRORLOG_MAXNUMOFLOGSPERDIR_ATTRIBUTE, NULL, log_set_numlogsperdir, SLAPD_ERROR_LOG, - (void**)&global_slapdFrontendConfig.errorlog_maxnumlogs, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.errorlog_maxnumlogs, + CONFIG_INT, NULL, DEFAULT_LOG_MAXNUMLOGS}, {CONFIG_SECURELISTENHOST_ATTRIBUTE, config_set_securelistenhost, NULL, 0, - (void**)&global_slapdFrontendConfig.securelistenhost, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.securelistenhost, + CONFIG_STRING, NULL, NULL/* NULL value is allowed */}, {CONFIG_AUDITLOG_MINFREEDISKSPACE_ATTRIBUTE, NULL, log_set_mindiskspace, SLAPD_AUDIT_LOG, - (void**)&global_slapdFrontendConfig.auditlog_minfreespace, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.auditlog_minfreespace, + CONFIG_INT, NULL, DEFAULT_LOG_MINFREESPACE}, {CONFIG_ROOTDN_ATTRIBUTE, config_set_rootdn, NULL, 0, - (void**)&global_slapdFrontendConfig.rootdn, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.rootdn, + CONFIG_STRING, NULL, DEFAULT_DIRECTORY_MANAGER}, {CONFIG_PW_MINAGE_ATTRIBUTE, config_set_pw_minage, NULL, 0, - (void**)&global_slapdFrontendConfig.pw_policy.pw_minage, CONFIG_LONG, NULL}, + (void**)&global_slapdFrontendConfig.pw_policy.pw_minage, + CONFIG_LONG, NULL, DEFAULT_PW_MINAGE}, {CONFIG_AUDITFILE_ATTRIBUTE, config_set_auditlog, NULL, 0, - (void**)&global_slapdFrontendConfig.auditlog, CONFIG_STRING_OR_EMPTY, NULL}, + (void**)&global_slapdFrontendConfig.auditlog, + CONFIG_STRING_OR_EMPTY, NULL, NULL/* deletion is not allowed */}, {CONFIG_RETURN_EXACT_CASE_ATTRIBUTE, config_set_return_exact_case, NULL, 0, - (void**)&global_slapdFrontendConfig.return_exact_case, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.return_exact_case, + CONFIG_ON_OFF, NULL, &init_return_exact_case}, {CONFIG_RESULT_TWEAK_ATTRIBUTE, config_set_result_tweak, NULL, 0, - (void**)&global_slapdFrontendConfig.result_tweak, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.result_tweak, + CONFIG_ON_OFF, 
NULL, &init_result_tweak}, {CONFIG_PLUGIN_BINDDN_TRACKING_ATTRIBUTE, config_set_plugin_tracking, NULL, 0, - (void**)&global_slapdFrontendConfig.plugin_track, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.plugin_track, + CONFIG_ON_OFF, NULL, &init_plugin_track}, {CONFIG_ATTRIBUTE_NAME_EXCEPTION_ATTRIBUTE, config_set_attrname_exceptions, NULL, 0, - (void**)&global_slapdFrontendConfig.attrname_exceptions, CONFIG_ON_OFF, NULL}, + (void**)&global_slapdFrontendConfig.attrname_exceptions, + CONFIG_ON_OFF, NULL, &init_attrname_exceptions}, {CONFIG_MAXBERSIZE_ATTRIBUTE, config_set_maxbersize, NULL, 0, - (void**)&global_slapdFrontendConfig.maxbersize, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.maxbersize, + CONFIG_INT, NULL, DEFAULT_MAX_BERSIZE}, {CONFIG_MAXSASLIOSIZE_ATTRIBUTE, config_set_maxsasliosize, NULL, 0, - (void**)&global_slapdFrontendConfig.maxsasliosize, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.maxsasliosize, + CONFIG_INT, NULL, DEFAULT_MAX_SASLIO_SIZE}, {CONFIG_VERSIONSTRING_ATTRIBUTE, config_set_versionstring, NULL, 0, - (void**)&global_slapdFrontendConfig.versionstring, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.versionstring, + CONFIG_STRING, NULL, SLAPD_VERSION_STR}, {CONFIG_REFERRAL_MODE_ATTRIBUTE, config_set_referral_mode, NULL, 0, - (void**)&global_slapdFrontendConfig.refer_url, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.refer_url, + CONFIG_STRING, NULL, NULL/* deletion is not allowed */}, #if !defined(_WIN32) && !defined(AIX) {CONFIG_MAXDESCRIPTORS_ATTRIBUTE, config_set_maxdescriptors, NULL, 0, - (void**)&global_slapdFrontendConfig.maxdescriptors, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.maxdescriptors, + CONFIG_INT, NULL, DEFAULT_MAXDESCRIPTORS}, #endif {CONFIG_CONNTABLESIZE_ATTRIBUTE, config_set_conntablesize, NULL, 0, - (void**)&global_slapdFrontendConfig.conntablesize, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.conntablesize, + CONFIG_INT, NULL, 
NULL/* deletion is not allowed */}, {CONFIG_SSLCLIENTAUTH_ATTRIBUTE, config_set_SSLclientAuth, NULL, 0, - (void **)&global_slapdFrontendConfig.SSLclientAuth, CONFIG_SPECIAL_SSLCLIENTAUTH, NULL}, + (void **)&global_slapdFrontendConfig.SSLclientAuth, + CONFIG_SPECIAL_SSLCLIENTAUTH, NULL, DEFAULT_SSLCLIENTAPTH}, {CONFIG_SSL_CHECK_HOSTNAME_ATTRIBUTE, config_set_ssl_check_hostname, - NULL, 0, NULL, CONFIG_ON_OFF, (ConfigGetFunc)config_get_ssl_check_hostname}, - {CONFIG_CONFIG_ATTRIBUTE, 0, NULL, 0, (void**)SLAPD_CONFIG_DN, - CONFIG_CONSTANT_STRING, NULL}, + NULL, 0, NULL, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_ssl_check_hostname, + &init_ssl_check_hostname}, + {CONFIG_CONFIG_ATTRIBUTE, 0, + NULL, 0, (void**)SLAPD_CONFIG_DN, + CONFIG_CONSTANT_STRING, NULL, NULL/* deletion is not allowed */}, {CONFIG_HASH_FILTERS_ATTRIBUTE, config_set_hash_filters, - NULL, 0, NULL, CONFIG_ON_OFF, (ConfigGetFunc)config_get_hash_filters}, + NULL, 0, NULL, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_hash_filters, + NULL/* deletion is not allowed */}, /* instance dir; used by admin tasks */ {CONFIG_INSTDIR_ATTRIBUTE, config_set_instancedir, NULL, 0, - (void**)&global_slapdFrontendConfig.instancedir, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.instancedir, + CONFIG_STRING, NULL, NULL/* deletion is not allowed */}, /* parameterizing schema dir */ {CONFIG_SCHEMADIR_ATTRIBUTE, config_set_schemadir, NULL, 0, - (void**)&global_slapdFrontendConfig.schemadir, CONFIG_STRING, NULL}, + (void**)&global_slapdFrontendConfig.schemadir, + CONFIG_STRING, NULL, NULL/* deletion is not allowed */}, /* parameterizing lock dir */ {CONFIG_LOCKDIR_ATTRIBUTE, config_set_lockdir, NULL, 0, - (void**)&global_slapdFrontendConfig.lockdir, CONFIG_STRING, (ConfigGetFunc)config_get_lockdir}, + (void**)&global_slapdFrontendConfig.lockdir, + CONFIG_STRING, (ConfigGetFunc)config_get_lockdir, + NULL/* deletion is not allowed */}, /* parameterizing tmp dir */ {CONFIG_TMPDIR_ATTRIBUTE, config_set_tmpdir, NULL, 
0, - (void**)&global_slapdFrontendConfig.tmpdir, CONFIG_STRING, (ConfigGetFunc)config_get_tmpdir}, + (void**)&global_slapdFrontendConfig.tmpdir, + CONFIG_STRING, (ConfigGetFunc)config_get_tmpdir, + NULL/* deletion is not allowed */}, /* parameterizing cert dir */ {CONFIG_CERTDIR_ATTRIBUTE, config_set_certdir, NULL, 0, - (void**)&global_slapdFrontendConfig.certdir, CONFIG_STRING, (ConfigGetFunc)config_get_certdir}, + (void**)&global_slapdFrontendConfig.certdir, + CONFIG_STRING, (ConfigGetFunc)config_get_certdir, + NULL/* deletion is not allowed */}, /* parameterizing ldif dir */ {CONFIG_LDIFDIR_ATTRIBUTE, config_set_ldifdir, NULL, 0, - (void**)&global_slapdFrontendConfig.ldifdir, CONFIG_STRING, (ConfigGetFunc)config_get_ldifdir}, + (void**)&global_slapdFrontendConfig.ldifdir, + CONFIG_STRING, (ConfigGetFunc)config_get_ldifdir, + NULL/* deletion is not allowed */}, /* parameterizing bak dir */ {CONFIG_BAKDIR_ATTRIBUTE, config_set_bakdir, NULL, 0, - (void**)&global_slapdFrontendConfig.bakdir, CONFIG_STRING, (ConfigGetFunc)config_get_bakdir}, + (void**)&global_slapdFrontendConfig.bakdir, + CONFIG_STRING, (ConfigGetFunc)config_get_bakdir, + NULL/* deletion is not allowed */}, /* parameterizing sasl plugin path */ {CONFIG_SASLPATH_ATTRIBUTE, config_set_saslpath, NULL, 0, - (void**)&global_slapdFrontendConfig.saslpath, CONFIG_STRING, (ConfigGetFunc)config_get_saslpath}, + (void**)&global_slapdFrontendConfig.saslpath, + CONFIG_STRING, (ConfigGetFunc)config_get_saslpath, + NULL/* deletion is not allowed */}, /* parameterizing run dir */ {CONFIG_RUNDIR_ATTRIBUTE, config_set_rundir, NULL, 0, - (void**)&global_slapdFrontendConfig.rundir, CONFIG_STRING, (ConfigGetFunc)config_get_rundir}, + (void**)&global_slapdFrontendConfig.rundir, + CONFIG_STRING, (ConfigGetFunc)config_get_rundir, + NULL/* deletion is not allowed */}, {CONFIG_REWRITE_RFC1274_ATTRIBUTE, config_set_rewrite_rfc1274, NULL, 0, - (void**)&global_slapdFrontendConfig.rewrite_rfc1274, CONFIG_ON_OFF, NULL}, + 
(void**)&global_slapdFrontendConfig.rewrite_rfc1274, + CONFIG_ON_OFF, NULL, &init_rewrite_rfc1274}, {CONFIG_OUTBOUND_LDAP_IO_TIMEOUT_ATTRIBUTE, config_set_outbound_ldap_io_timeout, NULL, 0, (void **)&global_slapdFrontendConfig.outbound_ldap_io_timeout, - CONFIG_INT, NULL}, + CONFIG_INT, NULL, DEFAULT_OUTBOUND_LDAP_IO_TIMEOUT}, {CONFIG_UNAUTH_BINDS_ATTRIBUTE, config_set_unauth_binds_switch, NULL, 0, - (void**)&global_slapdFrontendConfig.allow_unauth_binds, CONFIG_ON_OFF, - (ConfigGetFunc)config_get_unauth_binds_switch}, + (void**)&global_slapdFrontendConfig.allow_unauth_binds, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_unauth_binds_switch, + &init_allow_unauth_binds}, {CONFIG_REQUIRE_SECURE_BINDS_ATTRIBUTE, config_set_require_secure_binds, NULL, 0, - (void**)&global_slapdFrontendConfig.require_secure_binds, CONFIG_ON_OFF, - (ConfigGetFunc)config_get_require_secure_binds}, + (void**)&global_slapdFrontendConfig.require_secure_binds, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_require_secure_binds, + &init_require_secure_binds}, {CONFIG_ANON_ACCESS_ATTRIBUTE, config_set_anon_access_switch, NULL, 0, - (void**)&global_slapdFrontendConfig.allow_anon_access, CONFIG_SPECIAL_ANON_ACCESS_SWITCH, - (ConfigGetFunc)config_get_anon_access_switch}, + (void**)&global_slapdFrontendConfig.allow_anon_access, + CONFIG_SPECIAL_ANON_ACCESS_SWITCH, + (ConfigGetFunc)config_get_anon_access_switch, + DEFAULT_ALLOW_ANON_ACCESS}, {CONFIG_LOCALSSF_ATTRIBUTE, config_set_localssf, NULL, 0, - (void**)&global_slapdFrontendConfig.localssf, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.localssf, + CONFIG_INT, NULL, DEFAULT_LOCAL_SSF}, {CONFIG_MINSSF_ATTRIBUTE, config_set_minssf, NULL, 0, - (void**)&global_slapdFrontendConfig.minssf, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.minssf, + CONFIG_INT, NULL, DEFAULT_MIN_SSF}, {CONFIG_MINSSF_EXCLUDE_ROOTDSE, config_set_minssf_exclude_rootdse, NULL, 0, (void**)&global_slapdFrontendConfig.minssf_exclude_rootdse, - CONFIG_ON_OFF, - 
(ConfigGetFunc)config_get_minssf_exclude_rootdse}, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_minssf_exclude_rootdse, + &init_minssf_exclude_rootdse}, {CONFIG_FORCE_SASL_EXTERNAL_ATTRIBUTE, config_set_force_sasl_external, NULL, 0, - (void**)&global_slapdFrontendConfig.force_sasl_external, CONFIG_ON_OFF, - (ConfigGetFunc)config_get_force_sasl_external}, + (void**)&global_slapdFrontendConfig.force_sasl_external, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_force_sasl_external, + &init_force_sasl_external}, {CONFIG_ENTRYUSN_GLOBAL, config_set_entryusn_global, NULL, 0, - (void**)&global_slapdFrontendConfig.entryusn_global, CONFIG_ON_OFF, - (ConfigGetFunc)config_get_entryusn_global}, + (void**)&global_slapdFrontendConfig.entryusn_global, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_entryusn_global, + &init_entryusn_global}, {CONFIG_ENTRYUSN_IMPORT_INITVAL, config_set_entryusn_import_init, NULL, 0, (void**)&global_slapdFrontendConfig.entryusn_import_init, - CONFIG_STRING, (ConfigGetFunc)config_get_entryusn_import_init}, + CONFIG_STRING, (ConfigGetFunc)config_get_entryusn_import_init, + ENTRYUSN_IMPORT_INIT}, {CONFIG_ALLOWED_TO_DELETE_ATTRIBUTE, config_set_allowed_to_delete_attrs, NULL, 0, (void**)&global_slapdFrontendConfig.allowed_to_delete_attrs, - CONFIG_STRING, (ConfigGetFunc)config_get_allowed_to_delete_attrs}, + CONFIG_STRING, (ConfigGetFunc)config_get_allowed_to_delete_attrs, + DEFAULT_ALLOWED_TO_DELETE_ATTRS }, {CONFIG_VALIDATE_CERT_ATTRIBUTE, config_set_validate_cert_switch, NULL, 0, (void**)&global_slapdFrontendConfig.validate_cert, CONFIG_SPECIAL_VALIDATE_CERT_SWITCH, - (ConfigGetFunc)config_get_validate_cert_switch}, + (ConfigGetFunc)config_get_validate_cert_switch, DEFAULT_VALIDATE_CERT}, {CONFIG_PAGEDSIZELIMIT_ATTRIBUTE, config_set_pagedsizelimit, NULL, 0, - (void**)&global_slapdFrontendConfig.pagedsizelimit, CONFIG_INT, NULL}, + (void**)&global_slapdFrontendConfig.pagedsizelimit, + CONFIG_INT, NULL, DEFAULT_PAGEDSIZELIMIT}, {CONFIG_DEFAULT_NAMING_CONTEXT, 
config_set_default_naming_context, NULL, 0, (void**)&global_slapdFrontendConfig.default_naming_context, - CONFIG_STRING, (ConfigGetFunc)config_get_default_naming_context}, + CONFIG_STRING, (ConfigGetFunc)config_get_default_naming_context, NULL}, {CONFIG_DISK_MONITORING, config_set_disk_monitoring, NULL, 0, - (void**)&global_slapdFrontendConfig.disk_monitoring, CONFIG_ON_OFF, - (ConfigGetFunc)config_get_disk_monitoring}, + (void**)&global_slapdFrontendConfig.disk_monitoring, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_disk_monitoring, + &init_disk_monitoring}, {CONFIG_DISK_THRESHOLD, config_set_disk_threshold, NULL, 0, - (void**)&global_slapdFrontendConfig.disk_threshold, CONFIG_INT, - (ConfigGetFunc)config_get_disk_threshold}, + (void**)&global_slapdFrontendConfig.disk_threshold, + CONFIG_INT, (ConfigGetFunc)config_get_disk_threshold, + DEFAULT_DISK_THRESHOLD}, {CONFIG_DISK_GRACE_PERIOD, config_set_disk_grace_period, NULL, 0, (void**)&global_slapdFrontendConfig.disk_grace_period, - CONFIG_INT, (ConfigGetFunc)config_get_disk_grace_period}, + CONFIG_INT, (ConfigGetFunc)config_get_disk_grace_period, + DEFAULT_DISK_GRACE_PERIOD}, {CONFIG_DISK_LOGGING_CRITICAL, config_set_disk_logging_critical, NULL, 0, (void**)&global_slapdFrontendConfig.disk_logging_critical, - CONFIG_ON_OFF, (ConfigGetFunc)config_get_disk_logging_critical}, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_disk_logging_critical, + &init_disk_logging_critical}, {CONFIG_DISK_PRESERVE_LOGGING, config_set_disk_preserve_logging, NULL, 0, (void**)&global_slapdFrontendConfig.disk_preserve_logging, - CONFIG_ON_OFF, (ConfigGetFunc)config_get_disk_preserve_logging}, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_disk_preserve_logging, + &init_disk_preserve_logging}, {CONFIG_NDN_CACHE, config_set_ndn_cache_enabled, NULL, 0, - (void**)&global_slapdFrontendConfig.ndn_cache_enabled, CONFIG_INT, - (ConfigGetFunc)config_get_ndn_cache_enabled}, + (void**)&global_slapdFrontendConfig.ndn_cache_enabled, + CONFIG_ON_OFF, 
(ConfigGetFunc)config_get_ndn_cache_enabled, + &init_ndn_cache_enabled}, {CONFIG_NDN_CACHE_SIZE, config_set_ndn_cache_max_size, NULL, 0, (void**)&global_slapdFrontendConfig.ndn_cache_max_size, - CONFIG_INT, (ConfigGetFunc)config_get_ndn_cache_size}, + CONFIG_INT, (ConfigGetFunc)config_get_ndn_cache_size, DEFAULT_NDN_SIZE}, #ifdef MEMPOOL_EXPERIMENTAL ,{CONFIG_MEMPOOL_SWITCH_ATTRIBUTE, config_set_mempool_switch, NULL, 0, - (void**)&global_slapdFrontendConfig.mempool_switch, CONFIG_ON_OFF, (ConfigGetFunc)config_get_mempool_switch}, + (void**)&global_slapdFrontendConfig.mempool_switch, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_mempool_switch, + &init_mempool_switch}, {CONFIG_MEMPOOL_MAXFREELIST_ATTRIBUTE, config_set_mempool_maxfreelist, NULL, 0, - (void**)&global_slapdFrontendConfig.mempool_maxfreelist, CONFIG_INT, (ConfigGetFunc)config_get_mempool_maxfreelist} + (void**)&global_slapdFrontendConfig.mempool_maxfreelist, + CONFIG_INT, (ConfigGetFunc)config_get_mempool_maxfreelist, + DEFAULT_MEMPOOL_MAXFREELIST} #endif /* MEMPOOL_EXPERIMENTAL */ }; @@ -943,21 +1249,21 @@ FrontendConfig_init () { cfg->port = LDAP_PORT; cfg->secureport = LDAPS_PORT; cfg->ldapi_filename = slapi_ch_strdup(SLAPD_LDAPI_DEFAULT_FILENAME); - cfg->ldapi_switch = LDAP_OFF; - cfg->ldapi_bind_switch = LDAP_OFF; - cfg->ldapi_root_dn = slapi_ch_strdup("cn=Directory Manager"); - cfg->ldapi_map_entries = LDAP_OFF; - cfg->ldapi_uidnumber_type = slapi_ch_strdup("uidNumber"); - cfg->ldapi_gidnumber_type = slapi_ch_strdup("gidNumber"); + init_ldapi_switch = cfg->ldapi_switch = LDAP_OFF; + init_ldapi_bind_switch = cfg->ldapi_bind_switch = LDAP_OFF; + cfg->ldapi_root_dn = slapi_ch_strdup(DEFAULT_DIRECTORY_MANAGER); + init_ldapi_map_entries = cfg->ldapi_map_entries = LDAP_OFF; + cfg->ldapi_uidnumber_type = slapi_ch_strdup(DEFAULT_UIDNUM_TYPE); + cfg->ldapi_gidnumber_type = slapi_ch_strdup(DEFAULT_GIDNUM_TYPE); /* These DNs are no need to be normalized. 
*/ - cfg->ldapi_search_base_dn = slapi_ch_strdup("dc=example,dc=com"); + cfg->ldapi_search_base_dn = slapi_ch_strdup(DEFAULT_LDAPI_SEARCH_BASE); #if defined(ENABLE_AUTO_DN_SUFFIX) - cfg->ldapi_auto_dn_suffix = slapi_ch_strdup("cn=peercred,cn=external,cn=auth"); + cfg->ldapi_auto_dn_suffix = slapi_ch_strdup(DEFAULT_LDAPI_AUTO_DN); #endif - cfg->allow_unauth_binds = LDAP_OFF; - cfg->require_secure_binds = LDAP_OFF; + init_allow_unauth_binds = cfg->allow_unauth_binds = LDAP_OFF; + init_require_secure_binds = cfg->require_secure_binds = LDAP_OFF; cfg->allow_anon_access = SLAPD_ANON_ACCESS_ON; - cfg->slapi_counters = LDAP_ON; + init_slapi_counters = cfg->slapi_counters = LDAP_ON; cfg->threadnumber = SLAPD_DEFAULT_MAX_THREADS; cfg->maxthreadsperconn = SLAPD_DEFAULT_MAX_THREADS_PER_CONN; cfg->reservedescriptors = SLAPD_DEFAULT_RESERVE_FDS; @@ -968,8 +1274,8 @@ FrontendConfig_init () { cfg->maxsasliosize = SLAPD_DEFAULT_MAX_SASLIO_SIZE; cfg->localssf = SLAPD_DEFAULT_LOCAL_SSF; cfg->minssf = SLAPD_DEFAULT_MIN_SSF; - cfg->minssf_exclude_rootdse = LDAP_OFF; /* minssf is applied to rootdse, - by default */ + /* minssf is applied to rootdse, by default */ + init_minssf_exclude_rootdse = cfg->minssf_exclude_rootdse = LDAP_OFF; cfg->validate_cert = SLAPD_VALIDATE_CERT_WARN; #ifdef _WIN32 @@ -982,17 +1288,19 @@ FrontendConfig_init () { #endif /* USE_SYSCONF */ #endif /* _WIN32 */ - cfg->accesscontrol = LDAP_ON; - cfg->security = LDAP_OFF; - cfg->ssl_check_hostname = LDAP_ON; - cfg->return_exact_case = LDAP_ON; - cfg->result_tweak = LDAP_OFF; + init_accesscontrol = cfg->accesscontrol = LDAP_ON; + init_nagle = cfg->nagle = LDAP_OFF; + init_security = cfg->security = LDAP_OFF; + init_ssl_check_hostname = cfg->ssl_check_hostname = LDAP_ON; + init_return_exact_case = cfg->return_exact_case = LDAP_ON; + init_result_tweak = cfg->result_tweak = LDAP_OFF; + init_attrname_exceptions = cfg->attrname_exceptions = LDAP_OFF; cfg->reservedescriptors = SLAPD_DEFAULT_RESERVE_FDS; cfg->useroc = 
slapi_ch_strdup ( "" ); cfg->userat = slapi_ch_strdup ( "" ); /* kexcoff: should not be initialized by default here - cfg->rootpwstoragescheme = pw_name2scheme( SHA1_SCHEME_NAME ); - cfg->pw_storagescheme = pw_name2scheme( SHA1_SCHEME_NAME ); + cfg->rootpwstoragescheme = pw_name2scheme( SALTED_SHA1_SCHEME_NAME ); + cfg->pw_storagescheme = pw_name2scheme( SALTED_SHA1_SCHEME_NAME ); */ cfg->slapd_type = 0; cfg->versionstring = SLAPD_VERSION_STR; @@ -1000,24 +1308,29 @@ FrontendConfig_init () { cfg->pagedsizelimit = 0; cfg->timelimit = SLAPD_DEFAULT_TIMELIMIT; cfg->anon_limits_dn = slapi_ch_strdup(""); - cfg->schemacheck = LDAP_ON; - cfg->syntaxcheck = LDAP_OFF; - cfg->plugin_track = LDAP_OFF; - cfg->syntaxlogging = LDAP_OFF; - cfg->dn_validate_strict = LDAP_OFF; - cfg->ds4_compatible_schema = LDAP_OFF; - cfg->enquote_sup_oc = LDAP_OFF; - cfg->lastmod = LDAP_ON; - cfg->rewrite_rfc1274 = LDAP_OFF; + init_schemacheck = cfg->schemacheck = LDAP_ON; + init_syntaxcheck = cfg->syntaxcheck = LDAP_OFF; + init_plugin_track = cfg->plugin_track = LDAP_OFF; + init_syntaxlogging = cfg->syntaxlogging = LDAP_OFF; + init_dn_validate_strict = cfg->dn_validate_strict = LDAP_OFF; + init_ds4_compatible_schema = cfg->ds4_compatible_schema = LDAP_OFF; + init_enquote_sup_oc = cfg->enquote_sup_oc = LDAP_OFF; + init_lastmod = cfg->lastmod = LDAP_ON; + init_rewrite_rfc1274 = cfg->rewrite_rfc1274 = LDAP_OFF; cfg->schemareplace = slapi_ch_strdup( CONFIG_SCHEMAREPLACE_STR_REPLICATION_ONLY ); - cfg->schema_ignore_trailing_spaces = SLAPD_DEFAULT_SCHEMA_IGNORE_TRAILING_SPACES; - cfg->force_sasl_external = LDAP_OFF; /* do not force sasl external by default - let clients abide by the LDAP standards and send us a SASL/EXTERNAL bind if that's what they want to do */ - - cfg->pwpolicy_local = LDAP_OFF; - cfg->pw_policy.pw_change = LDAP_ON; - cfg->pw_policy.pw_must_change = LDAP_OFF; - cfg->pw_policy.pw_syntax = LDAP_OFF; - cfg->pw_policy.pw_exp = LDAP_OFF; + init_schema_ignore_trailing_spaces = 
cfg->schema_ignore_trailing_spaces = + SLAPD_DEFAULT_SCHEMA_IGNORE_TRAILING_SPACES; + /* do not force sasl external by default - + * let clients abide by the LDAP standards and send us a SASL/EXTERNAL bind + * if that's what they want to do */ + init_force_sasl_external = cfg->force_sasl_external = LDAP_OFF; + + init_readonly = cfg->readonly = LDAP_OFF; + init_pwpolicy_local = cfg->pwpolicy_local = LDAP_OFF; + init_pw_change = cfg->pw_policy.pw_change = LDAP_ON; + init_pw_must_change = cfg->pw_policy.pw_must_change = LDAP_OFF; + init_pw_syntax = cfg->pw_policy.pw_syntax = LDAP_OFF; + init_pw_exp = cfg->pw_policy.pw_exp = LDAP_OFF; cfg->pw_policy.pw_minlength = 8; cfg->pw_policy.pw_mindigits = 0; cfg->pw_policy.pw_minalphas = 0; @@ -1031,80 +1344,84 @@ FrontendConfig_init () { cfg->pw_policy.pw_maxage = 8640000; /* 100 days */ cfg->pw_policy.pw_minage = 0; cfg->pw_policy.pw_warning = 86400; /* 1 day */ - cfg->pw_policy.pw_history = LDAP_OFF; + init_pw_history = cfg->pw_policy.pw_history = LDAP_OFF; cfg->pw_policy.pw_inhistory = 6; - cfg->pw_policy.pw_lockout = LDAP_OFF; + init_pw_lockout = cfg->pw_policy.pw_lockout = LDAP_OFF; cfg->pw_policy.pw_maxfailure = 3; - cfg->pw_policy.pw_unlock = LDAP_ON; + init_pw_unlock = cfg->pw_policy.pw_unlock = LDAP_ON; cfg->pw_policy.pw_lockduration = 3600; /* 60 minutes */ cfg->pw_policy.pw_resetfailurecount = 600; /* 10 minutes */ cfg->pw_policy.pw_gracelimit = 0; - cfg->pw_policy.pw_is_legacy = LDAP_ON; - cfg->pw_policy.pw_track_update_time = LDAP_OFF; - cfg->pw_is_global_policy = LDAP_OFF; + init_pw_is_legacy = cfg->pw_policy.pw_is_legacy = LDAP_ON; + init_pw_track_update_time = cfg->pw_policy.pw_track_update_time = LDAP_OFF; + init_pw_is_global_policy = cfg->pw_is_global_policy = LDAP_OFF; - cfg->accesslog_logging_enabled = LDAP_ON; - cfg->accesslog_mode = slapi_ch_strdup("600"); + init_accesslog_logging_enabled = cfg->accesslog_logging_enabled = LDAP_ON; + cfg->accesslog_mode = slapi_ch_strdup(INIT_ACCESSLOG_MODE); 
cfg->accesslog_maxnumlogs = 10; cfg->accesslog_maxlogsize = 100; cfg->accesslog_rotationtime = 1; - cfg->accesslog_rotationunit = slapi_ch_strdup("day"); - cfg->accesslog_rotationsync_enabled = LDAP_OFF; + cfg->accesslog_rotationunit = slapi_ch_strdup(INIT_ACCESSLOG_ROTATIONUNIT); + init_accesslog_rotationsync_enabled = + cfg->accesslog_rotationsync_enabled = LDAP_OFF; cfg->accesslog_rotationsynchour = 0; cfg->accesslog_rotationsyncmin = 0; cfg->accesslog_maxdiskspace = 500; cfg->accesslog_minfreespace = 5; cfg->accesslog_exptime = 1; - cfg->accesslog_exptimeunit = slapi_ch_strdup("month"); + cfg->accesslog_exptimeunit = slapi_ch_strdup(INIT_ACCESSLOG_EXPTIMEUNIT); cfg->accessloglevel = 256; - cfg->accesslogbuffering = LDAP_ON; - cfg->csnlogging = LDAP_ON; + init_accesslogbuffering = cfg->accesslogbuffering = LDAP_ON; + init_csnlogging = cfg->csnlogging = LDAP_ON; - cfg->errorlog_logging_enabled = LDAP_ON; - cfg->errorlog_mode = slapi_ch_strdup("600"); + init_errorlog_logging_enabled = cfg->errorlog_logging_enabled = LDAP_ON; + cfg->errorlog_mode = slapi_ch_strdup(INIT_ERRORLOG_MODE); cfg->errorlog_maxnumlogs = 1; cfg->errorlog_maxlogsize = 100; cfg->errorlog_rotationtime = 1; - cfg->errorlog_rotationunit = slapi_ch_strdup ("week"); - cfg->errorlog_rotationsync_enabled = LDAP_OFF; + cfg->errorlog_rotationunit = slapi_ch_strdup (INIT_ERRORLOG_ROTATIONUNIT); + init_errorlog_rotationsync_enabled = + cfg->errorlog_rotationsync_enabled = LDAP_OFF; cfg->errorlog_rotationsynchour = 0; cfg->errorlog_rotationsyncmin = 0; cfg->errorlog_maxdiskspace = 100; cfg->errorlog_minfreespace = 5; cfg->errorlog_exptime = 1; - cfg->errorlog_exptimeunit = slapi_ch_strdup("month"); + cfg->errorlog_exptimeunit = slapi_ch_strdup(INIT_ERRORLOG_EXPTIMEUNIT); cfg->errorloglevel = 0; - cfg->auditlog_logging_enabled = LDAP_OFF; - cfg->auditlog_mode = slapi_ch_strdup("600"); + init_auditlog_logging_enabled = cfg->auditlog_logging_enabled = LDAP_OFF; + cfg->auditlog_mode = 
slapi_ch_strdup(INIT_AUDITLOG_MODE); cfg->auditlog_maxnumlogs = 1; cfg->auditlog_maxlogsize = 100; cfg->auditlog_rotationtime = 1; - cfg->auditlog_rotationunit = slapi_ch_strdup ("week"); - cfg->auditlog_rotationsync_enabled = LDAP_OFF; + cfg->auditlog_rotationunit = slapi_ch_strdup(INIT_AUDITLOG_ROTATIONUNIT); + init_auditlog_rotationsync_enabled = + cfg->auditlog_rotationsync_enabled = LDAP_OFF; cfg->auditlog_rotationsynchour = 0; cfg->auditlog_rotationsyncmin = 0; cfg->auditlog_maxdiskspace = 100; cfg->auditlog_minfreespace = 5; cfg->auditlog_exptime = 1; - cfg->auditlog_exptimeunit = slapi_ch_strdup("month"); - cfg->auditlog_logging_hide_unhashed_pw = LDAP_ON; + cfg->auditlog_exptimeunit = slapi_ch_strdup(INIT_AUDITLOG_EXPTIMEUNIT); + init_auditlog_logging_hide_unhashed_pw = + cfg->auditlog_logging_hide_unhashed_pw = LDAP_ON; - cfg->entryusn_global = LDAP_OFF; - cfg->entryusn_import_init = slapi_ch_strdup("0"); + init_entryusn_global = cfg->entryusn_global = LDAP_OFF; + cfg->entryusn_import_init = slapi_ch_strdup(ENTRYUSN_IMPORT_INIT); cfg->allowed_to_delete_attrs = slapi_ch_strdup("nsslapd-listenhost nsslapd-securelistenhost nsslapd-defaultnamingcontext"); cfg->default_naming_context = NULL; /* store normalized dn */ - cfg->disk_monitoring = LDAP_OFF; + init_disk_monitoring = cfg->disk_monitoring = LDAP_OFF; cfg->disk_threshold = 2097152; /* 2 mb */ cfg->disk_grace_period = 60; /* 1 hour */ - cfg->disk_preserve_logging = LDAP_OFF; - cfg->disk_logging_critical = LDAP_OFF; - cfg->ndn_cache_enabled = LDAP_OFF; + init_disk_preserve_logging = cfg->disk_preserve_logging = LDAP_OFF; + init_disk_logging_critical = cfg->disk_logging_critical = LDAP_OFF; + init_ndn_cache_enabled = cfg->ndn_cache_enabled = LDAP_OFF; cfg->ndn_cache_max_size = NDN_DEFAULT_SIZE; #ifdef MEMPOOL_EXPERIMENTAL - cfg->mempool_switch = LDAP_ON; + init_mempool_switch = cfg->mempool_switch = LDAP_ON; cfg->mempool_maxfreelist = 1024; cfg->system_page_size = sysconf(_SC_PAGE_SIZE); /* not to get 
every time; no set, get only */ { @@ -1219,8 +1536,8 @@ config_value_is_null( const char *attrname, const char *value, char *errorbuf, int or_zero_length ) { if ( NULL == value || ( or_zero_length && *value == '\0' )) { - PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: NULL value", - attrname ); + PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: deleting the value is not allowed.", attrname ); return 1; } @@ -2721,7 +3038,6 @@ config_set_security( const char *attrname, char *value, char *errorbuf, int appl return retVal; } - static int config_set_onoff ( const char *attrname, char *value, int *configvalue, char *errorbuf, int apply ) @@ -2734,7 +3050,10 @@ config_set_onoff ( const char *attrname, char *value, int *configvalue, } if ( strcasecmp ( value, "on" ) != 0 && - strcasecmp ( value, "off") != 0 ) { + strcasecmp ( value, "off") != 0 && + /* initializing the value */ + (*(int *)value != LDAP_ON) && + (*(int *)value != LDAP_OFF)) { PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid value \"%s\". 
Valid values are \"on\" or \"off\".", attrname, value ); @@ -2750,9 +3069,10 @@ config_set_onoff ( const char *attrname, char *value, int *configvalue, if ( strcasecmp ( value, "on" ) == 0 ) { *configvalue = LDAP_ON; - } - else if ( strcasecmp ( value, "off" ) == 0 ) { + } else if ( strcasecmp ( value, "off" ) == 0 ) { *configvalue = LDAP_OFF; + } else { + *configvalue = *(int *)value; } CFG_UNLOCK_WRITE(slapdFrontendConfig); @@ -3691,7 +4011,8 @@ config_set_errorlog_level( const char *attrname, char *value, char *errorbuf, in int -config_set_accesslog_level( const char *attrname, char *value, char *errorbuf, int apply ) { +config_set_accesslog_level( const char *attrname, char *value, char *errorbuf, int apply ) +{ int retVal = LDAP_SUCCESS; long level = 0; char *endp = NULL; @@ -4805,7 +5126,7 @@ config_get_accesslog(){ } char * -config_get_errorlog( ){ +config_get_errorlog(){ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); char *retVal; @@ -6284,10 +6605,10 @@ config_set(const char *attr, struct berval **values, char *errorbuf, int apply) if ((NULL == values) && config_allowed_to_delete_attrs(cgas->attr_name)) { if (cgas->setfunc) { - retval = (cgas->setfunc)(cgas->attr_name, NULL, + retval = (cgas->setfunc)(cgas->attr_name, cgas->initvalue, errorbuf, apply); } else if (cgas->logsetfunc) { - retval = (cgas->logsetfunc)(cgas->attr_name, NULL, + retval = (cgas->logsetfunc)(cgas->attr_name, cgas->initvalue, cgas->whichlog, errorbuf, apply); } else { LDAPDebug1Arg(LDAP_DEBUG_ANY,
0
8895fc4b4b7f62963a0f52b05c76767cbc4fba74
389ds/389-ds-base
Issue 50992 - Bump jemalloc version and enable profiling Description: jemalloc 5.2.1 release introduced a number of fixes. https://github.com/jemalloc/jemalloc/releases/tag/5.2.1 Additionally: * Override default page and hugepage sizes, because builder machines may not match the target systems. * Enable profiling by default (--enable-perf), so it can be used for troubleshooting. Fixes: https://pagure.io/389-ds-base/issue/50992 Reviewed by: mreynolds (Thanks!)
commit 8895fc4b4b7f62963a0f52b05c76767cbc4fba74 Author: Viktor Ashirov <[email protected]> Date: Sat May 2 20:58:21 2020 +0200 Issue 50992 - Bump jemalloc version and enable profiling Description: jemalloc 5.2.1 release introduced a number of fixes. https://github.com/jemalloc/jemalloc/releases/tag/5.2.1 Additionally: * Override default page and hugepage sizes, because builder machines may not match the target systems. * Enable profiling by default (--enable-perf), so it can be used for troubleshooting. Fixes: https://pagure.io/389-ds-base/issue/50992 Reviewed by: mreynolds (Thanks!) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index b9f85489b..5f8c3dc1a 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -4,7 +4,7 @@ %global bundle_jemalloc __BUNDLE_JEMALLOC__ %if %{bundle_jemalloc} %global jemalloc_name jemalloc -%global jemalloc_ver 5.2.0 +%global jemalloc_ver 5.2.1 %endif # This is used in certain builds to help us know if it has extra features. @@ -367,12 +367,30 @@ LEGACY_FLAGS="--disable-legacy --disable-perl" %endif %if %{bundle_jemalloc} +# Override page size, bz #1545539 +# 4K +%ifarch %ix86 %arm x86_64 s390x +%define lg_page --with-lg-page=12 +%endif + +# 64K +%ifarch ppc64 ppc64le aarch64 +%define lg_page --with-lg-page=16 +%endif + +# Override huge page size on aarch64 +# 2M instead of 512M +%ifarch aarch64 +%define lg_hugepage --with-lg-hugepage=21 +%endif + # Build jemalloc pushd ../%{jemalloc_name}-%{jemalloc_ver} %configure \ --libdir=%{_libdir}/%{pkgname}/lib \ - --bindir=%{_libdir}/%{pkgname}/bin -make + --bindir=%{_libdir}/%{pkgname}/bin \ + --enable-prof %{lg_page} %{lg_hugepage} +make %{?_smp_mflags} popd %endif diff --git a/wrappers/systemd.template.service.custom.conf.in b/wrappers/systemd.template.service.custom.conf.in index fd10fe167..1c9241095 100644 --- a/wrappers/systemd.template.service.custom.conf.in +++ b/wrappers/systemd.template.service.custom.conf.in @@ -51,3 +51,7 @@ TimeoutStopSec=600 # 
Preload jemalloc Environment=LD_PRELOAD=@libdir@/@package_name@/lib/libjemalloc.so.2 + +# Uncomment to enable leak checking using jemalloc's heap profiler +# https://github.com/jemalloc/jemalloc/wiki/Use-Case%3A-Leak-Checking +#Environment=MALLOC_CONF=prof_leak:true,lg_prof_sample:0,prof_final:true,prof_prefix:/var/run/dirsrv/jeprof
0
e034c291ddfaed6c7ae5b2d30d16cfff5d191c1a
389ds/389-ds-base
Issue 49395 - Set the default TLS version min to TLS1.2 Description: On fedora the NSS default minimum is still TLS1.0, we need to force the default min to be TLS1.2 unless explicity set using sslVersionMin in cn=encryption,cn=config entry. This is also to comply with our healthcheck tool that complains about TLS1.0 min setting. relates: https://pagure.io/389-ds-base/issue/49395 Reviewed by: firstyear (Thanks!)
commit e034c291ddfaed6c7ae5b2d30d16cfff5d191c1a Author: Mark Reynolds <[email protected]> Date: Thu Jan 16 10:26:29 2020 -0500 Issue 49395 - Set the default TLS version min to TLS1.2 Description: On fedora the NSS default minimum is still TLS1.0, we need to force the default min to be TLS1.2 unless explicity set using sslVersionMin in cn=encryption,cn=config entry. This is also to comply with our healthcheck tool that complains about TLS1.0 min setting. relates: https://pagure.io/389-ds-base/issue/49395 Reviewed by: firstyear (Thanks!) diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c index 6a07f1ab0..8365b2936 100644 --- a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -48,8 +48,8 @@ * sslVersionMax: max ssl version supported by NSS ******************************************************************************/ -#define DEFVERSION "TLS1.0" -#define CURRENT_DEFAULT_SSL_VERSION SSL_LIBRARY_VERSION_TLS_1_0 +#define DEFVERSION "TLS1.2" +#define CURRENT_DEFAULT_SSL_VERSION SSL_LIBRARY_VERSION_TLS_1_2 extern char *slapd_SSL3ciphers; extern symbol_t supported_ciphers[]; @@ -151,7 +151,7 @@ PRBool enableSSL2 = PR_FALSE; PRBool enableSSL3 = PR_FALSE; /* * nsTLS1: on -- enable TLS1 by default. - * Corresonding to SSL_LIBRARY_VERSION_TLS_1_0 and greater. + * Corresonding to SSL_LIBRARY_VERSION_TLS_1_2 and greater. */ PRBool enableTLS1 = PR_TRUE; @@ -1780,7 +1780,11 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) } val = slapi_entry_attr_get_ref(e, "sslVersionMin"); if (val) { + /* Use the user defined minimum */ (void)set_NSS_version((char *)val, &NSSVersionMin, 1); + } else { + /* Force our default minimum */ + (void)set_NSS_version(DEFVERSION, &NSSVersionMin, 1); } val = slapi_entry_attr_get_ref(e, "sslVersionMax"); if (val) {
0
ec3d1f1e4c279a4d30bb05ab5d61b6cd97586090
389ds/389-ds-base
Resolves: 443241 Summary: Fixed issues with cleanup task not adding indirect memberships.
commit ec3d1f1e4c279a4d30bb05ab5d61b6cd97586090 Author: Nathan Kinder <[email protected]> Date: Mon Jun 9 21:43:59 2008 +0000 Resolves: 443241 Summary: Fixed issues with cleanup task not adding indirect memberships. diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index c9341571b..d435aec80 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -2188,9 +2188,9 @@ int memberof_fix_memberof(char *dn, char *filter_str) /* memberof_fix_memberof_callback() * Add initial and/or fix up broken group list in entry * - * 1. Make sure direct membership groups are in the entry - * 2. Add all groups that current group list allows through nested membership - * 3. Trim groups that have no relationship to entry + * 1. Remove all present memberOf values + * 2. Add direct group membership memberOf values + * 3. Add indirect group membership memberOf values */ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) { @@ -2198,14 +2198,12 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) char *dn = slapi_entry_get_dn(e); memberof_add_groups data = {dn, dn}; - /* step 1. and step 2. */ + /* step 1 */ + slapi_entry_attr_delete(e, MEMBEROF_ATTR); + + /* step 2 and 3 */ rc = memberof_call_foreach_dn(0, dn, MEMBEROF_GROUP_ATTR, memberof_add_groups_search_callback, &data); - if(0 == rc) - { - /* step 3. */ - rc = memberof_test_membership_callback(e, 0); - } return rc; }
0
07b678dc92b801c59e31ef1755a955a67376c5ef
389ds/389-ds-base
Issue 1795 - RFE - Enable logging for libldap and libber in error log (#4481) Description: Libraries like libldap, libber do error and debug logging, but it is not available in the DS logs. Provide a way to enable the third party logging in DS. Add nsslapd-external-libs-debug-enabled attribute to 'cn=config' which will enable all of the levels available in libldap and libber. The setting should be used only for debugging purposes as it prints all of the operations with great verbosity. The code for log_external_libs_debug_print() and log_external_libs_debug_set_log_fn() functions are provided by a former Red Hat employee - Ludwig Krispenz. Fixes: #1795 Reviewed by: @Firstyear and @tbordaz (Thanks!)
commit 07b678dc92b801c59e31ef1755a955a67376c5ef Author: Simon Pichugin <[email protected]> Date: Mon Dec 14 21:13:45 2020 +0100 Issue 1795 - RFE - Enable logging for libldap and libber in error log (#4481) Description: Libraries like libldap, libber do error and debug logging, but it is not available in the DS logs. Provide a way to enable the third party logging in DS. Add nsslapd-external-libs-debug-enabled attribute to 'cn=config' which will enable all of the levels available in libldap and libber. The setting should be used only for debugging purposes as it prints all of the operations with great verbosity. The code for log_external_libs_debug_print() and log_external_libs_debug_set_log_fn() functions are provided by a former Red Hat employee - Ludwig Krispenz. Fixes: #1795 Reviewed by: @Firstyear and @tbordaz (Thanks!) diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py index d433bafa1..49e5b1cdc 100644 --- a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py @@ -137,6 +137,40 @@ def test_verify_operation_when_disk_monitoring_is_off(topo, setup, reset_logs): os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) +@disk_monitoring_ack +def test_enable_external_libs_debug_log(topo, setup, reset_logs): + """Check that OpenLDAP logs are successfully enabled and disabled when + disk threshold is reached + + :id: 121b2b24-ecba-48e2-9ee2-312d929dc8c6 + :setup: Standalone instance + :steps: 1. Set nsslapd-external-libs-debug-enabled to "on" + 2. Go straight below 1/2 of the threshold + 3. Verify that the external libs debug setting is disabled + 4. Go back above 1/2 of the threshold + 5. Verify that the external libs debug setting is enabled back + :expectedresults: 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + try: + # Verify that verbose logging was set to default level + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-external-libs-debug-enabled', 'on') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) + # Verify that logging is disabled + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'off'", 31) + finally: + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'on'", 31) + assert topo.standalone.config.set('nsslapd-external-libs-debug-enabled', 'off') + + @disk_monitoring_ack def test_free_up_the_disk_space_and_change_ds_config(topo, setup, reset_logs): """Free up the disk space and change DS config @@ -734,3 +768,4 @@ def test_valid_operations_are_permitted(topo, setup, reset_logs): if __name__ == '__main__': CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py index b02b5806a..7839ef581 100644 --- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py @@ -16,7 +16,7 @@ from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, Aut from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.idm.organizationalunit import OrganizationalUnits -from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL +from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD from lib389.utils import 
ds_is_older, ds_is_newer import ldap import glob @@ -838,7 +838,7 @@ def test_etime_order_of_magnitude(topology_st, clean_access_logs, remove_users, # The result_str returned looks like : # For ds older than 1.4.3.8: [23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017 # For ds newer than 1.4.3.8: [21/Oct/2020:09:27:50.095209871 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000412584 optime=0.005428971 etime=0.005836077 - + log.info('get the operation end time from the RESULT string') # Here we are getting the sec.nanosec part of the date, '14.366429900' in the above example end_time = (result_str.split()[0]).split(':')[3] @@ -1037,6 +1037,80 @@ def test_audit_log_rotate_and_check_string(topology_st, clean_access_logs, set_a assert count == 1 +def test_enable_external_libs_debug_log(topology_st): + """Check that OpenLDAP logs are successfully enabled and disabled + + :id: b04646e3-9a5e-45ae-ad81-2882c1daf23e + :setup: Standalone instance + :steps: 1. Create a user to bind on + 2. Set nsslapd-external-libs-debug-enabled to "on" + 3. Clean the error log + 4. Bind as the user to generate OpenLDAP output + 5. Restart the servers to flush the logs + 6. Check the error log for OpenLDAP debug log + 7. Set nsslapd-external-libs-debug-enabled to "on" + 8. Clean the error log + 9. Bind as the user to generate OpenLDAP output + 10. Restart the servers to flush the logs + 11. Check the error log for OpenLDAP debug log + :expectedresults: 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Logs are present + 7. Success + 8. Success + 9. Success + 10. Success + 11. 
No logs are present + """ + + standalone = topology_st.standalone + + log.info('Create a user to bind on') + users = UserAccounts(standalone, DEFAULT_SUFFIX) + user = users.ensure_state(properties={ + 'uid': 'test_audit_log', + 'cn': 'test', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/test', + 'userPassword': PASSWORD + }) + + log.info('Set nsslapd-external-libs-debug-enabled to "on"') + standalone.config.set('nsslapd-external-libs-debug-enabled', 'on') + + log.info('Clean the error log') + standalone.deleteErrorLogs() + + log.info('Bind as the user to generate OpenLDAP output') + user.bind(PASSWORD) + + log.info('Restart the servers to flush the logs') + standalone.restart() + + log.info('Check the error log for OpenLDAP debug log') + assert standalone.ds_error_log.match('.*libldap/libber.*') + + log.info('Set nsslapd-external-libs-debug-enabled to "off"') + standalone.config.set('nsslapd-external-libs-debug-enabled', 'off') + + log.info('Clean the error log') + standalone.deleteErrorLogs() + + log.info('Bind as the user to generate OpenLDAP output') + user.bind(PASSWORD) + + log.info('Restart the servers to flush the logs') + standalone.restart() + + log.info('Check the error log for OpenLDAP debug log') + assert not standalone.ds_error_log.match('.*libldap/libber.*') + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 0b43c538b..ebc60c229 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -409,6 +409,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) int using_accesslog = 0; int using_auditlog = 0; int using_auditfaillog = 0; + int using_external_libs_debug = 0; int logs_disabled = 0; int grace_period = 0; int first_pass = 1; @@ -421,6 +422,8 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) Slapi_Backend *be_list[BE_LIST_SIZE + 1] = {0}; while (!g_get_shutdown()) { + char 
errorbuf[SLAPI_DSE_RETURNTEXT_SIZE]; + if (!first_pass) { struct timespec current_time = {0}; @@ -458,6 +461,9 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) if (config_get_accesslog_logging_enabled()) { using_accesslog = 1; } + if (config_get_external_libs_debug_enabled()) { + using_external_libs_debug = 1; + } /* * Check the disk space. Always refresh the list, as backends can be added */ @@ -493,6 +499,13 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) if (using_auditfaillog) { config_set_auditfaillog_enabled(LOGGING_ON); } + if (using_external_libs_debug) { + if (config_set_external_libs_debug_enabled(CONFIG_EXTERNAL_LIBS_DEBUG_ENABLED, + "on", errorbuf, CONFIG_APPLY) != LDAP_SUCCESS) { + slapi_log_err(SLAPI_LOG_ERR, "disk_monitoring_thread - setting on: %s: %s\n", + CONFIG_EXTERNAL_LIBS_DEBUG_ENABLED, errorbuf); + } + } } else { slapi_log_err(SLAPI_LOG_INFO, "disk_monitoring_thread", "Disk space is now within acceptable levels.\n"); } @@ -573,6 +586,12 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) config_set_accesslog_enabled(LOGGING_OFF); config_set_auditlog_enabled(LOGGING_OFF); config_set_auditfaillog_enabled(LOGGING_OFF); + if (config_set_external_libs_debug_enabled(CONFIG_EXTERNAL_LIBS_DEBUG_ENABLED, + "off", errorbuf, CONFIG_APPLY) != LDAP_SUCCESS) { + slapi_log_err(SLAPI_LOG_ERR, "disk_monitoring_thread - setting off: %s: %s\n", + CONFIG_EXTERNAL_LIBS_DEBUG_ENABLED, errorbuf); + } + logs_disabled = 1; continue; } @@ -651,6 +670,13 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) if (logs_disabled && using_auditfaillog) { config_set_auditfaillog_enabled(LOGGING_ON); } + if (logs_disabled && using_external_libs_debug) { + if (config_set_external_libs_debug_enabled(CONFIG_EXTERNAL_LIBS_DEBUG_ENABLED, + "on", errorbuf, CONFIG_APPLY) != LDAP_SUCCESS) { + slapi_log_err(SLAPI_LOG_ERR, "disk_monitoring_thread - setting on: %s: %s\n", + CONFIG_EXTERNAL_LIBS_DEBUG_ENABLED, errorbuf); + } + } 
deleted_rotated_logs = 0; passed_threshold = 0; logs_disabled = 0; diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 230db88e2..d62211019 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -183,6 +183,7 @@ slapi_onoff_t init_auditlog_rotationsync_enabled; slapi_onoff_t init_auditfaillog_rotationsync_enabled; slapi_onoff_t init_accesslog_logging_enabled; slapi_onoff_t init_accesslogbuffering; +slapi_onoff_t init_external_libs_debug_enabled; slapi_onoff_t init_errorlog_logging_enabled; slapi_onoff_t init_auditlog_logging_enabled; slapi_onoff_t init_auditlog_logging_hide_unhashed_pw; @@ -561,6 +562,11 @@ static struct config_get_and_set NULL, 0, (void **)&global_slapdFrontendConfig.errorlog, CONFIG_STRING_OR_EMPTY, NULL, NULL, NULL /* deletion is not allowed */}, + {CONFIG_EXTERNAL_LIBS_DEBUG_ENABLED, config_set_external_libs_debug_enabled, + NULL, 0, + (void **)&global_slapdFrontendConfig.external_libs_debug_enabled, + CONFIG_ON_OFF, (ConfigGetFunc)config_get_external_libs_debug_enabled, + &init_external_libs_debug_enabled, NULL}, {CONFIG_AUDITLOG_LOGEXPIRATIONTIME_ATTRIBUTE, NULL, log_set_expirationtime, SLAPD_AUDIT_LOG, (void **)&global_slapdFrontendConfig.auditlog_exptime, @@ -1712,6 +1718,7 @@ FrontendConfig_init(void) init_csnlogging = cfg->csnlogging = LDAP_ON; init_errorlog_logging_enabled = cfg->errorlog_logging_enabled = LDAP_ON; + init_external_libs_debug_enabled = cfg->external_libs_debug_enabled = LDAP_OFF; cfg->errorlog_mode = slapi_ch_strdup(SLAPD_INIT_LOG_MODE); cfg->errorlog_maxnumlogs = SLAPD_DEFAULT_LOG_MAXNUMLOGS; cfg->errorlog_maxlogsize = SLAPD_DEFAULT_LOG_MAXLOGSIZE; @@ -6108,6 +6115,13 @@ config_get_errorlog() return retVal; } +int32_t +config_get_external_libs_debug_enabled() +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + return slapi_atomic_load_32(&(slapdFrontendConfig->external_libs_debug_enabled), __ATOMIC_ACQUIRE); +} + char * config_get_auditlog() { 
@@ -8437,6 +8451,28 @@ config_set_entry(Slapi_Entry *e) return 1; } + +int +config_set_external_libs_debug_enabled(const char *attrname, char *value, char *errorbuf, int apply) +{ + int32_t retVal = LDAP_SUCCESS; + int32_t dbglvl = 0; /* no debugging */ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + + retVal = config_set_onoff(attrname, value, &(slapdFrontendConfig->external_libs_debug_enabled), + errorbuf, apply); + if (retVal == LDAP_SUCCESS && strcasecmp(value, "on") == 0) { + dbglvl = -1; /* all debug levels */ + } else if (retVal == LDAP_SUCCESS && strcasecmp(value, "off") == 0) { + dbglvl = 0; + } else { + return retVal; + } + ber_set_option(NULL, LBER_OPT_DEBUG_LEVEL, &dbglvl); + ldap_set_option(NULL, LDAP_OPT_DEBUG_LEVEL, &dbglvl); + return retVal; +} + void config_set_accesslog_enabled(int value) { diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index f4f52b752..3837d3a99 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -119,6 +119,7 @@ static void log_write_title(LOGFD fp); static void log__error_emergency(const char *errstr, int reopen, int locked); static void vslapd_log_emergency_error(LOGFD fp, const char *msg, int locked); static int get_syslog_loglevel(int loglevel); +static void log_external_libs_debug_openldap_print(char *buffer); static int get_syslog_loglevel(int loglevel) @@ -441,6 +442,23 @@ log_set_logging(const char *attrname, char *value, int logtype, char *errorbuf, return LDAP_SUCCESS; } +static void +log_external_libs_debug_openldap_print(char *buffer) +{ + slapi_log_error(SLAPI_LOG_WARNING, "libldap/libber", "%s", buffer); +} + +int +log_external_libs_debug_set_log_fn(void) +{ + int rc = ber_set_option(NULL, LBER_OPT_LOG_PRINT_FN, log_external_libs_debug_openldap_print); + if (rc != LBER_OPT_SUCCESS) { + slapi_log_error(SLAPI_LOG_WARNING, "libldap/libber", + "Failed to init Log Function, err = %d\n", rc); + } + return rc; +} + int log_set_backend(const char *attrname 
__attribute__((unused)), char *value, int logtype __attribute__((unused)), char *errorbuf __attribute__((unused)), int apply) { diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 012cf5236..88313f891 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -542,6 +542,9 @@ main(int argc, char **argv) #endif #endif + /* Set third party libs function before we init config so we see the logs earlier */ + log_external_libs_debug_set_log_fn(); + /* * Initialize NSPR very early. NSPR supports implicit initialization, * but it is not bulletproof -- so it is better to be explicit. diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index 254115f9c..ce77c607e 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -385,6 +385,7 @@ int config_set_disk_grace_period(const char *attrname, char *value, char *errorb int config_set_disk_logging_critical(const char *attrname, char *value, char *errorbuf, int apply); int config_set_auditlog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply); int config_set_auditfaillog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply); +int config_set_external_libs_debug_enabled(const char *attrname, char *value, char *errorbuf, int apply); int config_set_ndn_cache_enabled(const char *attrname, char *value, char *errorbuf, int apply); int config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf, int apply); int config_set_unhashed_pw_switch(const char *attrname, char *value, char *errorbuf, int apply); @@ -403,6 +404,7 @@ int config_set_localuser(const char *attrname, char *value, char *errorbuf, int int config_set_maxsimplepaged_per_conn(const char *attrname, char *value, char *errorbuf, int apply); +int log_external_libs_debug_set_log_fn(void); int log_set_backend(const char *attrname, char *value, int logtype, char *errorbuf, int apply); #ifdef HAVE_CLOCK_GETTIME @@ -544,6 +546,7 @@ 
void config_set_accesslog_enabled(int value); void config_set_auditlog_enabled(int value); void config_set_auditfaillog_enabled(int value); int config_get_accesslog_logging_enabled(void); +int config_get_external_libs_debug_enabled(void); int config_get_disk_monitoring(void); int config_get_disk_threshold_readonly(void); uint64_t config_get_disk_threshold(void); diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index e850b8b29..c2da81ead 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -2099,6 +2099,7 @@ typedef struct _slapdEntryPoints #define CONFIG_AUDITFAILLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE "nsslapd-auditfaillog-logexpirationtimeunit" #define CONFIG_ACCESSLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-accesslog-logging-enabled" #define CONFIG_ERRORLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-errorlog-logging-enabled" +#define CONFIG_EXTERNAL_LIBS_DEBUG_ENABLED "nsslapd-external-libs-debug-enabled" #define CONFIG_AUDITLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-auditlog-logging-enabled" #define CONFIG_AUDITFAILLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-auditfaillog-logging-enabled" #define CONFIG_AUDITLOG_LOGGING_HIDE_UNHASHED_PW "nsslapd-auditlog-logging-hide-unhashed-pw" @@ -2429,6 +2430,7 @@ typedef struct _slapdFrontendConfig int errorlog_exptime; char *errorlog_exptimeunit; int errorloglevel; + slapi_onoff_t external_libs_debug_enabled; /* AUDIT LOG */ char *auditlog; /* replication audit file */
0
c097f15b940d4192bc05f1b43cdca35697f96fd3
389ds/389-ds-base
Ticket #48048 - Fix coverity issues - 2015/2/24 Coverity defect 13071 - Explicit null dereferenced (FORWARD_NULL) Description: Added NULL check for mrINDEX. modified: matchrule_values_to_keys in matchrule.c
commit c097f15b940d4192bc05f1b43cdca35697f96fd3 Author: Noriko Hosoi <[email protected]> Date: Tue Feb 24 12:58:57 2015 -0800 Ticket #48048 - Fix coverity issues - 2015/2/24 Coverity defect 13071 - Explicit null dereferenced (FORWARD_NULL) Description: Added NULL check for mrINDEX. modified: matchrule_values_to_keys in matchrule.c diff --git a/ldap/servers/slapd/back-ldbm/matchrule.c b/ldap/servers/slapd/back-ldbm/matchrule.c index 7af42f451..6de6461ee 100644 --- a/ldap/servers/slapd/back-ldbm/matchrule.c +++ b/ldap/servers/slapd/back-ldbm/matchrule.c @@ -155,9 +155,13 @@ matchrule_values_to_keys(Slapi_PBlock *pb,struct berval **input_values,struct be slapi_pblock_get (pb, SLAPI_PLUGIN_MR_INDEX_FN, &mrINDEX); slapi_pblock_set (pb, SLAPI_PLUGIN_MR_VALUES, input_values); - mrINDEX (pb); - slapi_pblock_get (pb, SLAPI_PLUGIN_MR_KEYS, output_values); - return 0; + if (mrINDEX) { + mrINDEX (pb); + slapi_pblock_get (pb, SLAPI_PLUGIN_MR_KEYS, output_values); + return LDAP_SUCCESS; + } else { + return LDAP_OPERATIONS_ERROR; + } } /*
0
70c61f6987df094060c558163eb7fc4dc3f31138
389ds/389-ds-base
Bump github contianer shm size to 4 gigs
commit 70c61f6987df094060c558163eb7fc4dc3f31138 Author: Mark Reynolds <[email protected]> Date: Tue Oct 19 08:50:42 2021 -0400 Bump github contianer shm size to 4 gigs diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index cf3880128..6eaf759f6 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -64,7 +64,7 @@ jobs: - name: Run pytest in a container run: | set -x - CID=$(sudo docker run -d -h server.example.com --privileged --rm -v /sys/fs/cgroup:/sys/fs/cgroup:rw,rslave -v ${PWD}:/workspace quay.io/389ds/ci-images:test) + CID=$(sudo docker run -d -h server.example.com --privileged --rm --shm-size=4gb -v /sys/fs/cgroup:/sys/fs/cgroup:rw,rslave -v ${PWD}:/workspace quay.io/389ds/ci-images:test) sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm" sudo docker exec $CID py.test --suppress-no-test-exit-code -m "not flaky" --junit-xml=pytest.xml --html=pytest.html -v dirsrvtests/tests/suites/${{ matrix.suite }} - name: Make the results file readable by all
0
7d0689aaadfa66a8f8a481b0c1bb70b2465c4986
389ds/389-ds-base
Ticket #48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value Description: If nsslapd-maxbersize is not explicitely set in cn=config or the value is 0, the default value is assigned. Internally, it was. But ldapsearch did not return the default value. https://fedorahosted.org/389/ticket/48214 Reviewed by [email protected] (Thank you, Rich!)
commit 7d0689aaadfa66a8f8a481b0c1bb70b2465c4986 Author: Noriko Hosoi <[email protected]> Date: Thu Jul 2 16:44:09 2015 -0700 Ticket #48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value Description: If nsslapd-maxbersize is not explicitely set in cn=config or the value is 0, the default value is assigned. Internally, it was. But ldapsearch did not return the default value. https://fedorahosted.org/389/ticket/48214 Reviewed by [email protected] (Thank you, Rich!) diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 24de4f367..a3c4243af 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -821,7 +821,7 @@ static struct config_get_and_set { {CONFIG_MAXBERSIZE_ATTRIBUTE, config_set_maxbersize, NULL, 0, (void**)&global_slapdFrontendConfig.maxbersize, - CONFIG_INT, NULL, DEFAULT_MAX_BERSIZE}, + CONFIG_INT, NULL, STRINGIFYDEFINE(DEFAULT_MAXBERSIZE)}, {CONFIG_MAXSASLIOSIZE_ATTRIBUTE, config_set_maxsasliosize, NULL, 0, (void**)&global_slapdFrontendConfig.maxsasliosize, @@ -1540,6 +1540,7 @@ FrontendConfig_init () { init_cn_uses_dn_syntax_in_dns = cfg->cn_uses_dn_syntax_in_dns = LDAP_OFF; init_global_backend_local = LDAP_OFF; cfg->maxsimplepaged_per_conn = DEFAULT_MAXSIMPLEPAGED_PER_CONN; + cfg->maxbersize = DEFAULT_MAXBERSIZE; #ifdef ENABLE_NUNC_STANS init_enable_nunc_stans = cfg->enable_nunc_stans = LDAP_OFF; #endif @@ -5713,6 +5714,9 @@ config_set_maxbersize( const char *attrname, char *value, char *errorbuf, int ap return retVal; } + if (size == 0) { + size = DEFAULT_MAXBERSIZE; + } CFG_LOCK_WRITE(slapdFrontendConfig); slapdFrontendConfig->maxbersize = size; @@ -5728,8 +5732,9 @@ config_get_maxbersize() slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); maxbersize = slapdFrontendConfig->maxbersize; - if(maxbersize==0) + if (maxbersize == 0) { maxbersize = DEFAULT_MAXBERSIZE; + } return maxbersize; }
0
59e45a75a8ba2995b5ddb33a42fc017ecf3d17a3
389ds/389-ds-base
add testcase for ticket 48366 - proxyauth for root
commit 59e45a75a8ba2995b5ddb33a42fc017ecf3d17a3 Author: Ludwig Krispenz <[email protected]> Date: Fri Jun 17 13:49:05 2016 +0200 add testcase for ticket 48366 - proxyauth for root diff --git a/dirsrvtests/tests/tickets/ticket48366_test.py b/dirsrvtests/tests/tickets/ticket48366_test.py new file mode 100644 index 000000000..fb2dd9775 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48366_test.py @@ -0,0 +1,214 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from ldap.controls.simple import ProxyAuthzControl + +log = logging.getLogger(__name__) + +installation_prefix = None + +PROXY_USER_DN = 'cn=proxy,ou=people,%s' % SUFFIX +TEST_USER_DN = 'cn=test,ou=people,%s' % SUFFIX +USER_PW = 'password' + + +# subtrees used in test +SUBTREE_GREEN = "ou=green,%s" % SUFFIX +SUBTREE_RED = "ou=red,%s" % SUFFIX +SUBTREES = (SUBTREE_GREEN, SUBTREE_RED) + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + [email protected](scope="module") +def topology(request): + global installation_prefix + + if installation_prefix: + args_instance[SER_DEPLOYED_DIR] = installation_prefix + + standalone = DirSrv(verbose=False) + + # Args for the standalone instance + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + + # Get the status of the instance and restart it if it exists + instance_standalone = standalone.exists() + + # Remove the instance + if instance_standalone: 
+ standalone.delete() + + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() + + # clear the tmp directory + standalone.clearTmpDir(__file__) + + # Here we have standalone instance up and running + return TopologyStandalone(standalone) + + +def test_ticket48366_init(topology): + """ + It creates identical entries in 3 subtrees + It creates aci which allow access to a set of attrs + in two of these subtrees for bound users + It creates a user to be used for test + + """ + + + topology.standalone.log.info("Add subtree: %s" % SUBTREE_GREEN) + topology.standalone.add_s(Entry((SUBTREE_GREEN, { + 'objectclass': "top organizationalunit".split(), + 'ou': "green_one"}))) + topology.standalone.log.info("Add subtree: %s" % SUBTREE_RED) + topology.standalone.add_s(Entry((SUBTREE_RED, { + 'objectclass': "top organizationalunit".split(), + 'ou': "red"}))) + + # add proxy user and test user + topology.standalone.log.info("Add %s" % TEST_USER_DN) + topology.standalone.add_s(Entry((TEST_USER_DN, { + 'objectclass': "top person".split(), + 'sn': 'test', + 'cn': 'test', + 'userpassword': USER_PW}))) + topology.standalone.log.info("Add %s" % PROXY_USER_DN) + topology.standalone.add_s(Entry((PROXY_USER_DN, { + 'objectclass': "top person".split(), + 'sn': 'proxy', + 'cn': 'proxy', + 'userpassword': USER_PW}))) + + # enable acl error logging + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] + # topology.standalone.modify_s(DN_CONFIG, mod) + + # get rid of default ACIs + mod = [(ldap.MOD_DELETE, 'aci', None)] + topology.standalone.modify_s(SUFFIX, mod) + + # Ok Now add the proper ACIs + ACI_TARGET = "(target = \"ldap:///%s\")" % SUBTREE_GREEN + ACI_TARGETATTR = "(targetattr = \"objectclass || cn || sn || uid || givenname \")" + ACI_ALLOW = "(version 3.0; acl \"Allow search-read to green subtree\"; allow (read, search, compare)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % TEST_USER_DN + 
ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.standalone.modify_s(SUFFIX, mod) + + ACI_ALLOW = "(version 3.0; acl \"Allow use pf proxy auth to green subtree\"; allow (proxy)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % PROXY_USER_DN + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + topology.standalone.modify_s(SUFFIX, mod) + + log.info("Adding %d test entries...") + for id in range(2): + name = "%s%d" % ('test', id) + mail = "%[email protected]" % name + for subtree in SUBTREES: + topology.standalone.add_s(Entry(("cn=%s,%s" % (name, subtree), { + 'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': name, + 'cn': name, + 'uid': name, + 'givenname': 'test', + 'mail': mail, + 'description': 'description', + 'employeenumber': "%d" % id, + 'telephonenumber': "%d%d%d" % (id,id,id), + 'mobile': "%d%d%d" % (id,id,id), + 'l': 'MV', + 'title': 'Engineer'}))) + + + +def test_ticket48366_search_user(topology): + + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: "+TEST_USER_DN) + # searching as test user should return one entry from the green subtree + topology.standalone.simple_bind_s(TEST_USER_DN, PASSWORD) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 1) + + # searching as proxy user should return no entry + topology.standalone.simple_bind_s(PROXY_USER_DN, PASSWORD) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 0) + + # serching as proxy user, authorizing as test user should return 1 entry + ents = topology.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 1) + +def test_ticket48366_search_dm(topology): + + # searching as directory manager should return one entries from both subtrees + topology.standalone.simple_bind_s(DN_DM, 
PASSWORD) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 2) + + # searching as directory manager proxying test user should return one entry + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: "+TEST_USER_DN) + ents = topology.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 1) + + # searching as directory manager proxying proxy user should return no entry + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: "+PROXY_USER_DN) + ents = topology.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 0) + +def test_ticket48366_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + ''' + run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) + To run isolated without py.test, you need to + - edit this file and comment '@pytest.fixture' line before 'topology' function. + - set the installation prefix + - run this program + ''' + global installation_prefix + installation_prefix = None + + topo = topology(True) + test_ticket48366_init(topo) + + test_ticket48366_search_dm(topo) + + test_ticket48366_final(topo) + + +if __name__ == '__main__': + run_isolated() +
0
5601fe46a98a7c9318b94c34fed75320f118957e
389ds/389-ds-base
Ticket 48834 - Fix jenkins: discared qualifier on auditlog.c Bug Description: ldap/servers/slapd/auditlog.c:87:31: warning: assignment discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers] ldap/servers/slapd/auditlog.c:155:31: warning: assignment discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers] Fix Description: Add the const char type to the array. https://fedorahosted.org/389/ticket/48834 Author: wibrown Review by: One line fix
commit 5601fe46a98a7c9318b94c34fed75320f118957e Author: William Brown <[email protected]> Date: Thu May 19 13:52:24 2016 +1000 Ticket 48834 - Fix jenkins: discared qualifier on auditlog.c Bug Description: ldap/servers/slapd/auditlog.c:87:31: warning: assignment discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers] ldap/servers/slapd/auditlog.c:155:31: warning: assignment discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers] Fix Description: Add the const char type to the array. https://fedorahosted.org/389/ticket/48834 Author: wibrown Review by: One line fix diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c index 9a1b5028f..0f4cc94ce 100644 --- a/ldap/servers/slapd/auditlog.c +++ b/ldap/servers/slapd/auditlog.c @@ -35,7 +35,7 @@ static int auditfail_hide_unhashed_pw = 1; /* Forward Declarations */ static void write_audit_file(int logtype, int optype, const char *dn, void *change, int flag, time_t curtime, int rc ); -static char *modrdn_changes[4]; +static const char *modrdn_changes[4]; void write_audit_log_entry( Slapi_PBlock *pb )
0
322fd7996aa810db1b51882d8d1103e11a36cc62
389ds/389-ds-base
Ticket #47550 logconv: failed logins: Use of uninitialized value in numeric comparison at logconv.pl line 949 https://fedorahosted.org/389/ticket/47550 Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: Copy/paste error. Changed badPassword to badPasswordIp. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit 322fd7996aa810db1b51882d8d1103e11a36cc62 Author: Rich Megginson <[email protected]> Date: Thu Oct 10 15:55:29 2013 -0600 Ticket #47550 logconv: failed logins: Use of uninitialized value in numeric comparison at logconv.pl line 949 https://fedorahosted.org/389/ticket/47550 Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: Copy/paste error. Changed badPassword to badPasswordIp. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl index 33023455e..38a84af76 100755 --- a/ldap/admin/src/logconv.pl +++ b/ldap/admin/src/logconv.pl @@ -948,7 +948,7 @@ if ($verb eq "yes" || $usage =~ /f/ ){ } print "\nFrom the IP address(s) :\n\n"; $bpCount = 0; - foreach my $ip (sort {$badPassword{$b} <=> $badPassword{$a} } keys %badPasswordIp){ + foreach my $ip (sort {$badPasswordIp{$b} <=> $badPasswordIp{$a} } keys %badPasswordIp){ if ($bpCount > $sizeCount){ last;} $bpCount++; printf "%-4s %-16s\n", $badPasswordIp{$ip}, $ip;
0
57b990de0d863a2ac0942b618bde97cc0a84e6b8
389ds/389-ds-base
Issue 50355 - SSL version min and max not correctly applied Bug Description: Setting the sslVersionMin or SSLVersionMax was not correctly applied and the NSS default min and max became the valid range. Fix Description: Do not attempt to reset the requested range based off of hardcoded limits. Also removed obsolete SSL3 code, and fixed a minor memory leak in main.c found during ASAN testing. Relates: https://pagure.io/389-ds-base/issue/50355 ASAN approved Reviewed by: tbordaz(Thanks!)
commit 57b990de0d863a2ac0942b618bde97cc0a84e6b8 Author: Mark Reynolds <[email protected]> Date: Thu Jul 18 21:44:07 2019 -0400 Issue 50355 - SSL version min and max not correctly applied Bug Description: Setting the sslVersionMin or SSLVersionMax was not correctly applied and the NSS default min and max became the valid range. Fix Description: Do not attempt to reset the requested range based off of hardcoded limits. Also removed obsolete SSL3 code, and fixed a minor memory leak in main.c found during ASAN testing. Relates: https://pagure.io/389-ds-base/issue/50355 ASAN approved Reviewed by: tbordaz(Thanks!) diff --git a/dirsrvtests/tests/suites/tls/ssl_version_test.py b/dirsrvtests/tests/suites/tls/ssl_version_test.py new file mode 100644 index 000000000..acc8b230d --- /dev/null +++ b/dirsrvtests/tests/suites/tls/ssl_version_test.py @@ -0,0 +1,55 @@ +import logging +import pytest +import os +from lib389.config import Encryption +from lib389.topologies import topology_st as topo + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ssl_version_range(topo): + """Specify a test case purpose or name here + + :id: bc400f54-3966-49c8-b640-abbf4fb2377e + 1. Get current default range + 2. Set sslVersionMin and verify it is applied after a restart + 3. Set sslVersionMax and verify it is applied after a restart + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + topo.standalone.enable_tls() + enc = Encryption(topo.standalone) + default_min = enc.get_attr_val_utf8('sslVersionMin') + default_max = enc.get_attr_val_utf8('sslVersionMax') + log.info(f"default min: {default_min} max: {default_max}") + if DEBUGGING: + topo.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') + + # Test that setting the min version is applied after a restart + enc.replace('sslVersionMin', default_max) + enc.replace('sslVersionMax', default_max) + topo.standalone.restart() + min = enc.get_attr_val_utf8('sslVersionMin') + assert min == default_max + + # Test that setting the max version is applied after a restart + enc.replace('sslVersionMin', default_min) + enc.replace('sslVersionMax', default_min) + topo.standalone.restart() + max = enc.get_attr_val_utf8('sslVersionMax') + assert max == default_min + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 2c7b53214..8224cd06e 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -911,14 +911,13 @@ main(int argc, char **argv) slapi_ch_free_string(&securelistenhost); #if defined(ENABLE_LDAPI) - if (config_get_ldapi_switch() && - config_get_ldapi_filename() != 0) { + if (config_get_ldapi_switch() && slapdFrontendConfig->ldapi_filename != 0) { mcfg.i_port = ports_info.i_port = 1; /* flag ldapi as on */ ports_info.i_listenaddr = (PRNetAddr **)slapi_ch_calloc(2, sizeof(PRNetAddr *)); *ports_info.i_listenaddr = (PRNetAddr *)slapi_ch_calloc(1, sizeof(PRNetAddr)); (*ports_info.i_listenaddr)->local.family = PR_AF_LOCAL; PL_strncpyz((*ports_info.i_listenaddr)->local.path, - config_get_ldapi_filename(), + slapdFrontendConfig->ldapi_filename, sizeof((*ports_info.i_listenaddr)->local.path)); unlink((*ports_info.i_listenaddr)->local.path); } diff --git a/ldap/servers/slapd/ssl.c 
b/ldap/servers/slapd/ssl.c index 9d278148f..eef853a59 100644 --- a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -48,8 +48,8 @@ * sslVersionMax: max ssl version supported by NSS ******************************************************************************/ -#define DEFVERSION "TLS1.2" -#define CURRENT_DEFAULT_SSL_VERSION SSL_LIBRARY_VERSION_TLS_1_2 +#define DEFVERSION "TLS1.0" +#define CURRENT_DEFAULT_SSL_VERSION SSL_LIBRARY_VERSION_TLS_1_0 extern char *slapd_SSL3ciphers; extern symbol_t supported_ciphers[]; @@ -137,75 +137,6 @@ typedef struct static cipherstruct *_conf_ciphers = NULL; static void _conf_init_ciphers(void); -/* - * This lookup table is for supporting the old cipher name. - * Once swtiching to the NSS cipherSuiteName is done, - * this lookup_cipher table can be removed. - */ -typedef struct -{ - char *alias; - char *name; -} lookup_cipher; -static lookup_cipher _lookup_cipher[] = { - {"rc4", "SSL_CK_RC4_128_WITH_MD5"}, - {"rc4export", "SSL_CK_RC4_128_EXPORT40_WITH_MD5"}, - {"rc2", "SSL_CK_RC2_128_CBC_WITH_MD5"}, - {"rc2export", "SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5"}, - /*{"idea", "SSL_EN_IDEA_128_CBC_WITH_MD5"}, */ - {"des", "SSL_CK_DES_64_CBC_WITH_MD5"}, - {"desede3", "SSL_CK_DES_192_EDE3_CBC_WITH_MD5"}, - {"rsa_rc4_128_md5", "TLS_RSA_WITH_RC4_128_MD5"}, - {"rsa_rc4_128_sha", "TLS_RSA_WITH_RC4_128_SHA"}, - {"rsa_3des_sha", "TLS_RSA_WITH_3DES_EDE_CBC_SHA"}, - {"tls_rsa_3des_sha", "TLS_RSA_WITH_3DES_EDE_CBC_SHA"}, - {"rsa_fips_3des_sha", "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA"}, - {"fips_3des_sha", "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA"}, - {"rsa_des_sha", "TLS_RSA_WITH_DES_CBC_SHA"}, - {"rsa_fips_des_sha", "SSL_RSA_FIPS_WITH_DES_CBC_SHA"}, - {"fips_des_sha", "SSL_RSA_FIPS_WITH_DES_CBC_SHA"}, /* ditto */ - {"rsa_rc4_40_md5", "TLS_RSA_EXPORT_WITH_RC4_40_MD5"}, - {"tls_rsa_rc4_40_md5", "TLS_RSA_EXPORT_WITH_RC4_40_MD5"}, - {"rsa_rc2_40_md5", "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5"}, - {"tls_rsa_rc2_40_md5", "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5"}, - 
{"rsa_null_md5", "TLS_RSA_WITH_NULL_MD5"}, /* disabled by default */ - {"rsa_null_sha", "TLS_RSA_WITH_NULL_SHA"}, /* disabled by default */ - {"tls_rsa_export1024_with_rc4_56_sha", "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA"}, - {"rsa_rc4_56_sha", "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA"}, /* ditto */ - {"tls_rsa_export1024_with_des_cbc_sha", "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA"}, - {"rsa_des_56_sha", "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA"}, /* ditto */ - {"fortezza", ""}, /* deprecated */ - {"fortezza_rc4_128_sha", ""}, /* deprecated */ - {"fortezza_null", ""}, /* deprecated */ - - /*{"dhe_dss_40_sha", SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, 0}, */ - {"dhe_dss_des_sha", "TLS_DHE_DSS_WITH_DES_CBC_SHA"}, - {"dhe_dss_3des_sha", "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA"}, - {"dhe_rsa_40_sha", "TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA"}, - {"dhe_rsa_des_sha", "TLS_DHE_RSA_WITH_DES_CBC_SHA"}, - {"dhe_rsa_3des_sha", "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA"}, - - {"tls_rsa_aes_128_sha", "TLS_RSA_WITH_AES_128_CBC_SHA"}, - {"rsa_aes_128_sha", "TLS_RSA_WITH_AES_128_CBC_SHA"}, /* ditto */ - {"tls_dh_dss_aes_128_sha", ""}, /* deprecated */ - {"tls_dh_rsa_aes_128_sha", ""}, /* deprecated */ - {"tls_dhe_dss_aes_128_sha", "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"}, - {"tls_dhe_rsa_aes_128_sha", "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"}, - - {"tls_rsa_aes_256_sha", "TLS_RSA_WITH_AES_256_CBC_SHA"}, - {"rsa_aes_256_sha", "TLS_RSA_WITH_AES_256_CBC_SHA"}, /* ditto */ - {"tls_dss_aes_256_sha", ""}, /* deprecated */ - {"tls_rsa_aes_256_sha", ""}, /* deprecated */ - {"tls_dhe_dss_aes_256_sha", "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"}, - {"tls_dhe_rsa_aes_256_sha", "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"}, - /*{"tls_dhe_dss_1024_des_sha", ""}, */ - {"tls_dhe_dss_1024_rc4_sha", "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA"}, - {"tls_dhe_dss_rc4_128_sha", "TLS_DHE_DSS_WITH_RC4_128_SHA"}, - /* New in NSS 3.15 */ - {"tls_rsa_aes_128_gcm_sha", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, - {"tls_dhe_rsa_aes_128_gcm_sha", 
"TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"}, - {"tls_dhe_dss_aes_128_gcm_sha", NULL}, /* not available */ - {NULL, NULL}}; /* E.g., "SSL3", "TLS1.2", "Unknown SSL version: 0x0" */ #define VERSION_STR_LENGTH 64 @@ -705,7 +636,6 @@ _conf_setciphers(char *setciphers, int flags) if (strcasecmp(setciphers, "all")) { /* if not all */ PRBool enabled = active ? PR_TRUE : PR_FALSE; - int lookup = 1; for (x = 0; _conf_ciphers[x].name; x++) { if (!PL_strcasecmp(setciphers, _conf_ciphers[x].name)) { if (_conf_ciphers[x].flags & CIPHER_IS_WEAK) { @@ -732,55 +662,10 @@ _conf_setciphers(char *setciphers, int flags) enabledOne = PR_TRUE; /* At least one active cipher is set. */ } SSL_CipherPrefSetDefault(_conf_ciphers[x].num, enabled); - lookup = 0; break; } } - if (lookup) { /* lookup with old cipher name and get NSS cipherSuiteName */ - for (size_t i = 0; _lookup_cipher[i].alias; i++) { - if (!PL_strcasecmp(setciphers, _lookup_cipher[i].alias)) { - if (enabled && !_lookup_cipher[i].name[0]) { - slapd_SSL_warn("Cipher suite %s is not available in NSS %d.%d. Ignoring %s", - setciphers, NSS_VMAJOR, NSS_VMINOR, setciphers); - continue; - } - for (x = 0; _conf_ciphers[x].name; x++) { - if (!PL_strcasecmp(_lookup_cipher[i].name, _conf_ciphers[x].name)) { - if (enabled) { - if (_conf_ciphers[x].flags & CIPHER_IS_WEAK) { - if (active && CIPHER_SET_ALLOWSWEAKCIPHER(flags)) { - slapd_SSL_warn("Cipher %s is weak. " - "It is enabled since allowWeakCipher is \"on\" " - "(default setting for the backward compatibility). " - "We strongly recommend to set it to \"off\". " - "Please replace the value of allowWeakCipher with \"off\" in " - "the encryption config entry cn=encryption,cn=config and " - "restart the server.", - setciphers); - } else { - /* if the cipher is weak and we don't allow weak cipher, - disable it. */ - enabled = PR_FALSE; - } - } - if (enabled) { - /* if the cipher is not weak or we allow weak cipher, - check fips. 
*/ - enabled = cipher_check_fips(x, NULL, &unsuplist); - } - } - if (enabled) { - enabledOne = PR_TRUE; /* At least one active cipher is set. */ - } - SSL_CipherPrefSetDefault(_conf_ciphers[x].num, enabled); - break; - } - } - break; - } - } - } - if (!lookup && !_conf_ciphers[x].name) { /* If lookup, it's already reported. */ + if (!_conf_ciphers[x].name) { slapd_SSL_warn("Cipher suite %s is not available in NSS %d.%d. Ignoring %s", setciphers, NSS_VMAJOR, NSS_VMINOR, setciphers); } @@ -1028,124 +913,6 @@ slapi_getSSLVersion_str(PRUint16 vnum, char *buf, size_t bufsize) #define SSLVGreater(x, y) (((x) > (y)) ? (x) : (y)) -/* - * Check the SSLVersionRange and the old style config params (nsSSL3, nsTLS1) . - * If there are conflicts, choose the secure setting. - */ -static void -restrict_SSLVersionRange(void) -{ - char mymin[VERSION_STR_LENGTH], mymax[VERSION_STR_LENGTH]; - char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH]; - (void)slapi_getSSLVersion_str(slapdNSSVersions.min, mymin, sizeof(mymin)); - (void)slapi_getSSLVersion_str(slapdNSSVersions.max, mymax, sizeof(mymax)); - (void)slapi_getSSLVersion_str(enabledNSSVersions.max, emax, sizeof(emax)); - (void)slapi_getSSLVersion_str(enabledNSSVersions.min, emin, sizeof(emin)); - if (slapdNSSVersions.min > slapdNSSVersions.max) { - slapd_SSL_warn("Invalid configured SSL range: min: %s, max: %s; " - "Resetting the max to the supported max SSL version: %s.", - mymin, mymax, emax); - slapdNSSVersions.max = enabledNSSVersions.max; - } - if (enableSSL3) { - if (enableTLS1) { - if (slapdNSSVersions.min >= CURRENT_DEFAULT_SSL_VERSION) { - slapd_SSL_warn("Configured range: min: %s, max: %s; " - "but both nsSSL3 and nsTLS1 are on. 
" - "Respect the supported range.", - mymin, mymax); - enableSSL3 = PR_FALSE; - } else { - slapd_SSL_warn("Min value is too low in range: min: %s, max: %s; " - "We strongly recommend to set sslVersionMin higher than %s.", - mymin, mymax, DEFVERSION); - } - if (slapdNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) { - slapd_SSL_warn("Configured range: min: %s, max: %s; " - "but both nsSSL3 and nsTLS1 are on. " - "Resetting the max to the supported max SSL version: %s.", - mymin, mymax, emax); - slapdNSSVersions.max = enabledNSSVersions.max; - } - } else { - /* nsTLS1 is explicitly set to off. */ - if (enabledNSSVersions.min >= CURRENT_DEFAULT_SSL_VERSION) { - slapd_SSL_warn("Supported range: min: %s, max: %s; " - "but nsSSL3 is on and nsTLS1 is off. " - "Respect the supported range.", - emin, emax); - slapdNSSVersions.min = SSLVGreater(slapdNSSVersions.min, enabledNSSVersions.min); - enableSSL3 = PR_FALSE; - enableTLS1 = PR_TRUE; - } else if (slapdNSSVersions.min >= CURRENT_DEFAULT_SSL_VERSION) { - slapd_SSL_warn("Configured range: min: %s, max: %s; " - "but nsSSL3 is on and nsTLS1 is off. " - "Respect the configured range.", - mymin, mymax); - enableSSL3 = PR_FALSE; - enableTLS1 = PR_TRUE; - } else if (slapdNSSVersions.min < CURRENT_DEFAULT_SSL_VERSION) { - slapd_SSL_warn("Min value is too low in range: min: %s, max: %s; " - "We strongly recommend to set sslVersionMin higher than %s.", - mymin, mymax, DEFVERSION); - } else { - /* - * slapdNSSVersions.min < SSL_LIBRARY_VERSION_TLS_1_0 && - * slapdNSSVersions.max >= SSL_LIBRARY_VERSION_TLS_1_1 - */ - slapd_SSL_warn("Configured range: min: %s, max: %s; " - "but nsSSL3 is on and nsTLS1 is off. " - "Respect the configured range.", - mymin, mymax); - enableTLS1 = PR_TRUE; - } - } - } else { - if (enableTLS1) { - if (enabledNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) { - /* TLS1 is on, but TLS1 is not supported by NSS. 
*/ - slapd_SSL_warn("Supported range: min: %s, max: %s; " - "Setting the version range based upon the supported range.", - emin, emax); - slapdNSSVersions.max = enabledNSSVersions.max; - slapdNSSVersions.min = enabledNSSVersions.min; - enableSSL3 = PR_TRUE; - enableTLS1 = PR_FALSE; - } else if ((slapdNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) || - (slapdNSSVersions.min < CURRENT_DEFAULT_SSL_VERSION)) { - slapdNSSVersions.max = enabledNSSVersions.max; - slapdNSSVersions.min = SSLVGreater(CURRENT_DEFAULT_SSL_VERSION, enabledNSSVersions.min); - slapd_SSL_warn("nsTLS1 is on, but the version range is lower than \"%s\"; " - "Configuring the version range as default min: %s, max: %s.", - DEFVERSION, DEFVERSION, emax); - } else { - /* - * slapdNSSVersions.min >= SSL_LIBRARY_VERSION_TLS_1_0 && - * slapdNSSVersions.max >= SSL_LIBRARY_VERSION_TLS_1_0 - */ - ; - } - } else { - slapd_SSL_info("Supported range: min: %s, max: %s; " - "Respect the configured range.", - emin, emax); - /* nsTLS1 is explicitly set to off. */ - if (slapdNSSVersions.min >= CURRENT_DEFAULT_SSL_VERSION) { - enableTLS1 = PR_TRUE; - } else if (slapdNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) { - enableSSL3 = PR_TRUE; - } else { - /* - * slapdNSSVersions.min < SSL_LIBRARY_VERSION_TLS_1_0 && - * slapdNSSVersions.max >= SSL_LIBRARY_VERSION_TLS_1_0 - */ - enableSSL3 = PR_TRUE; - enableTLS1 = PR_TRUE; - } - } - } -} - /* * slapd_nss_init() is always called from main(), even if we do not * plan to listen on a secure port. 
If config_available is 0, the @@ -1483,7 +1250,7 @@ slapd_ssl_init() } /* - * val: sslVersionMin/Max value set in cn=encription,cn=config (INPUT) + * val: sslVersionMin/Max value set in cn=encryption,cn=config (INPUT) * rval: Corresponding value to set SSLVersionRange (OUTPUT) * ismin: True if val is sslVersionMin value */ @@ -1494,8 +1261,7 @@ slapd_ssl_init() static int set_NSS_version(char *val, PRUint16 *rval, int ismin) { - char *vp, *endp; - int64_t vnum; + char *vp; char emin[VERSION_STR_LENGTH], emax[VERSION_STR_LENGTH]; if (NULL == rval) { @@ -1503,73 +1269,20 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin) } (void)slapi_getSSLVersion_str(enabledNSSVersions.min, emin, sizeof(emin)); (void)slapi_getSSLVersion_str(enabledNSSVersions.max, emax, sizeof(emax)); - if (!strncasecmp(val, SSLSTR, SSLLEN)) { /* ssl# */ - vp = val + SSLLEN; - vnum = strtol(vp, &endp, 10); - if (2 == vnum) { - if (ismin) { - if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_2) { - slapd_SSL_warn("The value of sslVersionMin " - "\"%s\" is lower than the supported version; " - "the default value \"%s\" is used.", - val, emin); - (*rval) = enabledNSSVersions.min; - } else { - (*rval) = SSL_LIBRARY_VERSION_2; - } - } else { - if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_2) { - /* never happens */ - slapd_SSL_warn("The value of sslVersionMax " - "\"%s\" is higher than the supported version; " - "the default value \"%s\" is used.", - val, emax); - (*rval) = enabledNSSVersions.max; - } else { - (*rval) = SSL_LIBRARY_VERSION_2; - } - } - } else if (3 == vnum) { - if (ismin) { - if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_3_0) { - slapd_SSL_warn("The value of sslVersionMin " - "\"%s\" is lower than the supported version; " - "the default value \"%s\" is used.", - val, emin); - (*rval) = enabledNSSVersions.min; - } else { - (*rval) = SSL_LIBRARY_VERSION_3_0; - } - } else { - if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_3_0) { - /* never happens */ - slapd_SSL_warn("The 
value of sslVersionMax " - "\"%s\" is higher than the supported version; " - "the default value \"%s\" is used.", - val, emax); - (*rval) = enabledNSSVersions.max; - } else { - (*rval) = SSL_LIBRARY_VERSION_3_0; - } - } + + if (!strncasecmp(val, SSLSTR, SSLLEN)) { /* ssl# NOT SUPPORTED */ + if (ismin) { + slapd_SSL_warn("SSL3 is no longer supported. Using NSS default min value: %s\n", emin); + (*rval) = enabledNSSVersions.min; } else { - if (ismin) { - slapd_SSL_warn("The value of sslVersionMin " - "\"%s\" is invalid; the default value \"%s\" is used.", - val, emin); - (*rval) = enabledNSSVersions.min; - } else { - slapd_SSL_warn("The value of sslVersionMax " - "\"%s\" is invalid; the default value \"%s\" is used.", - val, emax); - (*rval) = enabledNSSVersions.max; - } + slapd_SSL_warn("SSL3 is no longer supported. Using NSS default max value: %s\n", emax); + (*rval) = enabledNSSVersions.max; } } else if (!strncasecmp(val, TLSSTR, TLSLEN)) { /* tls# */ float tlsv; vp = val + TLSLEN; sscanf(vp, "%4f", &tlsv); - if (tlsv < 1.1) { /* TLS1.0 */ + if (tlsv < 1.1f) { /* TLS1.0 */ if (ismin) { if (enabledNSSVersions.min > CURRENT_DEFAULT_SSL_VERSION) { slapd_SSL_warn("The value of sslVersionMin " @@ -1592,7 +1305,7 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin) (*rval) = CURRENT_DEFAULT_SSL_VERSION; } } - } else if (tlsv < 1.2) { /* TLS1.1 */ + } else if (tlsv < 1.2f) { /* TLS1.1 */ if (ismin) { if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_1) { slapd_SSL_warn("The value of sslVersionMin " @@ -1615,7 +1328,7 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin) (*rval) = SSL_LIBRARY_VERSION_TLS_1_1; } } - } else if (tlsv < 1.3) { /* TLS1.2 */ + } else if (tlsv < 1.3f) { /* TLS1.2 */ if (ismin) { if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_2) { slapd_SSL_warn("The value of sslVersionMin " @@ -1638,6 +1351,29 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin) (*rval) = SSL_LIBRARY_VERSION_TLS_1_2; } } + } else if (tlsv < 1.4f) { /* 
TLS1.3 */ + if (ismin) { + if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_3) { + slapd_SSL_warn("The value of sslVersionMin " + "\"%s\" is lower than the supported version; " + "the default value \"%s\" is used.", + val, emin); + (*rval) = enabledNSSVersions.min; + } else { + (*rval) = SSL_LIBRARY_VERSION_TLS_1_3; + } + } else { + if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_3) { + /* never happens */ + slapd_SSL_warn("The value of sslVersionMax " + "\"%s\" is higher than the supported version; " + "the default value \"%s\" is used.", + val, emax); + (*rval) = enabledNSSVersions.max; + } else { + (*rval) = SSL_LIBRARY_VERSION_TLS_1_3; + } + } } else { /* Specified TLS is newer than supported */ if (ismin) { slapd_SSL_warn("The value of sslVersionMin " @@ -1683,7 +1419,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) CERTCertificate *cert = NULL; SECKEYPrivateKey *key = NULL; char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE] = {0}; - char *val = NULL; + const char *val = NULL; + char *cipher_val = NULL; + char *clientauth_val = NULL; char *default_val = NULL; int nFamilies = 0; SECStatus sslStatus; @@ -1722,7 +1460,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) slapd_SSL_error("Failed get config entry %s", configDN); return 1; } - val = slapi_entry_attr_get_charptr(e, "allowWeakCipher"); + val = slapi_fetch_attr(e, "allowWeakCipher", NULL); if (val) { if (!PL_strcasecmp(val, "off") || !PL_strcasecmp(val, "false") || !PL_strcmp(val, "0") || !PL_strcasecmp(val, "no")) { @@ -1735,15 +1473,14 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) "Ignoring it and set it to default.", val, configDN); } } - slapi_ch_free_string(&val); /* Set SSL cipher preferences */ - if (NULL != (val = _conf_setciphers(ciphers, allowweakcipher))) { + if (NULL != (cipher_val = _conf_setciphers(ciphers, allowweakcipher))) { errorCode = PR_GetError(); slapd_SSL_warn("Failed to set SSL cipher " "preference information: %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", - val, 
errorCode, slapd_pr_strerror(errorCode)); - slapi_ch_free_string(&val); + cipher_val, errorCode, slapd_pr_strerror(errorCode)); + slapi_ch_free_string(&cipher_val); } slapi_ch_free_string(&ciphers); freeConfigEntry(&e); @@ -1782,8 +1519,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) return -1; } fipsMode = PR_TRUE; - /* FIPS does not like to use SSLv3 */ - enableSSL3 = PR_FALSE; } slapd_pk11_setSlotPWValues(slot, 0, 0); @@ -1992,26 +1727,14 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) return -1; } - /* Explicitly disabling SSL2 - NGK */ - sslStatus = SSL_OptionSet(pr_sock, SSL_ENABLE_SSL2, enableSSL2); - if (sslStatus != SECSuccess) { - errorCode = PR_GetError(); - slapd_SSL_error("Failed to %s SSLv2 " - "on the imported socket (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", - enableSSL2 ? "enable" : "disable", - errorCode, slapd_pr_strerror(errorCode)); - return -1; - } - /* Retrieve the SSL Client Authentication status from cn=config */ /* Set a default value if no value found */ getConfigEntry(configDN, &e); - val = NULL; if (e != NULL) { - val = slapi_entry_attr_get_charptr(e, "nssslclientauth"); + clientauth_val = (char *)slapi_fetch_attr(e, "nssslclientauth", NULL); } - if (!val) { + if (!clientauth_val) { errorCode = PR_GetError(); slapd_SSL_warn("Cannot get SSL Client " "Authentication status. No nsslclientauth in %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", @@ -2030,9 +1753,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) default_val = "allowed"; break; } - val = default_val; + clientauth_val = default_val; } - if (config_set_SSLclientAuth("nssslclientauth", val, errorbuf, + if (config_set_SSLclientAuth("nssslclientauth", clientauth_val, errorbuf, CONFIG_APPLY) != LDAP_SUCCESS) { errorCode = PR_GetError(); slapd_SSL_warn("Cannot set SSL Client " @@ -2041,53 +1764,28 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) "and \"required\". 
(" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", val, errorbuf, errorCode, slapd_pr_strerror(errorCode)); } - if (val != default_val) { - slapi_ch_free_string(&val); - } if (e != NULL) { - val = slapi_entry_attr_get_charptr(e, "nsSSL3"); + val = slapi_fetch_attr(e, "nsSSL3", NULL); if (val) { - if (!PL_strcasecmp(val, "off")) { - enableSSL3 = PR_FALSE; - } else if (!PL_strcasecmp(val, "on")) { - enableSSL3 = PR_TRUE; - } else { - enableSSL3 = slapi_entry_attr_get_bool(e, "nsSSL3"); - } - if (fipsMode && enableSSL3) { - slapd_SSL_warn("FIPS mode is enabled and " - "nsSSL3 explicitly set to on - SSLv3 is not approved " - "for use in FIPS mode - SSLv3 will be disabled - if " - "you want to use SSLv3, you must use modutil to " - "disable FIPS in the internal token."); - enableSSL3 = PR_FALSE; + if (!PL_strcasecmp(val, "on")) { + slapd_SSL_warn("NSS no longer support SSL3, the nsSSL3 setting will be ignored"); } } - slapi_ch_free_string(&val); - val = slapi_entry_attr_get_charptr(e, "nsTLS1"); + val = slapi_fetch_attr(e, "nsTLS1", NULL); if (val) { if (!PL_strcasecmp(val, "off")) { - enableTLS1 = PR_FALSE; - } else if (!PL_strcasecmp(val, "on")) { - enableTLS1 = PR_TRUE; - } else { - enableTLS1 = slapi_entry_attr_get_bool(e, "nsTLS1"); + slapd_SSL_warn("NSS only supports TLS, the nsTLS1 setting of \"off\" will be ignored"); } - } else if (enabledNSSVersions.max >= CURRENT_DEFAULT_SSL_VERSION) { - enableTLS1 = PR_TRUE; /* If available, enable TLS1 */ } - slapi_ch_free_string(&val); - val = slapi_entry_attr_get_charptr(e, "sslVersionMin"); + val = slapi_fetch_attr(e, "sslVersionMin", NULL); if (val) { - (void)set_NSS_version(val, &NSSVersionMin, 1); + (void)set_NSS_version((char *)val, &NSSVersionMin, 1); } - slapi_ch_free_string(&val); - val = slapi_entry_attr_get_charptr(e, "sslVersionMax"); + val = slapi_fetch_attr(e, "sslVersionMax", NULL); if (val) { - (void)set_NSS_version(val, &NSSVersionMax, 0); + (void)set_NSS_version((char *)val, &NSSVersionMax, 0); } - 
slapi_ch_free_string(&val); if (NSSVersionMin > NSSVersionMax) { (void)slapi_getSSLVersion_str(NSSVersionMin, mymin, sizeof(mymin)); (void)slapi_getSSLVersion_str(NSSVersionMax, mymax, sizeof(mymax)); @@ -2103,7 +1801,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) /* Handle the SSL version range */ slapdNSSVersions.min = NSSVersionMin; slapdNSSVersions.max = NSSVersionMax; - restrict_SSLVersionRange(); (void)slapi_getSSLVersion_str(slapdNSSVersions.min, mymin, sizeof(mymin)); (void)slapi_getSSLVersion_str(slapdNSSVersions.max, mymax, sizeof(mymax)); slapi_log_err(SLAPI_LOG_INFO, "Security Initialization", @@ -2122,7 +1819,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) */ sslStatus = SSL_VersionRangeGet(pr_sock, &slapdNSSVersions); if (sslStatus == SECSuccess) { - if (slapdNSSVersions.max > LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 && slapd_pk11_isFIPS()) { + if (slapdNSSVersions.max > LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 && fipsMode) { /* * FIPS & NSS currently only support a max version of TLS1.2 * (although NSS advertises 1.3 as a max range in FIPS mode), @@ -2155,7 +1852,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) val = NULL; if (e != NULL) { - val = slapi_entry_attr_get_charptr(e, "nsTLSAllowClientRenegotiation"); + val = slapi_fetch_attr(e, "nsTLSAllowClientRenegotiation", NULL); } if (val) { /* We default to allowing reneg. 
If the option is "no", @@ -2170,7 +1867,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) renegotiation = SSL_RENEGOTIATE_REQUIRES_XTN; } } - slapi_ch_free_string(&val); sslStatus = SSL_OptionSet(pr_sock, SSL_ENABLE_RENEGOTIATION, (PRBool)renegotiation); if (sslStatus != SECSuccess) { diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py index 378cd644b..e85e86613 100644 --- a/src/lib389/lib389/instance/remove.py +++ b/src/lib389/lib389/instance/remove.py @@ -30,7 +30,7 @@ def remove_ds_instance(dirsrv, force=False): :param dirsrv: A directory server instance :type dirsrv: DirSrv - :param force: A psycological aid, for people who think force means do something, harder. Does + :param force: A psychological aid, for people who think force means do something, harder. Does literally nothing in this program because state machines are a thing. :type force: bool """
0
d7d0b2af5dc33478d0346169374bc3d19b540901
389ds/389-ds-base
make sure the DBVERSION file ends in a newline make sure the DBVERSION file ends in a newline Reviewed by: nhosoi (Thanks!)
commit d7d0b2af5dc33478d0346169374bc3d19b540901 Author: Rich Megginson <[email protected]> Date: Wed Aug 10 14:56:54 2011 -0600 make sure the DBVERSION file ends in a newline make sure the DBVERSION file ends in a newline Reviewed by: nhosoi (Thanks!) diff --git a/ldap/servers/slapd/back-ldbm/dbversion.c b/ldap/servers/slapd/back-ldbm/dbversion.c index 5c5cdeb47..4c47c3de2 100644 --- a/ldap/servers/slapd/back-ldbm/dbversion.c +++ b/ldap/servers/slapd/back-ldbm/dbversion.c @@ -122,7 +122,9 @@ dbversion_write(struct ldbminfo *li, const char *directory, len = strlen(buf); ptr = buf + len; } - len = strlen( buf ); + /* end in a newline */ + PL_strncpyz(ptr, "\n", sizeof(buf) - len); + len = strlen(buf); if ( slapi_write_buffer( prfd, buf, len ) != len ) { LDAPDebug( LDAP_DEBUG_ANY, "Could not write to file \"%s\"\n", filename, 0, 0 );
0
00d081397a1b99ca8da61e31bd597a3473d4e772
389ds/389-ds-base
Issue 4319 - Performance search rate: listener may be erroneously waken up (#4323) Bug description: A worker thread usually wakes up the listener when it has completed reading the operation from the operation. In addition upon exceptional event (timeout while reading op or max thread per connection), it sets a local flag (need_wakeup) and wakes the listener. The problem is that it does not reset the flag after wake up. So for any further operation (on any operation) it will trigger this additional wake up. This triggers a write syscall and wakes up listener for nothing. This impacts througput by ~2% Fix description: reset the need_wakeup after signal_listner relates: https://github.com/389ds/389-ds-base/issues/4319 Reviewed by: William Brown, Mark Reynolds (thanks !!) Platforms tested: F31, RHEL8.3
commit 00d081397a1b99ca8da61e31bd597a3473d4e772 Author: tbordaz <[email protected]> Date: Mon Sep 21 08:28:38 2020 +0200 Issue 4319 - Performance search rate: listener may be erroneously waken up (#4323) Bug description: A worker thread usually wakes up the listener when it has completed reading the operation from the operation. In addition upon exceptional event (timeout while reading op or max thread per connection), it sets a local flag (need_wakeup) and wakes the listener. The problem is that it does not reset the flag after wake up. So for any further operation (on any operation) it will trigger this additional wake up. This triggers a write syscall and wakes up listener for nothing. This impacts througput by ~2% Fix description: reset the need_wakeup after signal_listner relates: https://github.com/389ds/389-ds-base/issues/4319 Reviewed by: William Brown, Mark Reynolds (thanks !!) Platforms tested: F31, RHEL8.3 diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 006e22d8b..74a39cf73 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -1467,7 +1467,6 @@ connection_threadmain() Connection *conn = NULL; Operation *op; ber_tag_t tag = 0; - int need_wakeup = 0; int thread_turbo_flag = 0; int ret = 0; int more_data = 0; @@ -1817,6 +1816,8 @@ connection_threadmain() } if (!more_data) { if (!thread_turbo_flag) { + int32_t need_wakeup = 0; + /* * Don't release the connection now. * But note down what to do. @@ -1840,10 +1841,13 @@ connection_threadmain() } conn->c_threadnumber--; connection_release_nolock(conn); - /* Call signal_listner after releasing the - * connection if required. */ + /* If need_wakeup, call signal_listner once. + * Need to release the connection (refcnt--) + * before that call. + */ if (need_wakeup) { signal_listner(); + need_wakeup = 0; } } else if (1 == is_timedout) { /* covscan reports this code is unreachable (2019/6/4) */
0
f9ec9a62b8e386c7918b0b9ea628023ed9bd505e
389ds/389-ds-base
Bug 195302 - Allow fine-grained password storage scheme to be set This patch makes the server use the password storage scheme set in the appropriate fine-grained password policy (if it is set). The previous code was always using the global storage scheme. This fix was based off of a fix contributed by Ulf Weltman of Hewlett Packard.
commit f9ec9a62b8e386c7918b0b9ea628023ed9bd505e Author: Nathan Kinder <[email protected]> Date: Tue Dec 1 16:27:23 2009 -0800 Bug 195302 - Allow fine-grained password storage scheme to be set This patch makes the server use the password storage scheme set in the appropriate fine-grained password policy (if it is set). The previous code was always using the global storage scheme. This fix was based off of a fix contributed by Ulf Weltman of Hewlett Packard. diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c index df683a90d..1a1f86737 100644 --- a/ldap/servers/slapd/add.c +++ b/ldap/servers/slapd/add.c @@ -32,8 +32,14 @@ * * * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2009 Red Hat, Inc. + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. + * + * Contributors: + * Hewlett-Packard Development Company, L.P. + * Bugfix for bug #195302 + * * END COPYRIGHT BLOCK **/ #ifdef HAVE_CONFIG_H @@ -512,7 +518,7 @@ static void op_shared_add (Slapi_PBlock *pb) Slapi_Value **vals= NULL; valuearray_add_valuearray(&unhashed_password_vals, present_values, 0); valuearray_add_valuearray(&vals, present_values, 0); - pw_encodevals(vals); + pw_encodevals_ext(pb, slapi_entry_get_sdn (e), vals); add_password_attrs(pb, operation, e); slapi_entry_attr_replace_sv(e, SLAPI_USERPWD_ATTR, vals); valuearray_free(&vals); diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 1eac8489a..40646b2bc 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -32,8 +32,14 @@ * * * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2009 Red Hat, Inc. + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. + * + * Contributors: + * Hewlett-Packard Development Company, L.P. 
+ * Bugfix for bug #195302 + * * END COPYRIGHT BLOCK **/ #ifdef HAVE_CONFIG_H @@ -712,7 +718,7 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) valuearray_init_bervalarray(pw_mod->mod_bvalues, &va); /* encode password */ - pw_encodevals(va); + pw_encodevals_ext(pb, &sdn, va); /* remove current clear value of userpassword */ ber_bvecfree(pw_mod->mod_bvalues); diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c index cafdfd68f..09bb96c79 100644 --- a/ldap/servers/slapd/pw.c +++ b/ldap/servers/slapd/pw.c @@ -32,8 +32,14 @@ * * * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2009 Red Hat, Inc. + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. + * + * Contributors: + * Hewlett-Packard Development Company, L.P. + * Bugfix for bug #195302 + * * END COPYRIGHT BLOCK **/ #ifdef HAVE_CONFIG_H @@ -176,18 +182,24 @@ int slapi_is_encoded (char *value) char* slapi_encode (char *value, char *alg) +{ + return( slapi_encode_ext( NULL, NULL, value, alg ) ); +} + +char* slapi_encode_ext (Slapi_PBlock *pb, const Slapi_DN *sdn, char *value, char *alg) { struct pw_scheme *enc_scheme = NULL; + char *(*pws_enc) ( char *pwd ) = NULL; char *hashedval = NULL; - int need_to_free = 0; + passwdPolicy *pwpolicy=NULL; - if (alg == NULL) /* use server encoding scheme */ + if (alg == NULL) /* use local scheme, or global if we can't fetch local */ { - slapdFrontendConfig_t * slapdFrontendConfig = getFrontendConfig(); - - enc_scheme = slapdFrontendConfig->pw_storagescheme; + pwpolicy = new_passwdPolicy(pb, (char*)slapi_sdn_get_ndn(sdn) ); + pws_enc = pwpolicy->pw_storagescheme->pws_enc; + delete_passwdPolicy(&pwpolicy); - if (enc_scheme == NULL) + if (pws_enc == NULL) { slapi_log_error( SLAPI_LOG_FATAL, NULL, "slapi_encode: no server scheme\n" ); @@ -212,12 +224,11 @@ char* slapi_encode (char *value, char *alg) } return NULL; } - need_to_free = 1; + 
pws_enc = enc_scheme->pws_enc; + free_pw_scheme(enc_scheme); } - hashedval = enc_scheme->pws_enc(value); - if (need_to_free) - free_pw_scheme(enc_scheme); + hashedval = (*pws_enc)(value); return hashedval; } @@ -317,19 +328,40 @@ pw_val2scheme( char *val, char **valpwdp, int first_is_default ) int pw_encodevals( Slapi_Value **vals ) +{ + return( pw_encodevals_ext( NULL, NULL, vals ) ); +} + +/* + * Same as pw_encodevals, except if a pb and sdn are passed in, we will try + * to check the password scheme specified by local password policy. + */ +int +pw_encodevals_ext( Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals ) { int i; - slapdFrontendConfig_t * slapdFrontendConfig = getFrontendConfig(); + passwdPolicy *pwpolicy=NULL; + char *dn; + char *(*pws_enc) ( char *pwd ) = NULL; + if ( vals == NULL ) { + return( 0 ); + } + + /* new_passwdPolicy gives us a local policy if sdn and pb are set and + can be used to find a local policy, else we get the global policy */ + pwpolicy = new_passwdPolicy(pb, (char*)slapi_sdn_get_ndn(sdn) ); + pws_enc = pwpolicy->pw_storagescheme->pws_enc; + delete_passwdPolicy(&pwpolicy); - if ( vals == NULL || slapdFrontendConfig->pw_storagescheme == NULL || - slapdFrontendConfig->pw_storagescheme->pws_enc == NULL ) { + /* Password scheme encryption function was not found */ + if ( pws_enc == NULL ) { return( 0 ); } for ( i = 0; vals[ i ] != NULL; ++i ) { - struct pw_scheme *pwsp; - char *enc = NULL; + struct pw_scheme *pwsp = NULL; + char *enc = NULL; if ( (pwsp=pw_val2scheme( (char*)slapi_value_get_string(vals[ i ]), NULL, 0)) != NULL ) { /* JCM Innards */ /* If the value already specifies clear storage, call the * clear storage plug-in */ @@ -340,14 +372,14 @@ pw_encodevals( Slapi_Value **vals ) continue; /* don't touch pre-encoded values */ } } - if ((!enc) && (( enc = (*slapdFrontendConfig->pw_storagescheme->pws_enc)( (char*)slapi_value_get_string(vals[ i ]) )) /* JCM Innards */ - == NULL )) { - free_pw_scheme( pwsp ); + 
free_pw_scheme( pwsp ); + + if ((!enc) && (( enc = (*pws_enc)( (char*)slapi_value_get_string(vals[ i ]) )) == NULL )) { return( -1 ); } + slapi_value_free(&vals[ i ]); vals[ i ] = slapi_value_new_string_passin(enc); - free_pw_scheme( pwsp ); } return( 0 ); @@ -1460,6 +1492,7 @@ new_passwdPolicy(Slapi_PBlock *pb, char *dn) int attr_free_flags = 0; int rc=0; passwdPolicy *pwdpolicy = NULL; + struct pw_scheme *pwdscheme = NULL; Slapi_Attr *attr; char *attr_name; Slapi_Value **sval; @@ -1471,10 +1504,12 @@ new_passwdPolicy(Slapi_PBlock *pb, char *dn) slapdFrontendConfig = getFrontendConfig(); pwdpolicy = (passwdPolicy *)slapi_ch_calloc(1, sizeof(passwdPolicy)); - slapi_pblock_get( pb, SLAPI_OPERATION, &op); - slapi_pblock_get( pb, SLAPI_OPERATION_TYPE, &optype ); + if (pb) { + slapi_pblock_get( pb, SLAPI_OPERATION, &op); + slapi_pblock_get( pb, SLAPI_OPERATION_TYPE, &optype ); + } - if (dn && (slapdFrontendConfig->pwpolicy_local == 1)) { + if (pb && dn && (slapdFrontendConfig->pwpolicy_local == 1)) { /* If we're doing an add, COS does not apply yet so we check parents for the pwdpolicysubentry. We look only for virtual attributes, because real ones are for single-target policy. 
*/ @@ -1701,13 +1736,20 @@ new_passwdPolicy(Slapi_PBlock *pb, char *dn) pwdpolicy->pw_gracelimit = slapi_value_get_int(*sval); } } + else + if (!strcasecmp(attr_name, "passwordstoragescheme")) { + if ((sval = attr_get_present_values(attr))) { + pwdpolicy->pw_storagescheme = + pw_name2scheme((char*)slapi_value_get_string(*sval)); + } + } } /* end of for() loop */ if (pw_entry) { slapi_entry_free(pw_entry); } return pwdpolicy; - } else if ( e ) { + } else if ( e ) { slapi_entry_free( e ); } } @@ -1718,6 +1760,11 @@ done: */ *pwdpolicy = slapdFrontendConfig->pw_policy; + pwdscheme = (struct pw_scheme *)slapi_ch_calloc(1, sizeof(struct pw_scheme)); + *pwdscheme = *slapdFrontendConfig->pw_storagescheme; + pwdscheme->pws_name = strdup( slapdFrontendConfig->pw_storagescheme->pws_name ); + pwdpolicy->pw_storagescheme = pwdscheme; + return pwdpolicy; } /* End of new_passwdPolicy() */ @@ -1725,7 +1772,10 @@ done: void delete_passwdPolicy( passwdPolicy **pwpolicy) { - slapi_ch_free((void **)pwpolicy); + if (pwpolicy && *pwpolicy) { + free_pw_scheme( (*(*pwpolicy)).pw_storagescheme ); + slapi_ch_free((void **)pwpolicy); + } } /* diff --git a/ldap/servers/slapd/pw.h b/ldap/servers/slapd/pw.h index 5f5eb1be4..38c7fafe2 100644 --- a/ldap/servers/slapd/pw.h +++ b/ldap/servers/slapd/pw.h @@ -32,8 +32,14 @@ * * * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2009 Red Hat, Inc. + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. + * + * Contributors: + * Hewlett-Packard Development Company, L.P. 
+ * Bugfix for bug #195302 + * * END COPYRIGHT BLOCK **/ #ifdef HAVE_CONFIG_H @@ -76,6 +82,7 @@ struct pw_scheme { struct pw_scheme *pw_name2scheme( char *name ); struct pw_scheme *pw_val2scheme( char *val, char **valpwdp, int first_is_default ); int pw_encodevals( Slapi_Value **vals ); +int pw_encodevals_ext( Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals ); int checkPrefix(char *cipher, char *schemaName, char **encrypt); struct passwordpolicyarray *new_passwdPolicy ( Slapi_PBlock *pb, char *dn ); void delete_passwdPolicy( struct passwordpolicyarray **pwpolicy); diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index 586035070..0e37c85e2 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -32,8 +32,14 @@ * * * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2009 Red Hat, Inc. + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. + * + * Contributors: + * Hewlett-Packard Development Company, L.P. + * Bugfix for bug #195302 + * * END COPYRIGHT BLOCK **/ #ifdef HAVE_CONFIG_H @@ -1874,6 +1880,7 @@ typedef struct passwordpolicyarray { long pw_lockduration; long pw_resetfailurecount; int pw_gracelimit; + struct pw_scheme *pw_storagescheme; } passwdPolicy; typedef struct _slapdFrontendConfig { diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 1d545a0a0..09f18e7c9 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -32,8 +32,14 @@ * * * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. + * Copyright (C) 2009 Red Hat, Inc. + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * All rights reserved. + * + * Contributors: + * Hewlett-Packard Development Company, L.P. 
+ * Bugfix for bug #195302 + * * END COPYRIGHT BLOCK */ #ifdef HAVE_CONFIG_H @@ -2274,6 +2280,9 @@ int slapi_pw_find_sv( Slapi_Value **vals, const Slapi_Value *v ); int slapi_is_encoded(char *value); /* encode value with the specified algorithm */ char* slapi_encode(char *value, char *alg); +/* encode value with the specified algorithm, or with local algorithm if pb + * and sdn are specified instead, or global algorithm if pb and sdn are null */ +char* slapi_encode_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, char *value, char *alg); /* UTF8 related */
0
bb7a41be1d456f74273a301e0ec1926feb091de9
389ds/389-ds-base
Ticket #47330 - changelog db extension / upgrade is obsolete Fix description: There was a typo in commit 845a221350bc58166773f50526adfd186bdc86e6. Replaced _cl5UpgradeMajor with _cl5UpgradeMinor for the minor upgrade.
commit bb7a41be1d456f74273a301e0ec1926feb091de9 Author: Noriko Hosoi <[email protected]> Date: Wed Apr 17 16:07:49 2013 -0700 Ticket #47330 - changelog db extension / upgrade is obsolete Fix description: There was a typo in commit 845a221350bc58166773f50526adfd186bdc86e6. Replaced _cl5UpgradeMajor with _cl5UpgradeMinor for the minor upgrade. diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c index 6b621e533..885e3a18a 100644 --- a/ldap/servers/plugins/replication/cl5_api.c +++ b/ldap/servers/plugins/replication/cl5_api.c @@ -3080,7 +3080,7 @@ static int _cl5CheckDBVersion () else if (dbminor < DB_VERSION_MINOR) { /* minor upgrade */ - rc = _cl5UpgradeMajor(dbVersion, clVersion); + rc = _cl5UpgradeMinor(dbVersion, clVersion); if (rc != CL5_SUCCESS) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
0
4e8d94aeb94c71c596a7221d88cf76408b916996
389ds/389-ds-base
do not erase source tarball
commit 4e8d94aeb94c71c596a7221d88cf76408b916996 Author: richm%stanfordalumni.org <richm%stanfordalumni.org> Date: Fri Jun 23 15:28:29 2006 +0000 do not erase source tarball diff --git a/build_svrcore-devel_rpm b/build_svrcore-devel_rpm index a7b51ab97..1dae4eddf 100644 --- a/build_svrcore-devel_rpm +++ b/build_svrcore-devel_rpm @@ -86,7 +86,7 @@ SVRCORE_TAG=SVRCORE_4_0_2_RTM SVRCORE_CORECONF_TAG=$SVRCORE_TAG PRJ=${PACKAGE}-${VERSION} RPM_HOME=`pwd` -SPEC_FILENAME=svrcore-devel.spec +SPEC_FILENAME=${PACKAGE}.spec # define subroutines for this script usage() { @@ -133,9 +133,6 @@ mkdirs BUILD if [ ! -d ${RPM_HOME}/RPMS ]; then mkdirs ${RPM_HOME}/RPMS fi -if [ ! -d ${RPM_HOME}/SGTARS ]; then - mkdirs ${RPM_HOME}/SGTARS -fi mkdirs SOURCES SPECS if [ ! -d ${RPM_HOME}/SRPMS ]; then mkdirs ${RPM_HOME}/SRPMS @@ -168,9 +165,9 @@ cp ${SPEC_FILENAME} SPECS/${SPEC_FILENAME} # build the ${BITSIZE} RPM and Source RPM echo "Executing ${BITSIZE} rpmbuild of ${SPEC_FILENAME} file . . . " if [ ! -f SRPMS/${PACKAGE}-${VERSION}-${RELEASE}.src.rpm ]; then - rpmbuild --define="_topdir ${RPM_HOME}" --target ${PLATFORM} -ba --clean --rmsource --rmspec SPECS/${SPEC_FILENAME} + rpmbuild --define="_topdir ${RPM_HOME}" --target ${PLATFORM} -ba --clean --rmspec SPECS/${SPEC_FILENAME} else - rpmbuild --define="_topdir ${RPM_HOME}" --target ${PLATFORM} -bb --clean --rmsource --rmspec SPECS/${SPEC_FILENAME} + rpmbuild --define="_topdir ${RPM_HOME}" --target ${PLATFORM} -bb --clean --rmspec SPECS/${SPEC_FILENAME} fi echo "Finished doing ${BITSIZE} rpmbuild of ${SPEC_FILENAME} file." @@ -180,11 +177,11 @@ if [ -d ${RPM_HOME}/BUILD ]; then fi echo "Finished." -echo "Removing SOURCES directory . . ." -if [ -d ${RPM_HOME}/SOURCES ]; then - rm -rf ${RPM_HOME}/SOURCES -fi -echo "Finished." +#echo "Removing SOURCES directory . . ." +#if [ -d ${RPM_HOME}/SOURCES ]; then +# rm -rf ${RPM_HOME}/SOURCES +#fi +#echo "Finished." echo "Removing SPECS directory . . ." if [ -d ${RPM_HOME}/SPECS ]; then
0
e3d870efbb2de5048abb7835ea5fad0b0fc81ec4
389ds/389-ds-base
Ticket 48991 - Fix lib389 spec for python2 and python3 Bug Description: Python3 is soon to be the default in fedora. We should be ready for this, and lay the foundation for rhel adoption of python3 also Fix Description: Fix the spec file to create python2-lib389 and python3-lib389 on rhel we only generate the python2 version. Additionally, we gate our new cli tools to be python3 only in these builds. https://fedorahosted.org/389/ticket/48991 Author: wibrown Review by: mreynolds (Thanks!)
commit e3d870efbb2de5048abb7835ea5fad0b0fc81ec4 Author: William Brown <[email protected]> Date: Wed Sep 14 16:20:40 2016 +1000 Ticket 48991 - Fix lib389 spec for python2 and python3 Bug Description: Python3 is soon to be the default in fedora. We should be ready for this, and lay the foundation for rhel adoption of python3 also Fix Description: Fix the spec file to create python2-lib389 and python3-lib389 on rhel we only generate the python2 version. Additionally, we gate our new cli tools to be python3 only in these builds. https://fedorahosted.org/389/ticket/48991 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/src/lib389/VERSION b/src/lib389/VERSION index 663c61a88..21e8796a0 100644 --- a/src/lib389/VERSION +++ b/src/lib389/VERSION @@ -1,2 +1 @@ -1.0.2 - +1.0.3 diff --git a/src/lib389/python-lib389.spec b/src/lib389/python-lib389.spec index 3eb124ec8..6b8cb7bdb 100644 --- a/src/lib389/python-lib389.spec +++ b/src/lib389/python-lib389.spec @@ -1,6 +1,10 @@ -Summary: A library for accessing, testing, and configuring the 389 Directory Server -Name: python-lib389 -Version: 1.0.2 +%global srcname lib389 +%global sum A library for accessing, testing, and configuring the 389 Directory Server +%global vers 1.0.3 + +Name: python-%{srcname} +Summary:%{sum} +Version: %{vers} Release: 1%{?dist} %global tarver %{version}-1 Source0: http://www.port389.org/binaries/%{name}-%{tarver}.tar.bz2 @@ -8,45 +12,107 @@ License: GPLv3+ Group: Development/Libraries BuildArch: noarch Url: http://port389.org/docs/389ds/FAQ/upstream-test-framework.html -BuildRequires: python2-devel -BuildRequires: python-ldap -BuildRequires: krb5-devel +%if 0%{?rhel} +BuildRequires: python-devel BuildRequires: python-setuptools -Requires: pytest +%else +BuildRequires: python2-devel +BuildRequires: python2-setuptools +BuildRequires: python%{python3_pkgversion}-devel +BuildRequires: python%{python3_pkgversion}-setuptools +%endif +%description +This module contains tools and libraries for 
accessing, testing, +and configuring the 389 Directory Server. + + +%package -n python2-%{srcname} +Summary: %{sum} Requires: python-ldap +Requires: krb5-workstation +Requires: krb5-server +# Conditional will need to change later. +%if 0%{?rhel} +Requires: pytest Requires: python-six Requires: python-pyasn1 Requires: python-pyasn1-modules +Requires: python-dateutil +%else +Requires: python2-pytest +Requires: python2-six +Requires: python2-pyasn1 +Requires: python2-pyasn1-modules Requires: python2-dateutil +%endif +%{?python_provide:%python_provide python2-%{srcname}} +%description -n python2-%{srcname} +This module contains tools and libraries for accessing, testing, +and configuring the 389 Directory Server. -%{?python_provide:%python_provide python2-lib389} - -%description +# Can't build on EL7! Python3 tooling is too broken :( +# We have to use >= 8, because <= 7 doesn't work .... +%if 0%{?rhel} >= 8 || 0%{?fedora} +%package -n python%{python3_pkgversion}-%{srcname} +Summary: %{sum} +Requires: python%{python3_pkgversion}-pytest +Requires: python%{python3_pkgversion}-pyldap +Requires: python%{python3_pkgversion}-six +Requires: python%{python3_pkgversion}-pyasn1 +Requires: python%{python3_pkgversion}-pyasn1-modules +Requires: python%{python3_pkgversion}-dateutil +%{?python_provide:%python_provide python%{python3_pkgversion}-%{srcname}} +%description -n python%{python3_pkgversion}-%{srcname} This module contains tools and libraries for accessing, testing, and configuring the 389 Directory Server. 
+%endif %prep -%setup -q -n %{name}-%{tarver} +%autosetup -n %{name}-%{tarver} %build -CFLAGS="$RPM_OPT_FLAGS" %{__python2} setup.py build +%py2_build +%if 0%{?rhel} >= 8 || 0%{?fedora} +%py3_build +%endif %install -%{__python2} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT -for file in $RPM_BUILD_ROOT%{python2_sitelib}/lib389/clitools/*.py; do - chmod a+x $file -done - -%check -%{__python2} setup.py test +%py2_install +%if 0%{?rhel} >= 8 || 0%{?fedora} +%py3_install +%endif -%files +%files -n python2-%{srcname} %license LICENSE %doc README %{python2_sitelib}/* +# We don't provide the cli tools for python2 %exclude %{_sbindir}/* +%if 0%{?rhel} >= 8 || 0%{?fedora} +%files -n python%{python3_pkgversion}-%{srcname} +%license LICENSE +%doc README +%{python3_sitelib}/* +%{_sbindir}/* +%endif + %changelog +* Thu Sep 22 2016 William Brown <[email protected]> - 1.0.3-1 +- Bump version to 1.0.3 pre-release +- Ticket 48952 - Restart command needs a sleep +- Ticket 47957 - Update the replication "idle" status string +- Ticket 48949 - Fix ups for style and correctness +- Ticket 48951 - dsadm and dsconf base files +- Ticket 48951 - dsadm dsconfig status and plugin +- Ticket 48984 - Add lib389 paths module +- Ticket 48991 - Fix lib389 spec for python2 and python3 +- Ticket 48949 - configparser fallback not python2 compatible +- Ticket 48949 - os.makedirs() exist_ok not python2 compatible, added try/except +- Ticket 48949 - change default file path generation - use os.path.join +- Ticket 48949 - added copying slapd-collations.conf + + * Mon Aug 1 2016 Mark Reynolds <[email protected]> - 1.0.2-1 - Bump version to 1.0.2 - Ticket 48946 - openConnection should not fully popluate DirSrv object
0
7301d4356b19b55de892970a6311b901900e6af8
389ds/389-ds-base
Issue 50747 - Port readnsstate to dsctl Description: Port the legacy tool readnsstate to dsctl, and add a healthcheck for local and remote offset that are close to triggering replication time skew errors relates: https://pagure.io/389-ds-base/issue/50747 Reviewed by: tbordaz(Thanks!) Revise lint messages per Thierry's requests adjust skew calculation Update man page
commit 7301d4356b19b55de892970a6311b901900e6af8 Author: Mark Reynolds <[email protected]> Date: Tue Dec 3 21:00:30 2019 -0500 Issue 50747 - Port readnsstate to dsctl Description: Port the legacy tool readnsstate to dsctl, and add a healthcheck for local and remote offset that are close to triggering replication time skew errors relates: https://pagure.io/389-ds-base/issue/50747 Reviewed by: tbordaz(Thanks!) Revise lint messages per Thierry's requests adjust skew calculation Update man page diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl index 8b86629ac..8484f862a 100755 --- a/src/lib389/cli/dsctl +++ b/src/lib389/cli/dsctl @@ -1,7 +1,7 @@ #!/usr/bin/python3 # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2016 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). @@ -21,6 +21,7 @@ from lib389.cli_ctl import instance as cli_instance from lib389.cli_ctl import dbtasks as cli_dbtasks from lib389.cli_ctl import tls as cli_tls from lib389.cli_ctl import health as cli_health +from lib389.cli_ctl import nsstate as cli_nsstate from lib389.cli_ctl.instance import instance_remove_all from lib389.cli_base import ( _get_arg, @@ -58,6 +59,7 @@ if not os.path.exists(DSRC_CONTAINER): cli_dbtasks.create_parser(subparsers) cli_tls.create_parser(subparsers) cli_health.create_parser(subparsers) +cli_nsstate.create_parser(subparsers) argcomplete.autocomplete(parser) diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py index d8f3d732b..a42016382 100644 --- a/src/lib389/lib389/cli_ctl/health.py +++ b/src/lib389/lib389/cli_ctl/health.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2016 Red Hat, Inc. +# Copyright (C) 2019 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). 
@@ -15,7 +15,7 @@ from lib389.config import Encryption, Config from lib389.monitor import MonitorDiskSpace from lib389.replica import Replica, Changelog5 from lib389.nss_ssl import NssSsl -from lib389.dseldif import FSChecks +from lib389.dseldif import FSChecks, DSEldif from lib389 import plugins from lib389._constants import DSRC_HOME @@ -33,6 +33,7 @@ CHECK_OBJECTS = [ MonitorDiskSpace, Replica, Changelog5, + DSEldif, NssSsl, ] diff --git a/src/lib389/lib389/cli_ctl/nsstate.py b/src/lib389/lib389/cli_ctl/nsstate.py new file mode 100644 index 000000000..6a74178ec --- /dev/null +++ b/src/lib389/lib389/cli_ctl/nsstate.py @@ -0,0 +1,64 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import json +from lib389.dseldif import DSEldif + + +def get_nsstate(inst, log, args): + """Process the nsState attribute""" + dse_ldif = DSEldif(inst) + states = dse_ldif.readNsState(suffix=args.suffix, flip=args.flip) + if args.json: + log.info(json.dumps(states)) + else: + for state in states: + log.info("Replica DN: " + state['dn']) + log.info("Replica Suffix: " + state['suffix']) + log.info("Replica ID: " + state['rid']) + log.info("Gen Time: " + state['gen_time']) + log.info("Gen Time String: " + state['gen_time_str']) + log.info("Gen as CSN: " + state['gencsn']) + log.info("Local Offset: " + state['local_offset']) + log.info("Local Offset String: " + state['local_offset_str']) + log.info("Remote Offset: " + state['remote_offset']) + log.info("Remote Offset String: " + state['remote_offset_str']) + log.info("Time Skew: " + state['time_skew']) + log.info("Time Skew String: " + state['time_skew_str']) + log.info("Seq Num: " + state['seq_num']) + log.info("System Time: " + state['sys_time']) + log.info("Diff in Seconds: " + state['diff_secs']) + log.info("Diff in days/secs: " + state['diff_days_secs']) + log.info("Endian: " + 
state['endian']) + log.info("") + + +def create_parser(subparsers): + repl_get_nsstate = subparsers.add_parser('get-nsstate', help="""Get the replication nsState in a human readable format + +Replica DN: The DN of the replication configuration entry +Replica SUffix: The replicated suffix +Replica ID: The Replica identifier +Gen Time The time the CSN generator was created +Gen Time String: The time string of generator +Gen as CSN: The generation CSN +Local Offset: The offset due to the local clock being set back +Local Offset String: The offset in a nice human format +Remote Offset: The offset due to clock difference with remote systems +Remote Offset String: The offset in a nice human format +Time Skew: The time skew between this server and its replicas +Time Skew String: The time skew in a nice human format +Seq Num: The number of multiple csns within a second +System Time: The local system time +Diff in Seconds: The time difference in seconds from the CSN generator creation to now +Diff in days/secs: The time difference broken up into days and seconds +Endian: Little/Big Endian +""") + repl_get_nsstate.add_argument('--suffix', default=False, help='The DN of the replication suffix to read the state from') + repl_get_nsstate.add_argument('--flip', default=False, help='Flip between Little/Big Endian, this might be required for certain architectures') + repl_get_nsstate.set_defaults(func=get_nsstate) diff --git a/src/lib389/lib389/dseldif.py b/src/lib389/lib389/dseldif.py index 4155abcdd..75fc76a46 100644 --- a/src/lib389/lib389/dseldif.py +++ b/src/lib389/lib389/dseldif.py @@ -9,9 +9,22 @@ import copy import os +import sys +import base64 +import time +from struct import pack, unpack +from datetime import timedelta from stat import ST_MODE +# from lib389.utils import print_nice_time from lib389.paths import Paths -from lib389.lint import DSPERMLE0001, DSPERMLE0002 +from lib389.lint import ( + DSPERMLE0001, + DSPERMLE0002, + DSSKEWLE0001, + DSSKEWLE0002, + 
DSSKEWLE0003 +) + class DSEldif(object): """A class for working with dse.ldif file @@ -46,6 +59,37 @@ class DSEldif(object): processed_line = line else: processed_line = processed_line[:-1] + line[1:] + self._lint_functions = [self._lint_nsstate] + + def lint(self): + results = [] + for fn in self._lint_functions: + for result in fn(): + if result is not None: + results.append(result) + return results + + def _lint_nsstate(self): + suffixes = self.readNsState() + for suffix in suffixes: + # Check the local offset first + report = None + skew = int(suffix['time_skew']) + if skew >= 86400: + # 24 hours - replication will break + report = copy.deepcopy(DSSKEWLE0003) + elif skew >= 43200: + # 12 hours + report = copy.deepcopy(DSSKEWLE0002) + elif skew >= 21600: + # 6 hours + report = copy.deepcopy(DSSKEWLE0001) + if report is not None: + report['items'].append(suffix['suffix']) + report['items'].append('Time Skew') + report['items'].append('Skew: ' + suffix['time_skew_str']) + report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid) + yield report def _update(self): """Update the dse.ldif with a new contents""" @@ -159,6 +203,123 @@ class DSEldif(object): self.add(entry_dn, attr, value) self._update() + # Read NsState helper functions + def _flipend(self, end): + if end == '<': + return '>' + if end == '>': + return '<' + + def _getGenState(self, dn, replica_suffix, nsstate, flip): + """Return a dict ofall the nsState properties + """ + from lib389.utils import print_nice_time + if pack('<h', 1) == pack('=h',1): + endian = "Little Endian" + end = '<' + if flip: + end = flipend(end) + elif pack('>h', 1) == pack('=h',1): + endian = "Big Endian" + end = '>' + if flip: + end = flipend(end) + else: + raise ValueError("Unknown endian, unable to proceed") + + thelen = len(nsstate) + if thelen <= 20: + pad = 2 # padding for short H values + timefmt = 'I' # timevals are unsigned 32-bit int + else: + pad = 6 # padding for short H values + timefmt = 'Q' # 
timevals are unsigned 64-bit int + + base_fmtstr = "H%dx3%sH%dx" % (pad, timefmt, pad) + fmtstr = end + base_fmtstr + (rid, sampled_time, local_offset, remote_offset, seq_num) = unpack(fmtstr, nsstate) + now = int(time.time()) + tdiff = now-sampled_time + wrongendian = False + try: + tdelta = timedelta(seconds=tdiff) + wrongendian = tdelta.days > 10*365 + except OverflowError: # int overflow + wrongendian = True + + # if the sampled time is more than 20 years off, this is + # probably the wrong endianness + if wrongendian: + end = flipend(end) + fmtstr = end + base_fmtstr + (rid, sampled_time, local_offset, remote_offset, seq_num) = unpack(fmtstr, nsstate) + tdiff = now-sampled_time + tdelta = timedelta(seconds=tdiff) + + return { + 'dn': dn, + 'suffix': replica_suffix, + 'endian': endian, + 'rid': str(rid), + 'gen_time': str(sampled_time), + 'gencsn': "%08x%04d%04d0000" % (sampled_time, seq_num, rid), + 'gen_time_str': time.ctime(sampled_time), + 'local_offset': str(local_offset), + 'local_offset_str': print_nice_time(local_offset), + 'remote_offset': str(remote_offset), + 'remote_offset_str': print_nice_time(remote_offset), + 'time_skew': str(local_offset + remote_offset), + 'time_skew_str': print_nice_time(local_offset + remote_offset), + 'seq_num': str(seq_num), + 'sys_time': str(time.ctime(now)), + 'diff_secs': str(tdiff), + 'diff_days_secs': "%d:%d" % (tdelta.days, tdelta.seconds), + } + + def readNsState(self, suffix=None, flip=False): + """Look for the nsState attribute in replication configuration entries, + then decode the base64 value and provide a dict of all stats it + contains + + :param suffix: specific suffix to read nsState from + :type suffix: str + """ + found_replica = False + found_suffix = False + replica_suffix = "" + nsstate = "" + states = [] + + for line in self._contents: + if line.startswith("dn: "): + dn = line[4:].strip() + if dn.startswith("cn=replica"): + found_replica = True + else: + found_replica = False + else: + if 
line.lower().startswith("nsstate:: ") and dn.startswith("cn=replica"): + b64val = line[10:].strip() + nsstate = base64.decodebytes(b64val.encode()) + elif line.lower().startswith("nsds5replicaroot"): + found_suffix = True + replica_suffix = line.lower().split(':')[1].strip() + + if found_replica and found_suffix and nsstate != "": + # We have everything we need to proceed + if suffix is not None and suffix == replica_suffix: + states.append(self._getGenState(dn, replica_suffix, nsstate, flip)) + break + else: + states.append(self._getGenState(dn, replica_suffix, nsstate, flip)) + # reset flags for next round... + found_replica = False + found_suffix = False + replica_suffix = "" + nsstate = "" + + return states + class FSChecks(object): """This is for the healthcheck feature, check commonly used system config files the diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py index 736dffa14..b2bd8cd41 100644 --- a/src/lib389/lib389/lint.py +++ b/src/lib389/lib389/lint.py @@ -344,3 +344,55 @@ security database pin/password files should only be readable by Directory Server # chmod PERMS FILE""" } + +# NsState time skew issues +DSSKEWLE0001 = { + 'dsle': 'DSSKEWLE0001', + 'severity': 'Low', + 'items' : ['Replication'], + 'detail': """The time skew is over 6 hours. If this time skew continues to increase +to 24 hours then replication can potentially stop working. Please continue to +monitor the time skew offsets for increasing values.""", + 'fix' : """Monitor the time skew and avoid making changes to the system time. +Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems +and find the paragraph "Too much time skew".""" +} + +DSSKEWLE0002 = { + 'dsle': 'DSSKEWLE0002', + 'severity': 'Medium', + 'items' : ['Replication'], + 'detail': """The time skew is over 12 hours. 
If this time skew continues to increase +to 24 hours then replication can potentially stop working. Please continue to +monitor the time skew offsets for increasing values. Setting nsslapd-ignore-time-skew +to "on" on each replica will allow replication to continue, but if the time skew +continues to increase other more serious replication problems can occur.""", + 'fix' : """Monitor the time skew and avoid making changes to the system time. +If you get close to 24 hours of time skew replication may stop working. +In that case configure the server to ignore the time skew until the system +times can be fixed/synchronized: + + # dsconf slapd-YOUR_INSTANCE config replace nsslapd-ignore-time-skew=on + +Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems +and find the paragraph "Too much time skew".""" +} + +DSSKEWLE0003 = { + 'dsle': 'DSSKEWLE0003', + 'severity': 'High', + 'items' : ['Replication'], + 'detail': """The time skew is over 24 hours. Setting nsslapd-ignore-time-skew +to "on" on each replica will allow replication to continue, but if the +time skew continues to increase other serious replication problems can +occur.""", + 'fix' : """Avoid making changes to the system time, and make sure the clocks +on all the replicas are correct. 
If you haven't set the server's +"ignore time skew" setting then do the following on all the replicas +until the time issues have been resolved: + + # dsconf slapd-YOUR_INSTANCE config replace nsslapd-ignore-time-skew=on + +Also look at https://access.redhat.com/documentation/en-us/red_hat_directory_server/11/html/administration_guide/managing_replication-troubleshooting_replication_related_problems +and find the paragraph "Too much time skew".""" +} diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py index 459a490e6..70a3a10cb 100644 --- a/src/lib389/lib389/utils.py +++ b/src/lib389/lib389/utils.py @@ -1327,3 +1327,31 @@ def search_filter_escape_bytes(bytes_value): else: raise RuntimeError('Running with Python 2 is unsupported') + +def print_nice_time(seconds): + """Convert seconds to a pretty format + """ + seconds = int(seconds) + d, s = divmod(seconds, 24*60*60) + h, s = divmod(s, 60*60) + m, s = divmod(s, 60) + d_plural = "" + h_plural = "" + m_plural = "" + s_plural = "" + if d > 1: + d_plural = "s" + if h != 1: + h_plural = "s" + if m != 1: + m_plural = "s" + if s != 1: + s_plural = "s" + if d > 0: + return f'{d:d} day{d_plural}, {h:d} hour{h_plural}, {m:d} minute{m_plural}, {s:d} second{s_plural}' + elif h > 0: + return f'{h:d} hour{h_plural}, {m:d} minute{m_plural}, {s:d} second{s_plural}' + elif m > 0: + return f'{m:d} minute{m_plural}, {s:d} second{s_plural}' + else: + return f'{s:d} second{s_plural}'
0