commit_id: string
repo: string
commit_message: string
diff: string
label: int64
5afcbb0d53e06aee85373df32c87bf830d41c251
389ds/389-ds-base
Issue 50933 - Fix OID change between 10rfc2307 and 10rfc2307compat Bug Description: 10rfc2307compat changed the OID of the nisMap objectclass to match the standard OID, but this breaks replication with older versions of DS. Fix Description: Continue to use the old (invalid?) OID for nisMap so that replication does not break in a mixed-version environment. Fixes: https://pagure.io/389-ds-base/issue/50933 Reviewed by: firstyear & tbordaz (Thanks!!)
commit 5afcbb0d53e06aee85373df32c87bf830d41c251 Author: Mark Reynolds <[email protected]> Date: Wed Aug 12 12:46:42 2020 -0400 Issue 50933 - Fix OID change between 10rfc2307 and 10rfc2307compat Bug Description: 10rfc2307compat changed the OID for nisMap objectclass to match the standard OID, but this breaks replication with older versions of DS. Fix Description: Continue to use the old(invalid?) oid for nisMap so that replication does not break in a mixed version environment. Fixes: https://pagure.io/389-ds-base/issue/50933 Reviewed by: firstyear & tbordaz(Thanks!!) diff --git a/ldap/schema/10rfc2307compat.ldif b/ldap/schema/10rfc2307compat.ldif index 78c588d08..8ba72e1e3 100644 --- a/ldap/schema/10rfc2307compat.ldif +++ b/ldap/schema/10rfc2307compat.ldif @@ -253,7 +253,7 @@ objectClasses: ( MAY ( nisNetgroupTriple $ memberNisNetgroup $ description ) ) objectClasses: ( - 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL + 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL DESC 'A generic abstraction of a NIS map' MUST nisMapName MAY description
0
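The record above changes only the OID inside an objectClasses definition, yet that is enough to make replicas disagree on the schema. A minimal Python sketch of that disagreement; the regex helper and the two definition strings are illustrative (taken from the hunk), not part of any 389-ds tooling:

```python
import re

# Illustrative helper: extract the numeric OID from an RFC 4512 style
# "objectClasses: ( <oid> NAME '<name>' ... )" schema definition.
def objectclass_oid(defn):
    match = re.search(r"\(\s*([\d.]+)\s+NAME\s+'([^']+)'", defn)
    if match is None:
        raise ValueError("not an objectClasses definition")
    return match.group(1)

# The two nisMap definitions that appear in the hunk above, one per side
# of the change; a mixed-version topology would see both at once.
defn_removed = "( 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL MUST nisMapName MAY description )"
defn_added   = "( 1.3.6.1.1.1.2.13 NAME 'nisMap' SUP top STRUCTURAL MUST nisMapName MAY description )"

# Same objectclass name, two different OIDs -> schema conflict during replication.
print(objectclass_oid(defn_removed), objectclass_oid(defn_added))
```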
900485260e789f6e83bbd1d2ed67774e8948c884
389ds/389-ds-base
Issue 4334 - RFE - Task timeout may cause larger dataset imports to fail (#4359) Bug Description: The task.wait() function had a hardcoded timeout and no method to "disable" that check. This could cause very large databases to fail to import. Fix Description: Support timeout=None, which allows the task to take 'infinite' time. Additionally, this provides a warning that this is occurring. fixes: #4334 Author: William Brown <[email protected]> Review by: @mreynolds389 @droideck (thanks!)
commit 900485260e789f6e83bbd1d2ed67774e8948c884 Author: Firstyear <[email protected]> Date: Mon Oct 12 07:54:01 2020 +1000 Issue 4334 - RFE - Task timeout may cause larger dataset imports to fail (#4359) Bug Description: The task.wait() function had a hardcoded timeout and no method to "disable" that check. This could cause very large databases to fail to import. Fix Description: Support timeout=None, which allows the task to take 'infinite' time. Additionally, this provides a warning that this is occuring. fixes: #4334 Author: William Brown <[email protected]> Review by: @mreynolds389 @droideck (thanks!) diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py index 39f4e92f4..d7a6e670c 100644 --- a/src/lib389/lib389/cli_conf/backend.py +++ b/src/lib389/lib389/cli_conf/backend.py @@ -241,7 +241,7 @@ def backend_import(inst, basedn, log, args): task = mc.import_ldif(ldifs=args.ldifs, chunk_size=args.chunks_size, encrypted=args.encrypted, gen_uniq_id=args.gen_uniq_id, only_core=args.only_core, include_suffixes=args.include_suffixes, exclude_suffixes=args.exclude_suffixes) - task.wait() + task.wait(timeout=None) result = task.get_exit_code() if task.is_complete() and result == 0: @@ -269,7 +269,7 @@ def backend_export(inst, basedn, log, args): encrypted=args.encrypted, min_base64=args.min_base64, no_dump_uniq_id=args.no_dump_uniq_id, replication=args.replication, not_folded=args.not_folded, no_seq_num=args.no_seq_num, include_suffixes=args.include_suffixes, exclude_suffixes=args.exclude_suffixes) - task.wait() + task.wait(timeout=None) result = task.get_exit_code() if task.is_complete() and result == 0: diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py index 549333837..9911d5276 100644 --- a/src/lib389/lib389/tasks.py +++ b/src/lib389/lib389/tasks.py @@ -81,7 +81,10 @@ class Task(DSLdapObject): """Wait until task is complete.""" count = 0 - while count < timeout: + if timeout is None: + self._log.debug("No timeout is set, this may take a long time ...") + + while timeout is None or count < timeout: if self.is_complete(): break count = count + 1
0
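The essential change above is a polling loop whose bound can be switched off by passing timeout=None. A minimal sketch of that pattern, assuming a generic poll callable rather than the real lib389 Task class:

```python
import logging
import time

log = logging.getLogger(__name__)

def wait_for(is_complete, timeout=120, interval=1):
    """Poll is_complete() until it returns True; timeout=None means 'forever'."""
    if timeout is None:
        log.debug("No timeout is set, this may take a long time ...")
    count = 0
    while timeout is None or count < timeout:
        if is_complete():
            return True
        count += 1
        time.sleep(interval)
    return False  # the bound was hit before the task finished

# e.g. for a very large import: wait_for(task.is_complete, timeout=None)
```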
a32b2a9ddc5764d449b051c20077643585e81def
389ds/389-ds-base
Ticket #47709 - package issue in 389-ds-base Description: Following the packaging guidelines, moving python binaries to the architecture-aware location and libns-dshttpd.so* from 389-ds-base to 389-ds-base-libs. https://fedorahosted.org/389/ticket/47709 Reviewed by [email protected] (Thank you, Rich!!)
commit a32b2a9ddc5764d449b051c20077643585e81def Author: Noriko Hosoi <[email protected]> Date: Thu Feb 20 13:18:06 2014 -0800 Ticket #47709 - package issue in 389-ds-base Description: Following the package guideline, moving pytyon binaries to the architecture aware location and libns-dshttpd.so* from 389-ds-base to 389-ds-base-libs. https://fedorahosted.org/389/ticket/47709 Reviewed by [email protected] (Thank you, Rich!!) diff --git a/Makefile.am b/Makefile.am index 60c3d3cc7..04bada83e 100644 --- a/Makefile.am +++ b/Makefile.am @@ -168,6 +168,7 @@ initdir = @initdir@ initconfigdir = $(sysconfdir)@initconfigdir@ instconfigdir = @instconfigdir@ perldir = $(libdir)@perldir@ +pythondir = $(libdir)@pythondir@ infdir = $(datadir)@infdir@ mibdir = $(datadir)@mibdir@ updatedir = $(datadir)@updatedir@ @@ -254,9 +255,7 @@ config_DATA = $(srcdir)/lib/ldaputil/certmap.conf \ # with the default schema e.g. there is # considerable overlap of 60changelog.ldif and 01common.ldif # and 60inetmail.ldif and 50ns-mail.ldif among others -sampledata_DATA = ldap/admin/src/scripts/failedbinds.py \ - ldap/admin/src/scripts/DSSharedLib \ - ldap/admin/src/scripts/logregex.py \ +sampledata_DATA = ldap/admin/src/scripts/DSSharedLib \ $(srcdir)/ldap/ldif/Ace.ldif \ $(srcdir)/ldap/ldif/European.ldif \ $(srcdir)/ldap/ldif/Eurosuffix.ldif \ @@ -408,6 +407,9 @@ perl_DATA = ldap/admin/src/scripts/SetupLog.pm \ ldap/admin/src/scripts/DSUpdate.pm \ ldap/admin/src/scripts/DSUpdateDialogs.pm +python_DATA = ldap/admin/src/scripts/failedbinds.py \ + ldap/admin/src/scripts/logregex.py + property_DATA = ldap/admin/src/scripts/setup-ds.res \ ldap/admin/src/scripts/migrate-ds.res diff --git a/configure.ac b/configure.ac index 7216f88c0..16ab6a93c 100644 --- a/configure.ac +++ b/configure.ac @@ -264,6 +264,8 @@ if test "$with_fhs_opt" = "yes"; then propertydir=/properties # relative to libdir perldir=/perl + # relative to libdir + pythondir=/python else if test "$with_fhs" = "yes"; then ac_default_prefix=/usr @@ -295,6 +297,8 @@ else propertydir=/$PACKAGE_NAME/properties # relative to libdir perldir=/$PACKAGE_NAME/perl + # relative to libdir + pythondir=/$PACKAGE_NAME/python fi # if mandir is the default value, override it @@ -355,6 +359,23 @@ else with_perldir= fi +AC_MSG_CHECKING(for --with-pythondir) +AC_ARG_WITH([pythondir], + AS_HELP_STRING([--with-pythondir=PATH], + [Directory for python)]) +) +if test -n "$with_pythondir"; then + if test "$with_pythondir" = yes ; then + AC_MSG_ERROR([You must specify --with-pythondir=/full/path/to/python]) + elif test "$with_pythondir" = no ; then + with_pythondir= + else + AC_MSG_RESULT([$with_pythondir]) + fi +else + with_pythondir= +fi + AC_MSG_CHECKING(for --with-systemdsystemunitdir) AC_ARG_WITH([systemdsystemunitdir], AS_HELP_STRING([--with-systemdsystemunitdir=PATH], @@ -421,6 +442,7 @@ AC_SUBST(serverincdir) AC_SUBST(serverplugindir) AC_SUBST(scripttemplatedir) AC_SUBST(perldir) +AC_SUBST(pythondir) AC_SUBST(infdir) AC_SUBST(mibdir) AC_SUBST(mandir)
0
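The configure.ac hunk above defines pythondir relative to libdir (/python with the FHS-opt layout, /$PACKAGE_NAME/python otherwise), which is what makes the new location architecture-aware. A small sketch of that path computation, assuming typical lib/lib64 values:

```python
# Path computation mirrored from the configure.ac hunk, for illustration only.
def pythondir(libdir, package_name="389-ds-base", fhs_opt=False):
    rel = "/python" if fhs_opt else "/" + package_name + "/python"
    return libdir + rel

print(pythondir("/usr/lib"))                        # -> /usr/lib/389-ds-base/python
print(pythondir("/usr/lib64"))                      # -> /usr/lib64/389-ds-base/python (64-bit host)
print(pythondir("/opt/dirsrv/lib", fhs_opt=True))   # -> /opt/dirsrv/lib/python
```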
bc7f87709449298c3c76fd73246e794013605067
389ds/389-ds-base
Resolves: #251090 Summary: ds_remove cannot remove/rename directories (Comment #2) Description: when creating inst_dir and config_dir, make sure the parent dirs (lib/<brand>-ds and etc/<brand>-ds) have the specified gid.
commit bc7f87709449298c3c76fd73246e794013605067 Author: Noriko Hosoi <[email protected]> Date: Tue Aug 7 23:28:03 2007 +0000 Resolves: #251090 Summary: ds_remove cannot remove/rename directories (Comment #2) Description: when creating inst_dir and config_dir, make sure the parent dirs (lib/<brand>-ds and etc/<brand>-ds) have the specified gid. diff --git a/ldap/admin/src/scripts/DSCreate.pm.in b/ldap/admin/src/scripts/DSCreate.pm.in index a122d536b..2557f3983 100644 --- a/ldap/admin/src/scripts/DSCreate.pm.in +++ b/ldap/admin/src/scripts/DSCreate.pm.in @@ -53,7 +53,7 @@ use Net::Domain qw(hostfqdn); use File::Temp qw(tempfile tempdir); use File::Path; use File::Copy; -use File::Basename qw(basename); +use File::Basename qw(basename dirname); # load perldap use Mozilla::LDAP::Conn; @@ -135,10 +135,14 @@ sub sanityCheckParams { sub getMode { my $inf = shift; my $mode = shift; + my $rest = shift; + if (!$rest) { + $rest = "0"; + } if (defined($inf->{General}->{SuiteSpotGroup})) { - $mode = "0" . $mode . $mode . "0"; + $mode = "0" . $mode . $mode . $rest; } else { - $mode = "0" . $mode . "00"; + $mode = "0" . $mode . $rest . $rest; } return oct($mode); @@ -152,6 +156,7 @@ sub changeOwnerMode { my $inf = shift; my $mode = shift; my $it = shift; + my $gidonly = shift; my $uid = getpwnam $inf->{General}->{SuiteSpotUserID}; my $gid = -1; # default to leave it alone @@ -160,14 +165,18 @@ sub changeOwnerMode { $gid = getgrnam $inf->{General}->{SuiteSpotGroup}; } - $mode = getMode($inf, $mode); + $mode = getMode($inf, $mode, $gidonly); $! = 0; # clear errno chmod $mode, $it; if ($!) { return ('error_chmoding_file', $it, $!); } $! = 0; # clear errno - chown $uid, $gid, $it; + if ( $gidonly ) { + chown -1, $gid, $it; + } else { + chown $uid, $gid, $it; + } if ($!) { return ('error_chowning_file', $it, $inf->{General}->{SuiteSpotUserID}, $!); } @@ -195,6 +204,18 @@ sub makeDSDirs { return @errs; } } + # set the group of the parent dir of config_dir and inst_dir + if (defined($inf->{General}->{SuiteSpotGroup})) { + for (qw(inst_dir config_dir)) { + my $dir = $inf->{slapd}->{$_}; + my $parent = dirname($dir); + # changeOwnerMode(inf, mode, file, gidonly & default mode); + @errs = changeOwnerMode($inf, 7, $parent, 5); + if (@errs) { + return @errs; + } + } + } return @errs; } @@ -511,9 +532,9 @@ sub startServer { last; } sleep(1); - if (!($ii % 10)) { - debug(0, "Attempting to obtain server status . . .\n"); - } + if (!($ii % 10)) { + debug(0, "Attempting to obtain server status . . .\n"); + } ++$ii; }
0
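The heart of the change above is getMode() growing a third digit argument so that the parent directories can be chmod'ed with group access (e.g. 0775) while everything else keeps the old 0770/0700 behaviour. A Python rendering of that Perl logic, for illustration only:

```python
def get_mode(has_suitespot_group, mode, rest=0):
    """Build the octal mode the way DSCreate.pm's getMode() does."""
    if has_suitespot_group:
        digits = "0{}{}{}".format(mode, mode, rest)   # owner and group share `mode`
    else:
        digits = "0{}{}{}".format(mode, rest, rest)   # no group: only the owner gets it
    return int(digits, 8)

assert get_mode(True, 7) == 0o770       # old default: `rest` is 0
assert get_mode(True, 7, 5) == 0o775    # the new gid-friendly mode for parent dirs
assert get_mode(False, 7) == 0o700
```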
4e39b5bd17f1e1a306496e0bbb51943fc665186d
389ds/389-ds-base
Ticket 48147 - Install section for [email protected] template Bug Description: The command systemctl enable [email protected] does not currently work because of a missing [Install] section. Fix Description: Add the [Install] section to the .service template. https://fedorahosted.org/389/ticket/48147 Author: wibrown Review by: mreynolds (thanks!)
commit 4e39b5bd17f1e1a306496e0bbb51943fc665186d Author: William Brown <[email protected]> Date: Mon Jan 25 14:51:45 2016 +1000 Ticket 48147 - Install section for [email protected] template Bug Description: The commands systemctl enable [email protected] do not currently operate. This is because of a missing Install section Fix Description: Add the install section to the .service template. https://fedorahosted.org/389/ticket/48147 Author: wibrown Review by: mreynolds (thanks!) diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in index 9432cb01e..f0aa28716 100644 --- a/wrappers/systemd.template.service.in +++ b/wrappers/systemd.template.service.in @@ -26,3 +26,7 @@ ExecStopPost=/bin/rm -f @localstatedir@/run/@package_name@/slapd-%i.pid # if you need to set other directives e.g. LimitNOFILE=8192 # set them in this file .include @initconfigdir@/@[email protected] + +[Install] +WantedBy=dirsrv.target +
0
bc0c543c95152857cc5eec5067c2b07bff6b3668
389ds/389-ds-base
Issue 6368 - fix basic test suite Bug Description: Basic test suite started to error out: > target_be = CustomSetup._search_be(backend_options, backend) E TypeError: CustomSetup._search_be() missing 1 required positional argument: 'beinfo' Fix Description: Add missing argument. Relates: https://github.com/389ds/389-ds-base/issues/6368 Reviewed by: @progier389, @droideck (Thanks!)
commit bc0c543c95152857cc5eec5067c2b07bff6b3668 Author: Viktor Ashirov <[email protected]> Date: Tue Dec 10 12:45:23 2024 +0100 Issue 6368 - fix basic test suite Bug Description: Basic test suite started to error out: > target_be = CustomSetup._search_be(backend_options, backend) E TypeError: CustomSetup._search_be() missing 1 required positional argument: 'beinfo' Fix Description: Add missing argument. Relates: https://github.com/389ds/389-ds-base/issues/6368 Reviewed by: @progier389, @droideck (Thanks!) diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py index 6f3c31ea7..f5830b006 100644 --- a/dirsrvtests/tests/suites/basic/basic_test.py +++ b/dirsrvtests/tests/suites/basic/basic_test.py @@ -124,7 +124,7 @@ exec {nsslapd} -D {cfgdir} -i {pidfile} if self.status(): return time.sleep(1) - raise TimeoutException('Failed to start ns-slpad') + raise TimeoutError('Failed to start ns-slpad') def stop(self, timeout=120): self._reset_systemd() @@ -173,7 +173,7 @@ exec {nsslapd} -D {cfgdir} -i {pidfile} if not backend_list: continue for backend in backend_list: - target_be = CustomSetup._search_be(backend_options, backend) + target_be = CustomSetup._search_be(self, backend_options, backend) if not target_be: target_be = {} backend_options.append(target_be)
0
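The TypeError above is the classic symptom of calling an instance method through the class object: the first positional argument gets bound to self, everything shifts by one, and the last parameter looks missing. A reduced stand-in (the class body and data below are invented for illustration):

```python
class CustomSetup:
    def _search_be(self, backend_options, beinfo):
        # pretend to look up `beinfo` among the already-collected options
        return next((be for be in backend_options if be.get("name") == beinfo), None)

setup = CustomSetup()
opts = [{"name": "userroot"}]

try:
    # Broken form: `opts` binds to self, "userroot" to backend_options.
    CustomSetup._search_be(opts, "userroot")
except TypeError as err:
    print(err)  # ... missing 1 required positional argument: 'beinfo'

# Fixed form, equivalent to the committed change (or simply setup._search_be(...)):
print(CustomSetup._search_be(setup, opts, "userroot"))
```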
1a769808a6e169913be66ffbabc7f278594798a7
389ds/389-ds-base
544089 - Referential Integrity Plugin does not take into account the attribute subtypes Bug Description: Referential Integrity Plugin does not change the references in subtyped attributes like "manager;en" or "ou;19" Fix Description: The problem is in the way the function int update_integrity (char **argv, char *origDN, char *newrDN, int logChanges) in referint.c makes the changes. The initial search with the filter ldap_create_filter( filter, filtlen, "(%a=%e)", NULL, NULL, argv[i], origDN, NULL) finds the entries with attributes and with attribute subtypes. But after that when generating the necessary changes (attribute1.mod_type = argv[i] and attribute2.mod_type = argv[i]) the function takes care only of the "base" attributes listed in the plugin arguments. We should parse each found entry to find all the attribute subtypes with the value concerned and then make changes to them all. Note: This bug was reported by [email protected], and the bug fix was also provided by him. The patch was reviewed by [email protected] as well as [email protected].
commit 1a769808a6e169913be66ffbabc7f278594798a7 Author: Noriko Hosoi <[email protected]> Date: Mon Jan 25 15:05:59 2010 -0800 544089 - Referential Integrity Plugin does not take into account the attribute subtypes Bug Description: Referential Integrity Plugin does not change the references in subtyped attributes like "manager;en" or "ou;19" Fix Description: The problem is in the way the function int update_integrity (char **argv, char *origDN, char *newrDN, int logChanges) in referint.c makes the changes. The initial search with the filter ldap_create_filter( filter, filtlen, "(%a=%e)", NULL, NULL, argv[i], origDN, NULL) finds the entries with attributes and with attribute subtypes. But after that when generating the necessary changes (attribute1.mod_type = argv[i] and attribute2.mod_type = argv[i]) the function takes care only of the "base" attributes listed in the plugin arguments. We should parse each found entry to find all the attribute subtypes with the value concerned and then make changes to them all. Note: This bug was reported by [email protected], and the bug fix was also provided by him. The patch was reviewed by [email protected] as well as [email protected]. diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c index 70fccd22c..1c3698250 100644 --- a/ldap/servers/plugins/referint/referint.c +++ b/ldap/servers/plugins/referint/referint.c @@ -317,6 +317,7 @@ int update_integrity(char **argv, char *origDN, char *newrDN, int logChanges){ Slapi_Entry **search_entries = NULL; int search_result; Slapi_DN *sdn = NULL; + Slapi_Value *oldDNslv = NULL; void *node = NULL; LDAPMod attribute1, attribute2; const LDAPMod *list_of_mods[3]; @@ -338,6 +339,7 @@ int update_integrity(char **argv, char *origDN, char *newrDN, int logChanges){ goto free_and_return; } + oldDNslv = slapi_value_new_string(origDN); /* for now, just putting attributes to keep integrity on in conf file, until resolve the other timing mode issue */ @@ -355,9 +357,9 @@ int update_integrity(char **argv, char *origDN, char *newrDN, int logChanges){ if (( search_result = ldap_create_filter( filter, filtlen, "(%a=%e)", NULL, NULL, argv[i], origDN, NULL )) == LDAP_SUCCESS ) { - /* Don't need any attribute */ + /* Need only the current attribute and its subtypes */ char * attrs[2]; - attrs[0]="1.1"; + attrs[0]=argv[i]; attrs[1]=NULL; /* Use new search API */ @@ -378,76 +380,88 @@ int update_integrity(char **argv, char *origDN, char *newrDN, int logChanges){ for(j=0; search_entries[j] != NULL; j++) { - /* no matter what mode in always going to delete old dn so set that up */ - values_del[0]= origDN; - values_del[1]= NULL; - attribute1.mod_type = argv[i]; - attribute1.mod_op = LDAP_MOD_DELETE; - attribute1.mod_values = values_del; - list_of_mods[0] = &attribute1; - - if(newrDN == NULL){ - /* in delete mode so terminate list of mods cause this is the only one */ - list_of_mods[1] = NULL; - }else if(newrDN != NULL){ - /* in modrdn mode */ - - /* need to put together rdn into a dn */ - dnParts = ldap_explode_dn( origDN, 0 ); + Slapi_Attr *attr = NULL; + char *attrName = NULL; + + /* Loop over all the attributes of the entry and search for the integrity attribute and its subtypes */ + for (slapi_entry_first_attr(search_entries[j], &attr); attr; slapi_entry_next_attr(search_entries[j], attr, &attr)) + { + /* Take into account only the subtypes of the attribute in argv[i] having the necessary value - origDN */ + slapi_attr_get_type(attr, &attrName); + if ((slapi_attr_type_cmp(argv[i], attrName, 
SLAPI_TYPE_CMP_SUBTYPE) == 0) && + (slapi_attr_value_find(attr, slapi_value_get_berval(oldDNslv)) == 0)) + { + /* no matter what mode in always going to delete old dn so set that up */ + values_del[0]= origDN; + values_del[1]= NULL; + attribute1.mod_type = attrName; + attribute1.mod_op = LDAP_MOD_DELETE; + attribute1.mod_values = values_del; + list_of_mods[0] = &attribute1; + + if(newrDN == NULL){ + /* in delete mode so terminate list of mods cause this is the only one */ + list_of_mods[1] = NULL; + }else if(newrDN != NULL){ + /* in modrdn mode */ + + /* need to put together rdn into a dn */ + dnParts = ldap_explode_dn( origDN, 0 ); - /* skip original rdn so start at 1*/ - dnsize = 0; - for(x=1; dnParts[x] != NULL; x++) - { - /* +1 for comma adding later */ - dnsize += strlen(dnParts[x]) + 1; - } - /* add the newrDN length */ - dnsize += strlen(newrDN) + 1; - - newDN = slapi_ch_calloc(dnsize, sizeof(char)); - strcat(newDN, newrDN); - for(x=1; dnParts[x] != NULL; x++) - { - strcat(newDN, ","); - strcat(newDN, dnParts[x]); - } + /* skip original rdn so start at 1*/ + dnsize = 0; + for(x=1; dnParts[x] != NULL; x++) + { + /* +1 for comma adding later */ + dnsize += strlen(dnParts[x]) + 1; + } + /* add the newrDN length */ + dnsize += strlen(newrDN) + 1; + + newDN = slapi_ch_calloc(dnsize, sizeof(char)); + strcat(newDN, newrDN); + for(x=1; dnParts[x] != NULL; x++) + { + strcat(newDN, ","); + strcat(newDN, dnParts[x]); + } - values_add[0]=newDN; - values_add[1]=NULL; - attribute2.mod_type = argv[i]; - attribute2.mod_op = LDAP_MOD_ADD; - attribute2.mod_values = values_add; - - /* add the new dn to list of mods and terminate list of mods */ - list_of_mods[1] = &attribute2; - list_of_mods[2] = NULL; - - } - - /* try to cleanup entry */ - - /* Use new internal operation API */ - mod_result_pb=slapi_pblock_new(); - slapi_modify_internal_set_pb(mod_result_pb,slapi_entry_get_dn(search_entries[j]), - (LDAPMod **)list_of_mods,NULL,NULL,referint_plugin_identity,0); - slapi_modify_internal_pb(mod_result_pb); - - /* could check the result code here if want to log it or something later - for now, continue no matter what result is */ - - slapi_pblock_destroy(mod_result_pb); - - /* cleanup memory allocated for dnParts and newDN */ - if(dnParts != NULL){ - for(x=0; dnParts[x] != NULL; x++) - { - slapi_ch_free_string(&dnParts[x]); + values_add[0]=newDN; + values_add[1]=NULL; + attribute2.mod_type = attrName; + attribute2.mod_op = LDAP_MOD_ADD; + attribute2.mod_values = values_add; + + /* add the new dn to list of mods and terminate list of mods */ + list_of_mods[1] = &attribute2; + list_of_mods[2] = NULL; + + } + + /* try to cleanup entry */ + + /* Use new internal operation API */ + mod_result_pb=slapi_pblock_new(); + slapi_modify_internal_set_pb(mod_result_pb,slapi_entry_get_dn(search_entries[j]), + (LDAPMod **)list_of_mods,NULL,NULL,referint_plugin_identity,0); + slapi_modify_internal_pb(mod_result_pb); + + /* could check the result code here if want to log it or something later + for now, continue no matter what result is */ + + slapi_pblock_destroy(mod_result_pb); + + /* cleanup memory allocated for dnParts and newDN */ + if(dnParts != NULL){ + for(x=0; dnParts[x] != NULL; x++) + { + slapi_ch_free_string(&dnParts[x]); + } + slapi_ch_free((void **)&dnParts); + } + slapi_ch_free_string(&newDN); } - slapi_ch_free((void **)&dnParts); } - slapi_ch_free_string(&newDN); - } @@ -480,6 +494,8 @@ int update_integrity(char **argv, char *origDN, char *newrDN, int logChanges){ free_and_return: + 
slapi_value_free(&oldDNslv); + /* free filter and search_results_pb */ slapi_ch_free_string(&filter);
0
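The fix above switches the plugin from touching only the exact attribute named in its arguments to touching every subtype of it ("manager;en", "ou;19", ...) that holds the old DN. A Python model of that selection and mod-building step; the dict/tuple representation is illustrative, not the slapi C API:

```python
def base_type(attr_name):
    """'manager;en' and 'manager;x-foo' both have base type 'manager'."""
    return attr_name.split(";", 1)[0].lower()

def build_mods(entry_attrs, ref_attr, old_dn, new_dn=None):
    mods = []
    for attr_name, values in entry_attrs.items():
        # only subtypes of the configured attribute that actually hold the old DN
        if base_type(attr_name) != ref_attr.lower() or old_dn not in values:
            continue
        mods.append(("delete", attr_name, [old_dn]))
        if new_dn is not None:            # modrdn case: also add the renamed DN
            mods.append(("add", attr_name, [new_dn]))
    return mods

entry = {
    "manager":     ["uid=old,ou=people,dc=example,dc=com"],
    "manager;en":  ["uid=old,ou=people,dc=example,dc=com"],
    "description": ["unrelated"],
}
print(build_mods(entry, "manager",
                 "uid=old,ou=people,dc=example,dc=com",
                 "uid=new,ou=people,dc=example,dc=com"))
```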
897b284c3095bb4e95cb239af4b2cb23de0ebbf9
389ds/389-ds-base
Resolves: bug 483254 Bug Description: Modification of nsViewFilter of a virtual view OU crashes the server Reviewed by: nhosoi, andrey.ivanov (Thanks!) Fix Description: When we delete a node, not only do we need to have the parent node discover its new children, but we also need to have each child discover a new parent. Platforms tested: RHEL5 Flag Day: no Doc impact: no
commit 897b284c3095bb4e95cb239af4b2cb23de0ebbf9 Author: Rich Megginson <[email protected]> Date: Thu Feb 5 15:19:01 2009 +0000 Resolves: bug 483254 Bug Description: Modification of nsViewFilter of a virtual view OU crashes the server Reviewed by: nhosoi, andrey.ivanov (Thanks!) Fix Description: When we delete a node, not only do we need to have the parent node discover its new children, we need to have each child discover a new parent. Platforms tested: RHEL5 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/views/views.c b/ldap/servers/plugins/views/views.c index 1d1f2e823..cc3100778 100644 --- a/ldap/servers/plugins/views/views.c +++ b/ldap/servers/plugins/views/views.c @@ -131,6 +131,7 @@ static int views_dn_views_cb (Slapi_Entry* e, void *callback_data); static int views_cache_add_dn_views(char *dn, viewEntry **pViews); static void views_cache_add_ll_entry(void** attrval, void *theVal); static void views_cache_discover_parent(viewEntry *pView); +static void views_cache_discover_parent_for_children(viewEntry *pView); static void views_cache_discover_children(viewEntry *pView); static void views_cache_discover_view_scope(viewEntry *pView); static void views_cache_create_applied_filter(viewEntry *pView); @@ -658,6 +659,22 @@ static void views_cache_discover_parent(viewEntry *pView) } } +/* + views_cache_discover_parent_for_children + ------------------------------ + The current node is being deleted - for each child, need + to find a new parent +*/ +static void views_cache_discover_parent_for_children(viewEntry *pView) +{ + int ii = 0; + + for (ii = 0; pView->pChildren && (ii < pView->child_count); ++ii) + { + viewEntry *current = (viewEntry *)pView->pChildren[ii]; + views_cache_discover_parent(current); + } +} /* views_cache_discover_children @@ -1471,9 +1488,11 @@ static void views_update_views_cache( Slapi_Entry *e, char *dn, int modtype, Sla theCache.pCacheViews = (viewEntry*)(theView->list.pNext); } - /* update children */ + /* the parent of this node needs to know about its children */ if(theView->pParent) views_cache_discover_children((viewEntry*)theView->pParent); + /* each child of the deleted node will need to discover a new parent */ + views_cache_discover_parent_for_children((viewEntry*)theView); /* update filters */ for(current = theCache.pCacheViews; current != NULL; current = current->list.pNext)
0
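The crash fix above amounts to a relinking rule: when a view node is deleted, its former parent must rediscover its children and each orphaned child must rediscover a parent, so no stale pointer survives. A toy Python model of that rule (the DN-suffix parent lookup is a simplification, not the plugin's real scope logic):

```python
def parent_dn(dn, cache):
    # closest remaining ancestor by DN suffix (simplified parent discovery)
    candidates = [d for d in cache if d != dn and dn.endswith("," + d)]
    return max(candidates, key=len) if candidates else None

def relink_after_delete(cache, deleted_dn):
    children = [d for d in cache if cache[d]["parent"] == deleted_dn]
    del cache[deleted_dn]
    for child in children:                       # each child finds a new parent
        cache[child]["parent"] = parent_dn(child, cache)
    for dn in cache:                             # parents rediscover their children
        cache[dn]["children"] = [d for d in cache if cache[d]["parent"] == dn]

cache = {
    "ou=views,dc=example,dc=com":           {"parent": None, "children": []},
    "ou=a,ou=views,dc=example,dc=com":      {"parent": "ou=views,dc=example,dc=com", "children": []},
    "ou=b,ou=a,ou=views,dc=example,dc=com": {"parent": "ou=a,ou=views,dc=example,dc=com", "children": []},
}
relink_after_delete(cache, "ou=a,ou=views,dc=example,dc=com")
print(cache["ou=b,ou=a,ou=views,dc=example,dc=com"]["parent"])  # ou=views,dc=example,dc=com
```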
f0005282be64c72927cd3716f4eb76e9cb1c6d67
389ds/389-ds-base
Ticket 48681 - logconv.pl lists sasl binds with no dn as anonymous Bug Description: logconv.pl incorrectly processes the SASL bind steps as individual anonymous binds. It also incorrectly counts each sasl bind step as an individual SASL bind. Fix Description: Do not count SASL bind steps as anonymous binds unless the mech ANONYMOUS is used. This also adjusts the SASL bind count to account for the bind steps. So although a SASL bind might perform three BIND ops, we only count it as one. https://fedorahosted.org/389/ticket/48681 Reviewed by: nhosoi(Thanks!)
commit f0005282be64c72927cd3716f4eb76e9cb1c6d67 Author: Mark Reynolds <[email protected]> Date: Thu Dec 8 09:37:31 2016 -0500 Ticket 48681 - logconv.pl lists sasl binds with no dn as anonymous Bug Description: logconv.pl incorrectly processes the SASL bind steps as individual anonymous binds. It also incorrectly counts each sasl bind step as an individual SASL bind. Fix Description: Do not count SASL bind steps as anonymous binds unless the mech ANONYMOUS is used. This also adjusts the SASL bind count to account for the bind steps. So although a SASL bind might perform three BIND ops, we only count it as one. https://fedorahosted.org/389/ticket/48681 Reviewed by: nhosoi(Thanks!) diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl index 9b5e34bbf..96639f294 100755 --- a/ldap/admin/src/logconv.pl +++ b/ldap/admin/src/logconv.pl @@ -257,7 +257,7 @@ map {$conn{$_} = $_} @conncodes; # hash db-backed hashes my @hashnames = qw(attr rc src rsrc excount conn_hash ip_hash conncount nentries - filter base ds6xbadpwd saslmech bindlist etime oid + filter base ds6xbadpwd saslmech saslconnop bindlist etime oid start_time_of_connection end_time_of_connection notesa_conn_op notesu_conn_op etime_conn_op nentries_conn_op optype_conn_op time_conn_op srch_conn_op del_conn_op mod_conn_op @@ -2117,7 +2117,7 @@ sub parseLineNormal ($connID) = $_ =~ /conn=(\d*)\s/; handleConnClose($connID); } - if (m/ BIND/ && $_ =~ /dn=\"(.*)\" method/i ){ + if (m/ BIND/ && $_ =~ /dn=\"(.*)\" method=128/i ){ my $binddn = $1; if($reportStats){ inc_stats('bind',$s_stats,$m_stats); } $bindCount++; @@ -2534,7 +2534,28 @@ sub parseLineNormal if (/ BIND / && /method=sasl/i){ $saslBindCount++; if ($_ =~ /mech=(.*)/i ){ - $hashes->{saslmech}->{$1}++; + my $mech = $1; + $hashes->{saslmech}->{$mech}++; + my ($conn, $op); + if ($_ =~ /conn= *([0-9A-Z]+) +op= *([0-9\-]+)/i){ + $conn = $1; + $op = $2; + $hashes->{saslconnop}->{$conn-$op} = $mech; + } + } + if (/ mech=ANONYMOUS/){ + $anonymousBindCount++; + } + } + if (/ RESULT err=14 tag=97 / && / SASL bind in progress/){ + # Drop the sasl bind count since this is step in the bind process + $saslBindCount--; + my ($conn, $op); + if ($_ =~ /conn= *([0-9A-Z]+) +op= *([0-9\-]+)/i){ + $conn = $1; + $op = $2; + my $mech = $hashes->{saslconnop}->{$conn-$op}; + $hashes->{saslmech}->{$mech}--; } } if (/ conn=Internal op=-1 / && !/ RESULT err=/){ $internalOpCount++; }
0
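The counting rule in the fix above has three parts: remember the SASL mech per conn/op pair, treat mech=ANONYMOUS as the only SASL case counted as an anonymous bind, and subtract the err=14 "SASL bind in progress" continuation steps so a multi-step bind counts once. A simplified Python rendition; the regexes and sample log lines are approximations, not logconv.pl's exact patterns:

```python
import re
from collections import defaultdict

def count_sasl_binds(lines):
    sasl_binds = anon_binds = 0
    mech_by_op = {}
    mech_counts = defaultdict(int)
    for line in lines:
        conn_op = re.search(r"conn=(\d+) op=(\d+)", line)
        if " BIND " in line and "method=sasl" in line:
            sasl_binds += 1
            mech = re.search(r"mech=(\S+)", line)
            if mech:
                mech_counts[mech.group(1)] += 1
                if conn_op:
                    mech_by_op[conn_op.groups()] = mech.group(1)
                if mech.group(1) == "ANONYMOUS":
                    anon_binds += 1
        elif "RESULT err=14" in line and "SASL bind in progress" in line:
            sasl_binds -= 1                      # a step of an earlier bind, not a new one
            if conn_op and conn_op.groups() in mech_by_op:
                mech_counts[mech_by_op[conn_op.groups()]] -= 1
    return sasl_binds, anon_binds, dict(mech_counts)

access_log = [
    'conn=1 op=0 BIND dn="" method=sasl version=3 mech=GSSAPI',
    'conn=1 op=0 RESULT err=14 tag=97 nentries=0 etime=0, SASL bind in progress',
    'conn=1 op=1 BIND dn="" method=sasl version=3 mech=GSSAPI',
    'conn=1 op=1 RESULT err=0 tag=97 nentries=0 etime=0',
]
print(count_sasl_binds(access_log))  # (1, 0, {'GSSAPI': 1})
```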
e23f2af63da5d37c45ecc27a6b6617111516c7f3
389ds/389-ds-base
Ticket 48996 - Fix rpm to work with ns 0.2.0 Bug Description: During the update to NS 0.2.0, I didn't test the rpm build. The changes to make liblfds710.so broke library path resolution during the build. Fix Description: Alter the nunc-stans bundle build to properly allow resolution of nunc-stans during make time. https://fedorahosted.org/389/ticket/48996 Author: wibrown Review by: nhosoi (Thanks)
commit e23f2af63da5d37c45ecc27a6b6617111516c7f3 Author: William Brown <[email protected]> Date: Thu Oct 6 12:51:48 2016 +1000 Ticket 48996 - Fix rpm to work with ns 0.2.0 Bug Description: During the update to NS 0.2.0, I didn't test the rpm build. The changes to make liblfds710.so, broke library path resolution during the build. Fix Description: Alter the nunc-stans bundle build to properly allow resolution of nunc-stans during make time. https://fedorahosted.org/389/ticket/48996 Author: wibrown Review by: nhosoi (Thanks) diff --git a/Makefile.in b/Makefile.in index cd07f40e2..f7b3c9585 100644 --- a/Makefile.in +++ b/Makefile.in @@ -1,7 +1,7 @@ -# Makefile.in generated by automake 1.15 from Makefile.am. +# Makefile.in generated by automake 1.13.4 from Makefile.am. # @configure_input@ -# Copyright (C) 1994-2014 Free Software Foundation, Inc. +# Copyright (C) 1994-2013 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -20,17 +20,7 @@ VPATH = @srcdir@ -am__is_gnu_make = { \ - if test -z '$(MAKELEVEL)'; then \ - false; \ - elif test -n '$(MAKE_HOST)'; then \ - true; \ - elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ - true; \ - else \ - false; \ - fi; \ -} +am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)' am__make_running_with_option = \ case $${target_option-} in \ ?) ;; \ @@ -103,6 +93,12 @@ noinst_PROGRAMS = makstrdb$(EXEEXT) @SOLARIS_TRUE@am__append_2 = -lrt @SOLARIS_TRUE@am__append_3 = ldap/servers/slapd/tools/ldclt/opCheck.c subdir = . +DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \ + $(top_srcdir)/configure $(am__configure_deps) \ + $(srcdir)/config.h.in $(top_srcdir)/rpm/389-ds-base.spec.in \ + depcomp $(dist_man_MANS) $(dist_noinst_DATA) \ + $(dist_noinst_HEADERS) $(serverinc_HEADERS) README compile \ + config.guess config.sub install-sh missing ltmain.sh ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ @@ -117,9 +113,6 @@ am__aclocal_m4_deps = $(top_srcdir)/m4/libtool.m4 \ $(top_srcdir)/m4/systemd.m4 $(top_srcdir)/configure.ac am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ $(ACLOCAL_M4) -DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ - $(am__configure_deps) $(dist_noinst_DATA) \ - $(dist_noinst_HEADERS) $(serverinc_HEADERS) $(am__DIST_COMMON) am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ configure.lineno config.status.lineno mkinstalldirs = $(install_sh) -d @@ -1262,10 +1255,6 @@ ETAGS = etags CTAGS = ctags CSCOPE = cscope AM_RECURSIVE_TARGETS = cscope -am__DIST_COMMON = $(dist_man_MANS) $(srcdir)/Makefile.in \ - $(srcdir)/config.h.in $(top_srcdir)/rpm/389-ds-base.spec.in \ - README compile config.guess config.sub depcomp install-sh \ - ltmain.sh missing DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) distdir = $(PACKAGE)-$(VERSION) top_distdir = $(distdir) @@ -3262,6 +3251,7 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ $(am__cd) $(top_srcdir) && \ $(AUTOMAKE) --foreign Makefile +.PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ *config.status*) \ @@ -3282,8 +3272,8 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) $(am__aclocal_m4_deps): config.h: stamp-h1 - @test -f $@ || rm -f stamp-h1 - @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 + @if test ! -f $@; then rm -f stamp-h1; else :; fi + @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status @rm -f stamp-h1 @@ -10365,7 +10355,7 @@ distdir: $(DISTFILES) ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ || chmod -R a+r "$(distdir)" dist-gzip: distdir - tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz $(am__post_remove_distdir) dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 @@ -10380,17 +10370,11 @@ dist-xz: distdir $(am__post_remove_distdir) dist-tarZ: distdir - @echo WARNING: "Support for distribution archives compressed with" \ - "legacy program 'compress' is deprecated." >&2 - @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__post_remove_distdir) dist-shar: distdir - @echo WARNING: "Support for shar distribution archives is" \ - "deprecated." >&2 - @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 - shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz + shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz $(am__post_remove_distdir) dist-zip: distdir @@ -10408,7 +10392,7 @@ dist dist-all: distcheck: dist case '$(DIST_ARCHIVES)' in \ *.tar.gz*) \ - eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ *.tar.lz*) \ @@ -10418,23 +10402,22 @@ distcheck: dist *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ - eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ *.zip*) \ unzip $(distdir).zip ;;\ esac chmod -R a-w $(distdir) chmod u+w $(distdir) - mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst + mkdir $(distdir)/_build $(distdir)/_inst chmod a-w $(distdir) test -d $(distdir)/_build || exit 0; \ dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ && am__cwd=`pwd` \ - && $(am__cd) $(distdir)/_build/sub \ - && ../../configure \ + && $(am__cd) $(distdir)/_build \ + && ../configure --srcdir=.. --prefix="$$dc_install_base" \ $(AM_DISTCHECK_CONFIGURE_FLAGS) \ $(DISTCHECK_CONFIGURE_FLAGS) \ - --srcdir=../.. --prefix="$$dc_install_base" \ && $(MAKE) $(AM_MAKEFLAGS) \ && $(MAKE) $(AM_MAKEFLAGS) dvi \ && $(MAKE) $(AM_MAKEFLAGS) check \ @@ -10762,8 +10745,6 @@ uninstall-man: uninstall-man1 uninstall-man8 uninstall-systemdsystemunitDATA uninstall-taskSCRIPTS \ uninstall-updateDATA uninstall-updateSCRIPTS -.PRECIOUS: Makefile - clean-local: -rm -rf dist diff --git a/aclocal.m4 b/aclocal.m4 index 8d926b8e7..58e04a09c 100644 --- a/aclocal.m4 +++ b/aclocal.m4 @@ -1,6 +1,6 @@ -# generated automatically by aclocal 1.15 -*- Autoconf -*- +# generated automatically by aclocal 1.13.4 -*- Autoconf -*- -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2013 Free Software Foundation, Inc. 
# This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -20,7 +20,7 @@ You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically 'autoreconf'.])]) -# Copyright (C) 2002-2014 Free Software Foundation, Inc. +# Copyright (C) 2002-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -32,10 +32,10 @@ To do so, use the procedure documented by the package, typically 'autoreconf'.]) # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], -[am__api_version='1.15' +[am__api_version='1.13' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. -m4_if([$1], [1.15], [], +m4_if([$1], [1.13.4], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) @@ -51,14 +51,14 @@ m4_define([_AM_AUTOCONF_VERSION], []) # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], -[AM_AUTOMAKE_VERSION([1.15])dnl +[AM_AUTOMAKE_VERSION([1.13.4])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # Figure out how to run the assembler. -*- Autoconf -*- -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -78,7 +78,7 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl # AM_AUX_DIR_EXPAND -*- Autoconf -*- -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -123,14 +123,15 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], -[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl -# Expand $ac_aux_dir to an absolute path. -am_aux_dir=`cd "$ac_aux_dir" && pwd` +[dnl Rely on autoconf to set up CDPATH properly. +AC_PREREQ([2.50])dnl +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` ]) # AM_COND_IF -*- Autoconf -*- -# Copyright (C) 2008-2014 Free Software Foundation, Inc. +# Copyright (C) 2008-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -167,7 +168,7 @@ fi[]dnl # AM_CONDITIONAL -*- Autoconf -*- -# Copyright (C) 1997-2014 Free Software Foundation, Inc. +# Copyright (C) 1997-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -198,7 +199,7 @@ AC_CONFIG_COMMANDS_PRE( Usually this means the macro was only invoked conditionally.]]) fi])]) -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2013 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -389,7 +390,7 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl # Generate code to set up dependency tracking. -*- Autoconf -*- -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -465,7 +466,7 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], # Do all the work for Automake. -*- Autoconf -*- -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -474,12 +475,6 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. -dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. -m4_define([AC_PROG_CC], -m4_defn([AC_PROG_CC]) -[_AM_PROG_CC_C_O -]) - # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- @@ -555,8 +550,8 @@ AC_REQUIRE([AC_PROG_MKDIR_P])dnl # <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html> # <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html> AC_SUBST([mkdir_p], ['$(MKDIR_P)']) -# We need awk for the "check" target (and possibly the TAP driver). The -# system "awk" is bad on some platforms. +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl @@ -588,51 +583,6 @@ dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl - -# POSIX will say in a future version that running "rm -f" with no argument -# is OK; and we want to be able to make that assumption in our Makefile -# recipes. So use an aggressive probe to check that the usage we want is -# actually supported "in the wild" to an acceptable degree. -# See automake bug#10828. -# To make any issue more visible, cause the running configure to be aborted -# by default if the 'rm' program in use doesn't match our expectations; the -# user can still override this though. -if rm -f && rm -fr && rm -rf; then : OK; else - cat >&2 <<'END' -Oops! - -Your 'rm' program seems unable to run without file operands specified -on the command line, even when the '-f' option is present. This is contrary -to the behaviour of most rm programs out there, and not conforming with -the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542> - -Please tell [email protected] about your system, including the value -of your $PATH and any error possibly output before this message. This -can help us improve future automake versions. - -END - if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then - echo 'Configuration will proceed anyway, since you have set the' >&2 - echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 - echo >&2 - else - cat >&2 <<'END' -Aborting the configuration process, to ensure you take notice of the issue. - -You can download and install GNU coreutils to get an 'rm' implementation -that behaves properly: <http://www.gnu.org/software/coreutils/>. 
- -If you want to complete the configuration process using your problematic -'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM -to "yes", and re-run configure. - -END - AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) - fi -fi -dnl The trailing newline in this macro's definition is deliberate, for -dnl backward compatibility and to allow trailing 'dnl'-style comments -dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. ]) dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not @@ -641,6 +591,7 @@ dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) + # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. @@ -662,7 +613,7 @@ for _am_header in $config_headers :; do done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -673,7 +624,7 @@ echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_co # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl -if test x"${install_sh+set}" != xset; then +if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; @@ -683,7 +634,7 @@ if test x"${install_sh+set}" != xset; then fi AC_SUBST([install_sh])]) -# Copyright (C) 2003-2014 Free Software Foundation, Inc. +# Copyright (C) 2003-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -705,7 +656,7 @@ AC_SUBST([am__leading_dot])]) # Add --enable-maintainer-mode option to configure. -*- Autoconf -*- # From Jim Meyering -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -740,7 +691,7 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) # Check to see how 'make' treats includes. -*- Autoconf -*- -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -788,9 +739,41 @@ AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) +# Copyright (C) 1999-2013 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_CC_C_O +# -------------- +# Like AC_PROG_CC_C_O, but changed for automake. +AC_DEFUN([AM_PROG_CC_C_O], +[AC_REQUIRE([AC_PROG_CC_C_O])dnl +AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([compile])dnl +# FIXME: we rely on the cache variable name because +# there is no other way. 
+set dummy $CC +am_cc=`echo $[2] | sed ['s/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/']` +eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o +if test "$am_t" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi +dnl Make sure AC_PROG_CC is never called again, or it will override our +dnl setting of CC. +m4_define([AC_PROG_CC], + [m4_fatal([AC_PROG_CC cannot be called after AM_PROG_CC_C_O])]) +]) + # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- -# Copyright (C) 1997-2014 Free Software Foundation, Inc. +# Copyright (C) 1997-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -829,7 +812,7 @@ fi # Helper functions for option handling. -*- Autoconf -*- -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -858,54 +841,7 @@ AC_DEFUN([_AM_SET_OPTIONS], AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) -# Copyright (C) 1999-2014 Free Software Foundation, Inc. -# -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# _AM_PROG_CC_C_O -# --------------- -# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC -# to automatically call this. -AC_DEFUN([_AM_PROG_CC_C_O], -[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl -AC_REQUIRE_AUX_FILE([compile])dnl -AC_LANG_PUSH([C])dnl -AC_CACHE_CHECK( - [whether $CC understands -c and -o together], - [am_cv_prog_cc_c_o], - [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) - # Make sure it works both with $CC and with simple cc. - # Following AC_PROG_CC_C_O, we do the test twice because some - # compilers refuse to overwrite an existing .o file with -o, - # though they will create one. - am_cv_prog_cc_c_o=yes - for am_i in 1 2; do - if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ - && test -f conftest2.$ac_objext; then - : OK - else - am_cv_prog_cc_c_o=no - break - fi - done - rm -f core conftest* - unset am_i]) -if test "$am_cv_prog_cc_c_o" != yes; then - # Losing compiler, so override with the script. - # FIXME: It is wrong to rewrite CC. - # But if we don't then we get into trouble of one sort or another. - # A longer-term fix would be to have automake use am__CC in this case, - # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" - CC="$am_aux_dir/compile $CC" -fi -AC_LANG_POP([C])]) - -# For backward compatibility. -AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) - -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -924,7 +860,7 @@ AC_DEFUN([AM_RUN_LOG], # Check to make sure that the build environment is sane. -*- Autoconf -*- -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2013 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -1005,7 +941,7 @@ AC_CONFIG_COMMANDS_PRE( rm -f conftest.file ]) -# Copyright (C) 2009-2014 Free Software Foundation, Inc. +# Copyright (C) 2009-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -1065,7 +1001,7 @@ AC_SUBST([AM_BACKSLASH])dnl _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ]) -# Copyright (C) 2001-2014 Free Software Foundation, Inc. +# Copyright (C) 2001-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -1093,7 +1029,7 @@ fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) -# Copyright (C) 2006-2014 Free Software Foundation, Inc. +# Copyright (C) 2006-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -1112,7 +1048,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- -# Copyright (C) 2004-2014 Free Software Foundation, Inc. +# Copyright (C) 2004-2013 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, diff --git a/compile b/compile index a85b723c7..531136b06 100755 --- a/compile +++ b/compile @@ -3,7 +3,7 @@ scriptversion=2012-10-14.11; # UTC -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2013 Free Software Foundation, Inc. # Written by Tom Tromey <[email protected]>. # # This program is free software; you can redistribute it and/or modify diff --git a/config.guess b/config.guess index dbfb9786c..b79252d6b 100755 --- a/config.guess +++ b/config.guess @@ -1,8 +1,8 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright 1992-2015 Free Software Foundation, Inc. +# Copyright 1992-2013 Free Software Foundation, Inc. -timestamp='2015-01-01' +timestamp='2013-06-10' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -24,12 +24,12 @@ timestamp='2015-01-01' # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # -# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# Originally written by Per Bothner. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD # -# Please send patches to <[email protected]>. +# Please send patches with a ChangeLog entry to [email protected]. me=`echo "$0" | sed -e 's,.*/,,'` @@ -50,7 +50,7 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright 1992-2015 Free Software Foundation, Inc. +Copyright 1992-2013 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." 
@@ -149,7 +149,7 @@ Linux|GNU|GNU/*) LIBC=gnu #endif EOF - eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` ;; esac @@ -579,9 +579,8 @@ EOF else IBM_ARCH=powerpc fi - if [ -x /usr/bin/lslpp ] ; then - IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | - awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi @@ -827,7 +826,7 @@ EOF *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; - *:MSYS*:*) + i*:MSYS*:*) echo ${UNAME_MACHINE}-pc-msys exit ;; i*:windows32*:*) @@ -970,10 +969,10 @@ EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; - openrisc*:Linux:*:*) - echo or1k-unknown-linux-${LIBC} + or1k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; - or32:Linux:*:* | or1k*:Linux:*:*) + or32:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; padre:Linux:*:*) @@ -1261,26 +1260,16 @@ EOF if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi - if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then - if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then - if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ - grep IS_64BIT_ARCH >/dev/null - then - case $UNAME_PROCESSOR in - i386) UNAME_PROCESSOR=x86_64 ;; - powerpc) UNAME_PROCESSOR=powerpc64 ;; - esac - fi + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac fi - elif test "$UNAME_PROCESSOR" = i386 ; then - # Avoid executing cc on OS X 10.9, as it ships with a stub - # that puts up a graphical alert prompting to install - # developer tools. Any system running Mac OS X 10.7 or - # later (Darwin 11 and later) is required to have a 64-bit - # processor. This is not true of the ARM version of Darwin - # that Apple uses in portable devices. - UNAME_PROCESSOR=x86_64 fi echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; @@ -1372,6 +1361,154 @@ EOF exit ;; esac +eval $set_cc_for_build +cat >$dummy.c <<EOF +#ifdef _SEQUENT_ +# include <sys/types.h> +# include <sys/utsname.h> +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... 
*/ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include <sys/param.h> + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix\n"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include <sys/param.h> +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + c34*) + echo c34-convex-bsd + exit ;; + c38*) + echo c38-convex-bsd + exit ;; + c4*) + echo c4-convex-bsd + exit ;; + esac +fi + cat >&2 <<EOF $0: unable to guess system type diff --git a/config.h.in b/config.h.in index 1b549f7a4..ea9369bb4 100644 --- a/config.h.in +++ b/config.h.in @@ -357,6 +357,9 @@ /* no getdomainname */ #undef NO_DOMAINNAME +/* Define to 1 if your C compiler doesn't accept -c and -o together. */ +#undef NO_MINUS_C_MINUS_O + /* OS version */ #undef OSVERSION diff --git a/config.sub b/config.sub index 6467c95af..c765b34b7 100755 --- a/config.sub +++ b/config.sub @@ -1,8 +1,8 @@ #! /bin/sh # Configuration validation subroutine script. -# Copyright 1992-2015 Free Software Foundation, Inc. +# Copyright 1992-2013 Free Software Foundation, Inc. 
-timestamp='2015-01-01' +timestamp='2013-04-24' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -25,7 +25,7 @@ timestamp='2015-01-01' # of the GNU General Public License, version 3 ("GPLv3"). -# Please send patches to <[email protected]>. +# Please send patches with a ChangeLog entry to [email protected]. # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. @@ -68,7 +68,7 @@ Report bugs and patches to <[email protected]>." version="\ GNU config.sub ($timestamp) -Copyright 1992-2015 Free Software Foundation, Inc. +Copyright 1992-2013 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." @@ -257,15 +257,14 @@ case $basic_machine in | avr | avr32 \ | be32 | be64 \ | bfin \ - | c4x | c8051 | clipper \ + | c4x | clipper \ | d10v | d30v | dlx | dsp16xx \ | epiphany \ - | fido | fr30 | frv | ft32 \ + | fido | fr30 | frv \ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ | hexagon \ | i370 | i860 | i960 | ia64 \ | ip2k | iq2000 \ - | k1om \ | le32 | le64 \ | lm32 \ | m32c | m32r | m32rle | m68000 | m68k | m88k \ @@ -283,10 +282,8 @@ case $basic_machine in | mips64vr5900 | mips64vr5900el \ | mipsisa32 | mipsisa32el \ | mipsisa32r2 | mipsisa32r2el \ - | mipsisa32r6 | mipsisa32r6el \ | mipsisa64 | mipsisa64el \ | mipsisa64r2 | mipsisa64r2el \ - | mipsisa64r6 | mipsisa64r6el \ | mipsisa64sb1 | mipsisa64sb1el \ | mipsisa64sr71k | mipsisa64sr71kel \ | mipsr5900 | mipsr5900el \ @@ -298,11 +295,11 @@ case $basic_machine in | nds32 | nds32le | nds32be \ | nios | nios2 | nios2eb | nios2el \ | ns16k | ns32k \ - | open8 | or1k | or1knd | or32 \ + | open8 \ + | or1k | or32 \ | pdp10 | pdp11 | pj | pjl \ | powerpc | powerpc64 | powerpc64le | powerpcle \ | pyramid \ - | riscv32 | riscv64 \ | rl78 | rx \ | score \ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ @@ -313,7 +310,6 @@ case $basic_machine in | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ | ubicom32 \ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ - | visium \ | we32k \ | x86 | xc16x | xstormy16 | xtensa \ | z8k | z80) @@ -328,10 +324,7 @@ case $basic_machine in c6x) basic_machine=tic6x-unknown ;; - leon|leon[3-9]) - basic_machine=sparc-$basic_machine - ;; - m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) basic_machine=$basic_machine-unknown os=-none ;; @@ -379,7 +372,7 @@ case $basic_machine in | be32-* | be64-* \ | bfin-* | bs2000-* \ | c[123]* | c30-* | [cjt]90-* | c4x-* \ - | c8051-* | clipper-* | craynv-* | cydra-* \ + | clipper-* | craynv-* | cydra-* \ | d10v-* | d30v-* | dlx-* \ | elxsi-* \ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ @@ -388,7 +381,6 @@ case $basic_machine in | hexagon-* \ | i*86-* | i860-* | i960-* | ia64-* \ | ip2k-* | iq2000-* \ - | k1om-* \ | le32-* | le64-* \ | lm32-* \ | m32c-* | m32r-* | m32rle-* \ @@ -408,10 +400,8 @@ case $basic_machine in | mips64vr5900-* | mips64vr5900el-* \ | mipsisa32-* | mipsisa32el-* \ | mipsisa32r2-* | mipsisa32r2el-* \ - | mipsisa32r6-* | mipsisa32r6el-* \ | mipsisa64-* | mipsisa64el-* \ | mipsisa64r2-* | mipsisa64r2el-* \ - | mipsisa64r6-* | mipsisa64r6el-* \ | mipsisa64sb1-* | mipsisa64sb1el-* \ | 
mipsisa64sr71k-* | mipsisa64sr71kel-* \ | mipsr5900-* | mipsr5900el-* \ @@ -423,7 +413,6 @@ case $basic_machine in | nios-* | nios2-* | nios2eb-* | nios2el-* \ | none-* | np1-* | ns16k-* | ns32k-* \ | open8-* \ - | or1k*-* \ | orion-* \ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ @@ -441,7 +430,6 @@ case $basic_machine in | ubicom32-* \ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ | vax-* \ - | visium-* \ | we32k-* \ | x86-* | x86_64-* | xc16x-* | xps100-* \ | xstormy16-* | xtensa*-* \ @@ -779,9 +767,6 @@ case $basic_machine in basic_machine=m68k-isi os=-sysv ;; - leon-*|leon[3-9]-*) - basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` - ;; m68knommu) basic_machine=m68k-unknown os=-linux @@ -809,7 +794,7 @@ case $basic_machine in os=-mingw64 ;; mingw32) - basic_machine=i686-pc + basic_machine=i386-pc os=-mingw32 ;; mingw32ce) @@ -837,10 +822,6 @@ case $basic_machine in basic_machine=powerpc-unknown os=-morphos ;; - moxiebox) - basic_machine=moxie-unknown - os=-moxiebox - ;; msdos) basic_machine=i386-pc os=-msdos @@ -849,7 +830,7 @@ case $basic_machine in basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` ;; msys) - basic_machine=i686-pc + basic_machine=i386-pc os=-msys ;; mvs) @@ -1386,14 +1367,14 @@ case $os in | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ | -linux-newlib* | -linux-musl* | -linux-uclibc* \ - | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -uxpv* | -beos* | -mpeix* | -udk* \ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ - | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*) + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) # Remember, each alternative MUST END IN *, to match a version number. ;; -qnx*) @@ -1565,9 +1546,6 @@ case $basic_machine in c4x-* | tic4x-*) os=-coff ;; - c8051-*) - os=-elf - ;; hexagon-*) os=-elf ;; @@ -1611,6 +1589,9 @@ case $basic_machine in mips*-*) os=-elf ;; + or1k-*) + os=-elf + ;; or32-*) os=-coff ;; diff --git a/configure b/configure index 8fbd53041..1290d52dc 100755 --- a/configure +++ b/configure @@ -2807,7 +2807,7 @@ cat >>confdefs.h <<_ACEOF #define DS_PACKAGE_STRING "$PACKAGE_STRING" _ACEOF -am__api_version='1.15' +am__api_version='1.13' ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do @@ -3008,8 +3008,8 @@ test "$program_suffix" != NONE && ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` -# Expand $ac_aux_dir to an absolute path. 
-am_aux_dir=`cd "$ac_aux_dir" && pwd` +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` if test x"${MISSING+set}" != xset; then case $am_aux_dir in @@ -3028,7 +3028,7 @@ else $as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} fi -if test x"${install_sh+set}" != xset; then +if test x"${install_sh}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; @@ -3347,8 +3347,8 @@ MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html> mkdir_p='$(MKDIR_P)' -# We need awk for the "check" target (and possibly the TAP driver). The -# system "awk" is bad on some platforms. +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. # Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AMTAR='$${TAR-tar}' @@ -3449,48 +3449,6 @@ $as_echo "$am_cv_prog_tar_pax" >&6; } -# POSIX will say in a future version that running "rm -f" with no argument -# is OK; and we want to be able to make that assumption in our Makefile -# recipes. So use an aggressive probe to check that the usage we want is -# actually supported "in the wild" to an acceptable degree. -# See automake bug#10828. -# To make any issue more visible, cause the running configure to be aborted -# by default if the 'rm' program in use doesn't match our expectations; the -# user can still override this though. -if rm -f && rm -fr && rm -rf; then : OK; else - cat >&2 <<'END' -Oops! - -Your 'rm' program seems unable to run without file operands specified -on the command line, even when the '-f' option is present. This is contrary -to the behaviour of most rm programs out there, and not conforming with -the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542> - -Please tell [email protected] about your system, including the value -of your $PATH and any error possibly output before this message. This -can help us improve future automake versions. - -END - if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then - echo 'Configuration will proceed anyway, since you have set the' >&2 - echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 - echo >&2 - else - cat >&2 <<'END' -Aborting the configuration process, to ensure you take notice of the issue. - -You can download and install GNU coreutils to get an 'rm' implementation -that behaves properly: <http://www.gnu.org/software/coreutils/>. - -If you want to complete the configuration process using your problematic -'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM -to "yes", and re-run configure. - -END - as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 - fi -fi - # define these for automake distdir VERSION=$PACKAGE_VERSION PACKAGE=$PACKAGE_TARNAME @@ -4841,65 +4799,6 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 -$as_echo_n "checking whether $CC understands -c and -o together... 
" >&6; } -if ${am_cv_prog_cc_c_o+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF - # Make sure it works both with $CC and with simple cc. - # Following AC_PROG_CC_C_O, we do the test twice because some - # compilers refuse to overwrite an existing .o file with -o, - # though they will create one. - am_cv_prog_cc_c_o=yes - for am_i in 1 2; do - if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 - ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } \ - && test -f conftest2.$ac_objext; then - : OK - else - am_cv_prog_cc_c_o=no - break - fi - done - rm -f core conftest* - unset am_i -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 -$as_echo "$am_cv_prog_cc_c_o" >&6; } -if test "$am_cv_prog_cc_c_o" != yes; then - # Losing compiler, so override with the script. - # FIXME: It is wrong to rewrite CC. - # But if we don't then we get into trouble of one sort or another. - # A longer-term fix would be to have automake use am__CC in this case, - # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" - CC="$am_aux_dir/compile $CC" -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - depcc="$CC" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 @@ -5028,6 +4927,131 @@ else fi +if test "x$CC" != xcc; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC and cc understand -c and -o together" >&5 +$as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cc understands -c and -o together" >&5 +$as_echo_n "checking whether cc understands -c and -o together... " >&6; } +fi +set dummy $CC; ac_cc=`$as_echo "$2" | + sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` +if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +# Make sure it works both with $CC and with simple cc. +# We do the test twice because some compilers refuse to overwrite an +# existing .o file with -o, though they will create one. +ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5' +rm -f conftest2.* +if { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && + test -f conftest2.$ac_objext && { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; +then + eval ac_cv_prog_cc_${ac_cc}_c_o=yes + if test "x$CC" != xcc; then + # Test first that cc exists at all. 
+ if { ac_try='cc -c conftest.$ac_ext >&5' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5' + rm -f conftest2.* + if { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && + test -f conftest2.$ac_objext && { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; + then + # cc works too. + : + else + # cc exists but doesn't like -o. + eval ac_cv_prog_cc_${ac_cc}_c_o=no + fi + fi + fi +else + eval ac_cv_prog_cc_${ac_cc}_c_o=no +fi +rm -f core conftest* + +fi +if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + +$as_echo "#define NO_MINUS_C_MINUS_O 1" >>confdefs.h + +fi + +# FIXME: we rely on the cache variable name because +# there is no other way. +set dummy $CC +am_cc=`echo $2 | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'` +eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o +if test "$am_t" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi + # By default we simply use the C compiler to build assembly code. @@ -5162,6 +5186,299 @@ else fi + case $ac_cv_prog_cc_stdc in #( + no) : + ac_cv_prog_cc_c99=no; ac_cv_prog_cc_c89=no ;; #( + *) : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C99" >&5 +$as_echo_n "checking for $CC option to accept ISO C99... " >&6; } +if ${ac_cv_prog_cc_c99+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c99=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <stdarg.h> +#include <stdbool.h> +#include <stdlib.h> +#include <wchar.h> +#include <stdio.h> + +// Check varargs macros. These examples are taken from C99 6.10.3.5. +#define debug(...) fprintf (stderr, __VA_ARGS__) +#define showlist(...) puts (#__VA_ARGS__) +#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__)) +static void +test_varargs_macros (void) +{ + int x = 1234; + int y = 5678; + debug ("Flag"); + debug ("X = %d\n", x); + showlist (The first, second, and third items.); + report (x>y, "x is %d but y is %d", x, y); +} + +// Check long long types. 
+#define BIG64 18446744073709551615ull +#define BIG32 4294967295ul +#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0) +#if !BIG_OK + your preprocessor is broken; +#endif +#if BIG_OK +#else + your preprocessor is broken; +#endif +static long long int bignum = -9223372036854775807LL; +static unsigned long long int ubignum = BIG64; + +struct incomplete_array +{ + int datasize; + double data[]; +}; + +struct named_init { + int number; + const wchar_t *name; + double average; +}; + +typedef const char *ccp; + +static inline int +test_restrict (ccp restrict text) +{ + // See if C++-style comments work. + // Iterate through items via the restricted pointer. + // Also check for declarations in for loops. + for (unsigned int i = 0; *(text+i) != '\0'; ++i) + continue; + return 0; +} + +// Check varargs and va_copy. +static void +test_varargs (const char *format, ...) +{ + va_list args; + va_start (args, format); + va_list args_copy; + va_copy (args_copy, args); + + const char *str; + int number; + float fnumber; + + while (*format) + { + switch (*format++) + { + case 's': // string + str = va_arg (args_copy, const char *); + break; + case 'd': // int + number = va_arg (args_copy, int); + break; + case 'f': // float + fnumber = va_arg (args_copy, double); + break; + default: + break; + } + } + va_end (args_copy); + va_end (args); +} + +int +main () +{ + + // Check bool. + _Bool success = false; + + // Check restrict. + if (test_restrict ("String literal") == 0) + success = true; + char *restrict newvar = "Another string"; + + // Check varargs. + test_varargs ("s, d' f .", "string", 65, 34.234); + test_varargs_macros (); + + // Check flexible array members. + struct incomplete_array *ia = + malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10)); + ia->datasize = 10; + for (int i = 0; i < ia->datasize; ++i) + ia->data[i] = i * 1.234; + + // Check named initializers. + struct named_init ni = { + .number = 34, + .name = L"Test wide string", + .average = 543.34343, + }; + + ni.number = 58; + + int dynamic_array[ni.number]; + dynamic_array[ni.number - 1] = 543; + + // work around unused variable warnings + return (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == 'x' + || dynamic_array[ni.number - 1] != 543); + + ; + return 0; +} +_ACEOF +for ac_arg in '' -std=gnu99 -std=c99 -c99 -AC99 -D_STDC_C99= -qlanglvl=extc99 +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c99=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c99" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c99" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c99" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5 +$as_echo "$ac_cv_prog_cc_c99" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c99" != xno; then : + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99 +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include <stdarg.h> +#include <stdio.h> +struct stat; +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89 +else + ac_cv_prog_cc_stdc=no +fi + +fi + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO Standard C" >&5 +$as_echo_n "checking for $CC option to accept ISO Standard C... " >&6; } + if ${ac_cv_prog_cc_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +fi + + case $ac_cv_prog_cc_stdc in #( + no) : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; #( + '') : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; #( + *) : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_stdc" >&5 +$as_echo "$ac_cv_prog_cc_stdc" >&6; } ;; +esac + + # disable static libs by default - we only use a couple # Check whether --enable-static was given. if test "${enable_static+set}" = set; then : @@ -7533,7 +7850,7 @@ ia64-*-hpux*) rm -rf conftest* ;; -x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. 
echo 'int i;' > conftest.$ac_ext @@ -7551,10 +7868,7 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; - powerpc64le-*linux*) - LD="${LD-ld} -m elf32lppclinux" - ;; - powerpc64-*linux*) + ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) @@ -7573,10 +7887,7 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; - powerpcle-*linux*) - LD="${LD-ld} -m elf64lppc" - ;; - powerpc-*linux*) + ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) diff --git a/configure.ac b/configure.ac index 3e0f8aa76..431e1c603 100644 --- a/configure.ac +++ b/configure.ac @@ -29,6 +29,8 @@ AC_PROG_CXX AC_PROG_CC AM_PROG_CC_C_O AM_PROG_AS +AC_PROG_CC_STDC + # disable static libs by default - we only use a couple AC_DISABLE_STATIC AC_PROG_LIBTOOL diff --git a/depcomp b/depcomp index fc98710e2..4ebd5b3a2 100755 --- a/depcomp +++ b/depcomp @@ -3,7 +3,7 @@ scriptversion=2013-05-30.07; # UTC -# Copyright (C) 1999-2014 Free Software Foundation, Inc. +# Copyright (C) 1999-2013 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by diff --git a/install-sh b/install-sh index 0b0fdcbba..377bb8687 100755 --- a/install-sh +++ b/install-sh @@ -1,7 +1,7 @@ #!/bin/sh # install - install a program, script, or datafile -scriptversion=2013-12-25.23; # UTC +scriptversion=2011-11-20.07; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the @@ -41,15 +41,19 @@ scriptversion=2013-12-25.23; # UTC # This script is compatible with the BSD install script, but was written # from scratch. -tab=' ' nl=' ' -IFS=" $tab$nl" +IFS=" "" $nl" -# Set DOITPROG to "echo" to test this script. +# set DOITPROG to echo to test this script +# Don't use :- since 4.3BSD and earlier shells don't like it. doit=${DOITPROG-} -doit_exec=${doit:-exec} +if test -z "$doit"; then + doit_exec=exec +else + doit_exec=$doit +fi # Put in absolute file names if you don't have them in your path; # or use environment vars. @@ -64,6 +68,17 @@ mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} +posix_glob='?' +initialize_posix_glob=' + test "$posix_glob" != "?" || { + if (set -f) 2>/dev/null; then + posix_glob= + else + posix_glob=: + fi + } +' + posix_mkdir= # Desired mode of installed file. @@ -82,7 +97,7 @@ dir_arg= dst_arg= copy_on_change=false -is_target_a_directory=possibly +no_target_directory= usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE @@ -122,57 +137,46 @@ while test $# -ne 0; do -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" - shift;; + shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 - case $mode in - *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) - echo "$0: invalid mode: $mode" >&2 - exit 1;; - esac - shift;; + case $mode in + *' '* | *' '* | *' +'* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; -o) chowncmd="$chownprog $2" - shift;; + shift;; -s) stripcmd=$stripprog;; - -t) - is_target_a_directory=always - dst_arg=$2 - # Protect names problematic for 'test' and other utilities. - case $dst_arg in - -* | [=\(\)!]) dst_arg=./$dst_arg;; - esac - shift;; + -t) dst_arg=$2 + # Protect names problematic for 'test' and other utilities. 
+ case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; - -T) is_target_a_directory=never;; + -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; - --) shift - break;; + --) shift + break;; - -*) echo "$0: invalid option: $1" >&2 - exit 1;; + -*) echo "$0: invalid option: $1" >&2 + exit 1;; *) break;; esac shift done -# We allow the use of options -d and -T together, by making -d -# take the precedence; this is for compatibility with GNU install. - -if test -n "$dir_arg"; then - if test -n "$dst_arg"; then - echo "$0: target directory not allowed when installing a directory." >&2 - exit 1 - fi -fi - if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. @@ -203,15 +207,6 @@ if test $# -eq 0; then exit 0 fi -if test -z "$dir_arg"; then - if test $# -gt 1 || test "$is_target_a_directory" = always; then - if test ! -d "$dst_arg"; then - echo "$0: $dst_arg: Is not a directory." >&2 - exit 1 - fi - fi -fi - if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 @@ -228,16 +223,16 @@ if test -z "$dir_arg"; then *[0-7]) if test -z "$stripcmd"; then - u_plus_rw= + u_plus_rw= else - u_plus_rw='% 200' + u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then - u_plus_rw= + u_plus_rw= else - u_plus_rw=,u+rw + u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac @@ -274,15 +269,41 @@ do # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then - if test "$is_target_a_directory" = never; then - echo "$0: $dst_arg: Is a directory" >&2 - exit 1 + if test -n "$no_target_directory"; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else - dstdir=`dirname "$dst"` + # Prefer dirname, but fall back on a substitute if dirname fails. + dstdir=` + (dirname "$dst") 2>/dev/null || + expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$dst" : 'X\(//\)[^/]' \| \ + X"$dst" : 'X\(//\)$' \| \ + X"$dst" : 'X\(/\)' \| . 2>/dev/null || + echo X"$dst" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q' + ` + test -d "$dstdir" dstdir_status=$? fi @@ -293,74 +314,74 @@ do if test $dstdir_status != 0; then case $posix_mkdir in '') - # Create intermediate dirs using mode 755 as modified by the umask. - # This is like FreeBSD 'install' as of 1997-10-28. - umask=`umask` - case $stripcmd.$umask in - # Optimize common cases. - *[2367][2367]) mkdir_umask=$umask;; - .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; - - *[0-7]) - mkdir_umask=`expr $umask + 22 \ - - $umask % 100 % 40 + $umask % 20 \ - - $umask % 10 % 4 + $umask % 2 - `;; - *) mkdir_umask=$umask,go-w;; - esac - - # With -d, create the new directory with the user-specified mode. - # Otherwise, rely on $mkdir_umask. - if test -n "$dir_arg"; then - mkdir_mode=-m$mode - else - mkdir_mode= - fi - - posix_mkdir=false - case $umask in - *[123567][0-7][0-7]) - # POSIX mkdir -p sets u+wx bits regardless of umask, which - # is incompatible with FreeBSD 'install' when (umask & 300) != 0. 
- ;; - *) - tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ - trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 - - if (umask $mkdir_umask && - exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 - then - if test -z "$dir_arg" || { - # Check for POSIX incompatibilities with -m. - # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or - # other-writable bit of parent directory when it shouldn't. - # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. - ls_ld_tmpdir=`ls -ld "$tmpdir"` - case $ls_ld_tmpdir in - d????-?r-*) different_mode=700;; - d????-?--*) different_mode=755;; - *) false;; - esac && - $mkdirprog -m$different_mode -p -- "$tmpdir" && { - ls_ld_tmpdir_1=`ls -ld "$tmpdir"` - test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" - } - } - then posix_mkdir=: - fi - rmdir "$tmpdir/d" "$tmpdir" - else - # Remove any dirs left behind by ancient mkdir implementations. - rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null - fi - trap '' 0;; - esac;; + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. + *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 + + if (umask $mkdir_umask && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + ls_ld_tmpdir=`ls -ld "$tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/d" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null + fi + trap '' 0;; + esac;; esac if $posix_mkdir && ( - umask $mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else @@ -370,51 +391,53 @@ do # directory the slow way, step by step, checking for races as we go. 
case $dstdir in - /*) prefix='/';; - [-=\(\)!]*) prefix='./';; - *) prefix='';; + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; esac + eval "$initialize_posix_glob" + oIFS=$IFS IFS=/ - set -f + $posix_glob set -f set fnord $dstdir shift - set +f + $posix_glob set +f IFS=$oIFS prefixes= for d do - test X"$d" = X && continue - - prefix=$prefix$d - if test -d "$prefix"; then - prefixes= - else - if $posix_mkdir; then - (umask=$mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break - # Don't fail if two instances are running concurrently. - test -d "$prefix" || exit 1 - else - case $prefix in - *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; - *) qprefix=$prefix;; - esac - prefixes="$prefixes '$qprefix'" - fi - fi - prefix=$prefix/ + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ done if test -n "$prefixes"; then - # Don't fail if two instances are running concurrently. - (umask $mkdir_umask && - eval "\$doit_exec \$mkdirprog $prefixes") || - test -d "$dstdir" || exit 1 - obsolete_mkdir_used=true + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true fi fi fi @@ -449,12 +472,15 @@ do # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && - old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && - new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && - set -f && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + + eval "$initialize_posix_glob" && + $posix_glob set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && - set +f && + $posix_glob set +f && + test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then @@ -467,24 +493,24 @@ do # to itself, or perhaps because mv is so ancient that it does not # support -f. { - # Now remove or move aside any old file at destination location. - # We try this two ways since rm can't unlink itself on some - # systems and the destination file might be busy for other - # reasons. In this case, the final cleanup might fail but the new - # file should still install successfully. - { - test ! -f "$dst" || - $doit $rmcmd -f "$dst" 2>/dev/null || - { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && - { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } - } || - { echo "$0: cannot unlink or rename $dst" >&2 - (exit 1); exit 1 - } - } && - - # Now rename the file to the real destination. - $doit $mvcmd "$dsttmp" "$dst" + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! 
-f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 diff --git a/m4/libtool.m4 b/m4/libtool.m4 index f12cfdf0b..56666f0ec 100644 --- a/m4/libtool.m4 +++ b/m4/libtool.m4 @@ -1312,7 +1312,7 @@ ia64-*-hpux*) rm -rf conftest* ;; -x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out which ABI we are using. echo 'int i;' > conftest.$ac_ext @@ -1326,10 +1326,7 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) x86_64-*linux*) LD="${LD-ld} -m elf_i386" ;; - powerpc64le-*linux*) - LD="${LD-ld} -m elf32lppclinux" - ;; - powerpc64-*linux*) + ppc64-*linux*|powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) @@ -1348,10 +1345,7 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; - powerpcle-*linux*) - LD="${LD-ld} -m elf64lppc" - ;; - powerpc-*linux*) + ppc*-*linux*|powerpc*-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) diff --git a/missing b/missing index f62bbae30..cdea51493 100755 --- a/missing +++ b/missing @@ -1,9 +1,9 @@ #! /bin/sh # Common wrapper for a few potentially missing GNU programs. -scriptversion=2013-10-28.13; # UTC +scriptversion=2012-06-26.16; # UTC -# Copyright (C) 1996-2014 Free Software Foundation, Inc. +# Copyright (C) 1996-2013 Free Software Foundation, Inc. # Originally written by Fran,cois Pinard <[email protected]>, 1996. # This program is free software; you can redistribute it and/or modify @@ -160,7 +160,7 @@ give_advice () ;; autom4te*) echo "You might have modified some maintainer files that require" - echo "the 'autom4te' program to be rebuilt." + echo "the 'automa4te' program to be rebuilt." program_details 'autom4te' ;; bison*|yacc*) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 75fae329f..ad0ffdadd 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -243,14 +243,19 @@ The lib389 CI tests that can be run against the Directory Server. cp %{SOURCE2} README.devel %build + %if %{use_nunc_stans} pushd ../nunc-stans-%{nunc_stans_ver} %configure --with-fhs --libdir=%{_libdir}/%{pkgname} -make %{?_smp_mflags} -mkdir -p lib -cp .libs/libnunc-stans.so.0.0.0 lib/libnunc-stans.so -mkdir -p include/nunc-stans -cp nunc-stans.h include/nunc-stans/nunc-stans.h +# We install into our build dir first, then later we install to the correct build root. +# This is to make it possible for directory server to use us, else we can't resolve +# liblfds during link time. +# Frankly, this is horrible, but until we can make nunc-stans a package, it'll +# have to do... 
:( +# Can't use SMP flags here +make DESTDIR=`pwd` install +# Remove these else it breaks libtool +rm ./%{_libdir}/%{pkgname}/*.{a,la} popd %endif @@ -262,7 +267,7 @@ OPENLDAP_FLAG="--with-openldap" NSSARGS="--with-svrcore-inc=%{_includedir} --with-svrcore-lib=%{_libdir} --with-nss-lib=%{_libdir} --with-nss-inc=%{_includedir}/nss3" %if %{use_nunc_stans} -NUNC_STANS_FLAGS="--enable-nunc-stans --with-nunc-stans=../nunc-stans-%{nunc_stans_ver}" +NUNC_STANS_FLAGS="--enable-nunc-stans --with-nunc-stans=../nunc-stans-%{nunc_stans_ver}/usr --with-nunc-stans-inc=../nunc-stans-%{nunc_stans_ver}/%{_includedir} --with-nunc-stans-lib=../nunc-stans-%{nunc_stans_ver}/%{_libdir}/%{pkgname}" %endif %if %{use_asan} @@ -286,13 +291,12 @@ make %{?_smp_mflags} %install -rm -rf $RPM_BUILD_ROOT +rm -rf $RPM_BUILD_ROOT %if %{use_nunc_stans} pushd ../nunc-stans-%{nunc_stans_ver} +# This is the *actual* install of nunc-stans. make DESTDIR="$RPM_BUILD_ROOT" install -rm -rf $RPM_BUILD_ROOT%{_includedir} $RPM_BUILD_ROOT%{_datadir} \ - $RPM_BUILD_ROOT%{_libdir}/%{pkgname}/pkgconfig popd %endif @@ -454,6 +458,14 @@ fi %{_mandir}/man8/* %exclude %{_sbindir}/ldap-agent* %exclude %{_mandir}/man1/ldap-agent.1.gz +# Don't rm these files, just exclude them! +%exclude %{_datadir}/doc/nunc-stans/* +%exclude %{_libdir}/%{pkgname}/pkgconfig/nunc-stans.pc +%exclude %{_includedir}/nunc-stans/nunc-stans.h +%exclude %{_mandir}/man3/md_docs_job-safety.3.gz +%exclude %{_mandir}/man3/ns_job_t.3.gz +%exclude %{_mandir}/man3/ns_thrpool_config.3.gz +%exclude %{_mandir}/man3/nunc-stans.h.3.gz %files devel %defattr(-,root,root,-) @@ -463,6 +475,7 @@ fi %{_libdir}/%{pkgname}/libns-dshttpd.so %if %{use_nunc_stans} %{_libdir}/%{pkgname}/libnunc-stans.so +%{_libdir}/%{pkgname}/liblfds710.so %endif %{_libdir}/pkgconfig/* @@ -474,6 +487,7 @@ fi %{_libdir}/%{pkgname}/libns-dshttpd.so.* %if %{use_nunc_stans} %{_libdir}/%{pkgname}/libnunc-stans.so.* +%{_libdir}/%{pkgname}/liblfds710.so.* %endif %files snmp
0
f7a3bc6f789ec58b3cc08e2af6b29e673aa9800b
389ds/389-ds-base
Bug 622903 - fix coverity Defect Type: Code maintainability issues https://bugzilla.redhat.com/show_bug.cgi?id=622903 Comment: Calling slapi_sdn_get_dn and assigning the return value to base is not needed.
commit f7a3bc6f789ec58b3cc08e2af6b29e673aa9800b Author: Noriko Hosoi <[email protected]> Date: Tue Aug 10 17:27:15 2010 -0700 Bug 622903 - fix coverity Defect Type: Code maintainability issues https://bugzilla.redhat.com/show_bug.cgi?id=622903 Comment: Calling slapi_sdn_get_dn and assigning the return value to base is not needed. diff --git a/ldap/servers/slapd/back-ldbm/ldbm_usn.c b/ldap/servers/slapd/back-ldbm/ldbm_usn.c index e1fe5690e..4bcd95ecf 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_usn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_usn.c @@ -64,7 +64,6 @@ ldbm_usn_init(struct ldbminfo *li) { Slapi_DN *sdn = NULL; void *node = NULL; - const char *base = NULL; int rc = 0; Slapi_Backend *be = NULL; PRUint64 last_usn = 0; @@ -77,7 +76,6 @@ ldbm_usn_init(struct ldbminfo *li) /* Search each namingContext in turn */ for ( sdn = slapi_get_first_suffix( &node, 0 ); sdn != NULL; sdn = slapi_get_next_suffix( &node, 0 )) { - base = slapi_sdn_get_dn( sdn ); be = slapi_mapping_tree_find_backend_for_sdn(sdn); slapi_log_error(SLAPI_LOG_TRACE, "ldbm_usn_init", "backend: %s\n", be->be_name);
0
a360ab7119d764b11b898d212b5405a87f912cd9
389ds/389-ds-base
Bug 745259 - Incorrect entryUSN index under high load in replicated environment https://bugzilla.redhat.com/show_bug.cgi?id=745259 Bug Description: When replication conflicts occur in replacing entryusn, dangling entryusn keys pointing to the same entryid are generated in the entryusn index file. There is no way to clean them up other than by reindexing the entryusn. Fix Description: When replication conflicts occur in the replace op and the new entry still contains the old value, the old value won't be removed from the index file. Also, if the new value is not added to the entry, the new value won't be added to the index file. The failure is signaled to the entryusn plugin by setting LDAP_MOD_IGNORE in the mod. The entryusn plugin's bepostop_modify uses this info to determine whether to increment the entryusn.
commit a360ab7119d764b11b898d212b5405a87f912cd9 Author: Noriko Hosoi <[email protected]> Date: Tue Nov 29 13:31:39 2011 -0800 Bug 745259 - Incorrect entryUSN index under high load in replicated environment https://bugzilla.redhat.com/show_bug.cgi?id=745259 Bug Description: When replication conflicts occur in replacing entryusn, dangling entryusn keys pointing the same entryid are generated in the entryusn index file. There is no way to clean them up unless reindexing the entryusn. Fix Description: When replication conflicts occur in the replace op and new entry still contains the old value, then the old value won't be removed from the index file. Also, if the new value is not added to the entry, the new value won't be added to the index file. The failure is informed to entryusn by by setting LDAP_MOD_IGNORE in the mod. Entryusn plugin bepostop_modify uses the info to determine to increment the entryusn or not. diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c index 792ea47b9..b2110196b 100644 --- a/ldap/servers/plugins/usn/usn.c +++ b/ldap/servers/plugins/usn/usn.c @@ -60,6 +60,7 @@ static int usn_bepreop_delete(Slapi_PBlock *pb); static int usn_bepreop_modify(Slapi_PBlock *pb); static int usn_bepostop(Slapi_PBlock *pb); static int usn_bepostop_delete (Slapi_PBlock *pb); +static int usn_bepostop_modify (Slapi_PBlock *pb); static int usn_start(Slapi_PBlock *pb); static int usn_close(Slapi_PBlock *pb); static int usn_get_attr(Slapi_PBlock *pb, const char* type, void *value); @@ -180,7 +181,7 @@ usn_bepostop_init(Slapi_PBlock *pb) slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_DELETE_FN, (void *)usn_bepostop_delete) != 0 || slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_MODIFY_FN, - (void *)usn_bepostop) != 0 || + (void *)usn_bepostop_modify) != 0 || slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_MODRDN_FN, (void *)usn_bepostop) != 0) { slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM, @@ -517,6 +518,53 @@ bail: return rc; } +/* count up the counter */ +static int +usn_bepostop_modify (Slapi_PBlock *pb) +{ + int rc = -1; + Slapi_Backend *be = NULL; + LDAPMod **mods = NULL; + int i; + + slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM, + "--> usn_bepostop_mod\n"); + + /* if op is not successful, don't increment the counter */ + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc); + if (LDAP_SUCCESS != rc) { + goto bail; + } + + slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods); + for (i = 0; mods && mods[i]; i++) { + if (0 == strcasecmp(mods[i]->mod_type, SLAPI_ATTR_ENTRYUSN)) { + if (mods[i]->mod_op & LDAP_MOD_IGNORE) { + slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM, + "usn_bepostop_mod: MOD_IGNORE detected\n"); + goto bail; /* conflict occurred. + skip incrementing the counter. 
*/ + } else { + break; + } + } + } + + slapi_pblock_get(pb, SLAPI_BACKEND, &be); + if (NULL == be) { + rc = LDAP_PARAM_ERROR; + goto bail; + } + + if (be->be_usn_counter) { + slapi_counter_increment(be->be_usn_counter); + } +bail: + slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM, + "<-- usn_bepostop_mod\n"); + return rc; +} + /* count up the counter */ /* if the op is delete and the op was not successful, remove preventryusn */ static int diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c index 08218ad04..5f45e0a61 100644 --- a/ldap/servers/slapd/back-ldbm/index.c +++ b/ldap/servers/slapd/back-ldbm/index.c @@ -485,7 +485,7 @@ index_addordel_entry( int index_add_mods( backend *be, - const LDAPMod **mods, + LDAPMod **mods, struct backentry *olde, struct backentry *newe, back_txn *txn @@ -580,10 +580,11 @@ index_add_mods( flags |= BE_INDEX_EQUALITY; } } else { - /* Remove duplicate value from deleted value array */ Slapi_Value *rval = valuearray_remove_value(curr_attr, deleted_valueArray, deleted_valueArray[j]); slapi_value_free( &rval ); j--; + /* indicates there was some conflict */ + mods[i]->mod_op |= LDAP_MOD_IGNORE; } } } else { @@ -604,16 +605,38 @@ index_add_mods( if ( mods_valueArray == NULL ) { rc = 0; } else { - rc = index_addordel_values_sv( be, + /* Verify if the value is in newe. + * If it is in, we will add the attr value to the index file. */ + slapi_entry_attr_find( newe->ep_entry, + mods[i]->mod_type, &curr_attr ); + + for (j = 0; mods_valueArray[j] != NULL; j++) { + /* mods_valueArray[j] is in curr_attr ==> return 0 */ + if (slapi_attr_value_find(curr_attr, + slapi_value_get_berval(mods_valueArray[j]))) { + /* The value is NOT in newe, remove it. */ + Slapi_Value *rval = valuearray_remove_value(curr_attr, + mods_valueArray, + mods_valueArray[j]); + slapi_value_free( &rval ); + /* indicates there was some conflict */ + mods[i]->mod_op |= LDAP_MOD_IGNORE; + } + } + if (mods_valueArray) { + rc = index_addordel_values_sv( be, mods[i]->mod_type, mods_valueArray, NULL, id, BE_INDEX_ADD, txn ); + } else { + rc = 0; + } } break; case LDAP_MOD_DELETE: if ( (mods[i]->mod_bvalues == NULL) || - (mods[i]->mod_bvalues[0] == NULL) ) { + (mods[i]->mod_bvalues[0] == NULL) ) { rc = 0; flags = BE_INDEX_DEL; @@ -713,7 +736,7 @@ index_add_mods( } rc = 0; break; - } + } /* switch ( mods[i]->mod_op & ~LDAP_MOD_BVALUES ) */ /* free memory */ slapi_ch_free((void **)&tmp); @@ -724,7 +747,7 @@ index_add_mods( ldbm_nasty(errmsg, 1040, rc); return( rc ); } - } + } /* for ( i = 0; mods[i] != NULL; i++ ) */ return( 0 ); } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c index ba6527f8e..943a09543 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c @@ -159,7 +159,7 @@ int modify_update_all(backend *be, Slapi_PBlock *pb, } goto error; } - retval = index_add_mods( be, (const LDAPMod **)slapi_mods_get_ldapmods_byref(mc->smods), mc->old_entry, mc->new_entry, txn ); + retval = index_add_mods( be, slapi_mods_get_ldapmods_byref(mc->smods), mc->old_entry, mc->new_entry, txn ); if ( 0 != retval ) { if (DB_LOCK_DEADLOCK != retval) { @@ -456,7 +456,7 @@ ldbm_back_modify( Slapi_PBlock *pb ) goto error_return; } ec_in_cache = 1; - retval = index_add_mods( be, (const LDAPMod**)mods, e, ec, &txn ); + retval = index_add_mods( be, mods, e, ec, &txn ); if (DB_LOCK_DEADLOCK == retval) { /* Abort and re-try */ diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c 
b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c index ac5c77549..2dadefc93 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c @@ -1432,7 +1432,7 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm /* * update the indexes: lastmod, rdn, etc. */ - retval = index_add_mods( be, (const LDAPMod **)slapi_mods_get_ldapmods_byref(smods1), e, ec, ptxn ); + retval = index_add_mods( be, slapi_mods_get_ldapmods_byref(smods1), e, ec, ptxn ); if (DB_LOCK_DEADLOCK == retval) { /* Retry txn */ @@ -1455,7 +1455,7 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm /* * update the indexes: lastmod, rdn, etc. */ - retval = index_add_mods( be, (const LDAPMod **)slapi_mods_get_ldapmods_byref(smods2), e, ec, ptxn ); + retval = index_add_mods( be, slapi_mods_get_ldapmods_byref(smods2), e, ec, ptxn ); if (DB_LOCK_DEADLOCK == retval) { /* Retry txn */ @@ -1472,7 +1472,7 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm /* * update the indexes: lastmod, rdn, etc. */ - retval = index_add_mods( be, (const LDAPMod **)slapi_mods_get_ldapmods_byref(smods3), e, ec, ptxn ); + retval = index_add_mods( be, slapi_mods_get_ldapmods_byref(smods3), e, ec, ptxn ); if (DB_LOCK_DEADLOCK == retval) { /* Retry txn */ diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h index c30e98751..771f0c5f3 100644 --- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h @@ -278,7 +278,7 @@ int idl_new_compare_dups( * index.c */ int index_addordel_entry( backend *be, struct backentry *e, int flags, back_txn *txn ); -int index_add_mods( backend *be, const LDAPMod**mods, struct backentry *olde, struct backentry *newe, back_txn *txn ); +int index_add_mods( backend *be, /*const*/LDAPMod**mods, struct backentry *olde, struct backentry *newe, back_txn *txn ); int index_addordel_string(backend *be, const char *type, const char *s, ID id, int flags, back_txn *txn); int index_addordel_values_sv( backend *be, const char *type, Slapi_Value **vals, Slapi_Value **evals, ID id, int flags, back_txn *txn ); int index_addordel_values_ext_sv( backend *be, const char *type, Slapi_Value **vals, Slapi_Value **evals, ID id, int flags, back_txn *txn,int *idl_disposition, void *buffer_handle ); diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c index 506a1f9db..a749ceec8 100644 --- a/ldap/servers/slapd/entrywsi.c +++ b/ldap/servers/slapd/entrywsi.c @@ -719,11 +719,15 @@ entry_apply_mod_wsi(Slapi_Entry *e, const LDAPMod *mod, const CSN *csn, int urp) retVal = entry_replace_present_values_wsi( e, mod->mod_type, mod->mod_bvalues, csn, urp ); break; } - for ( i = 0; mod->mod_bvalues != NULL && mod->mod_bvalues[i] != NULL; i++ ) - { - LDAPDebug( LDAP_DEBUG_ARGS, " %s: %s\n", mod->mod_type, mod->mod_bvalues[i]->bv_val, 0 ); + if ( LDAPDebugLevelIsSet( LDAP_DEBUG_ARGS )) { + for ( i = 0; + mod->mod_bvalues != NULL && mod->mod_bvalues[i] != NULL; + i++ ) { + LDAPDebug( LDAP_DEBUG_ARGS, " %s: %s\n", + mod->mod_type, mod->mod_bvalues[i]->bv_val, 0 ); + } + LDAPDebug( LDAP_DEBUG_ARGS, " -\n", 0, 0, 0 ); } - LDAPDebug( LDAP_DEBUG_ARGS, " -\n", 0, 0, 0 ); return retVal; } diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h index 2b717a09c..fda28a2cc 100644 --- a/ldap/servers/slapd/slapi-private.h +++ b/ldap/servers/slapd/slapi-private.h @@ -440,8 +440,10 @@ unsigned long 
operation_get_type(Slapi_Operation *op); * #define LDAP_MOD_ADD 0x00 * #define LDAP_MOD_DELETE 0x01 * #define LDAP_MOD_REPLACE 0x02 + * #define LDAP_MOD_INCREMENT 0x03 -- Openldap extension + * #define LDAP_MOD_BVALUES 0x80 */ -#define LDAP_MOD_IGNORE 0x09 +#define LDAP_MOD_IGNORE 0x100 /* dl.c */
0
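A side note on the LDAP_MOD_IGNORE change in the patch above: the constant moves from 0x09 to 0x100, presumably so that index_add_mods() can OR it into mod_op as an extra flag bit without colliding with the op codes (0x00-0x03) or LDAP_MOD_BVALUES (0x80) listed in the slapi-private.h comment. A minimal Python sketch of that bit arithmetic follows; the scenario is illustrative only, and the constant values are copied from the patch.

# Constants as listed in the slapi-private.h comment of the patch.
LDAP_MOD_ADD       = 0x00
LDAP_MOD_DELETE    = 0x01
LDAP_MOD_REPLACE   = 0x02
LDAP_MOD_INCREMENT = 0x03   # OpenLDAP extension
LDAP_MOD_BVALUES   = 0x80
LDAP_MOD_IGNORE    = 0x100  # new value; a bit outside all of the above

# A replace mod carrying binary values, as index_add_mods() would see it.
mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES

# On a replication conflict the patch flags the mod in place.
mod_op |= LDAP_MOD_IGNORE

# usn_bepostop_modify() tests the flag, and the op code is still intact.
assert mod_op & LDAP_MOD_IGNORE
assert (mod_op & ~(LDAP_MOD_BVALUES | LDAP_MOD_IGNORE)) == LDAP_MOD_REPLACE

# With the old value 0x09, OR-ing the flag would corrupt the op code:
# 0x02 | 0x09 == 0x0B, which no longer maps to any LDAP_MOD_* operation.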
c058a2b574e441037478f711d3c314a2974ee4d1
389ds/389-ds-base
Issue 5793 - UI - Fix minor crashes (#5827) Description: After the massive webpack-to-esbuild bundler rework, fix two minor crashes that happened because of copy-paste errors. Related: https://github.com/389ds/389-ds-base/issues/5793 Reviewed by: @mreynolds389 (Thanks!)
commit c058a2b574e441037478f711d3c314a2974ee4d1 Author: Simon Pichugin <[email protected]> Date: Thu Jul 6 12:24:48 2023 -0700 Issue 5793 - UI - Fix minor crashes (#5827) Description: After a massive move from webpack to esbuild bundler rework, fix two minor crashes which happened because of minor copy-paste errors. Related: https://github.com/389ds/389-ds-base/issues/5793 Reviewed by: @mreynolds389 (Thanks!) diff --git a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx index 7df4e784b..394e57728 100644 --- a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx +++ b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx @@ -373,6 +373,15 @@ class MemberOf extends React.Component { }; } + handleToggleFixupModal() { + this.setState(prevState => ({ + fixupModalShow: !prevState.fixupModalShow, + fixupDN: "", + fixupFilter: "", + savingModal: false, + })); + } + validateConfig() { const errObj = {}; let all_good = true; diff --git a/src/cockpit/389-console/src/lib/server/serverTables.jsx b/src/cockpit/389-console/src/lib/server/serverTables.jsx index ab20b887b..026108b8b 100644 --- a/src/cockpit/389-console/src/lib/server/serverTables.jsx +++ b/src/cockpit/389-console/src/lib/server/serverTables.jsx @@ -52,12 +52,12 @@ export class SASLTable extends React.Component { }); }; - this.onSort = this.onSort.bind(this); + this.handleSort = this.handleSort.bind(this); this.handleOnCollapse = this.handleOnCollapse.bind(this); this.handleOnSearchChange = this.handleOnSearchChange.bind(this); } - handleOnSort(_event, index, direction) { + handleSort(_event, index, direction) { const sorted_rows = []; const rows = []; let count = 0; @@ -241,7 +241,7 @@ export class SASLTable extends React.Component { rows={tableRows} variant={TableVariant.compact} sortBy={sortBy} - onSort={this.handleOnSort} + onSort={this.handleSort} onCollapse={this.handleOnCollapse} actions={tableRows.length > 0 ? this.actions() : null} dropdownPosition="right"
0
e97cb61d497f2b6765ba2786b1da25d32304ef90
389ds/389-ds-base
Issue 5162 - CI - fix error message for invalid PEM file Description: With recent changes to certificate validation, the error message has changed and the CI needs to be updated. relates: https://github.com/389ds/389-ds-base/issues/5162 Reviewed by: spichugi (Thanks!)
commit e97cb61d497f2b6765ba2786b1da25d32304ef90 Author: Mark Reynolds <[email protected]> Date: Mon Feb 20 16:34:38 2023 -0500 Issue 5162 - CI - fix error message for invalid pem file Description: With recent changes to certificate validation the error message has changed and the CI needs to be updated. relates: https://github.com/389ds/389-ds-base/issues/5162 Reviewed by: spichugi(Thanks!) diff --git a/dirsrvtests/tests/suites/clu/dsctl_tls_test.py b/dirsrvtests/tests/suites/clu/dsctl_tls_test.py index 371082d36..22360fa91 100644 --- a/dirsrvtests/tests/suites/clu/dsctl_tls_test.py +++ b/dirsrvtests/tests/suites/clu/dsctl_tls_test.py @@ -45,7 +45,7 @@ def test_tls_command_returns_error_text(topo): assert '255' not in str(e) assert 'improperly formatted name' in str(e) - # dsctl localhost tls remove-cert + # dsctl localhost tls remove-cert try: tls.del_cert("bad") assert False @@ -60,7 +60,7 @@ def test_tls_command_returns_error_text(topo): assert False except ValueError as e: assert '255' not in str(e) - assert 'could not decode certificate' in str(e) + assert 'Unable to load PEM file' in str(e) # dsctl localhost tls import-server-cert try:
0
51e05df9c37c66206041f026c9a67ec17bc9ea4a
389ds/389-ds-base
Ticket 47988: Schema learning mechanism, in replication, unable to extend an existing definition Bug Description: At the beginning of a replication session, a supplier checks the status of the remote schema vs its own schema. If the remote schema contains new/extended definitions, the supplier learns those definitions. It learns through an internal MOD_ADD operation on cn=schema. For an extended definition, this fails because the definition already exists. Fix Description: It needs to MOD_DEL and MOD_ADD those extended definitions, while it only needs a MOD_ADD for new definitions. It uses the field 'old_value' in 'struct schema_mods_indexes' to determine if it needs to delete some definitions. Some definitions cannot be deleted: - if an objectclass is standard or is a superior of other objectclasses - if an attribute is a standard definition or is used in an objectclass. This was problematic for updating the schema, so the fix relaxes those controls for internal operations. https://fedorahosted.org/389/ticket/47988 Reviewed by: ? Platforms tested: F17 Flag Day: no Doc impact: no
commit 51e05df9c37c66206041f026c9a67ec17bc9ea4a Author: Thierry bordaz (tbordaz) <[email protected]> Date: Thu Jan 22 14:29:52 2015 +0100 Ticket 47988: Schema learning mechanism, in replication, unable to extend an existing definition Bug Description: At the beginning of a replication session, a supplier checks the status of remote schema vs its own schema. If the remote schema contains new/extended definitions, the supplier learns those definitions. It learns through internal MOD_ADD operation on cn=schema. For extending definition, this fails because the definition already exists. Fix Description: It needs to MOD_DEL and MOD_ADD those extended definitions while it needs to do MOD_ADD for new definitions. It uses the field 'old_value' in 'struct schema_mods_indexes' to determine if it needs to del some definitions. Some definitions can not be deleted - if an objectclass is standard or is a superior of others oc - if an attribute is a standard definition or is used in objectclass This was problematic for updating the schema, so the fix is relaxing those controls for internal operations https://fedorahosted.org/389/ticket/47988 Reviewed by: ? Platforms tested: F17 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c index 969aa0708..398ff98e7 100644 --- a/ldap/servers/plugins/replication/repl5_connection.c +++ b/ldap/servers/plugins/replication/repl5_connection.c @@ -1810,6 +1810,7 @@ conn_push_schema(Repl_Connection *conn, CSN **remotecsn) CSN *localcsn = NULL; Slapi_PBlock *spb = NULL; char localcsnstr[CSN_STRSIZE + 1] = {0}; + char remotecnsstr[CSN_STRSIZE+1] = {0}; if (!remotecsn) { @@ -1838,6 +1839,16 @@ conn_push_schema(Repl_Connection *conn, CSN **remotecsn) } else { + if (*remotecsn) { + csn_as_string (*remotecsn, PR_FALSE, remotecnsstr); + csn_as_string (localcsn, PR_FALSE, localcsnstr); + slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, + "[S] Checking consumer schema localcsn:%s / remotecsn:%s\n", localcsnstr, remotecnsstr); + } else { + csn_as_string (localcsn, PR_FALSE, localcsnstr); + slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, + "[S] Checking consumer schema localcsn:%s / remotecsn:NULL\n", localcsnstr); + } if (!update_consumer_schema(conn)) { /* At least one schema definition (attributetypes/objectclasses) of the consumer * is a superset of the supplier. 
@@ -1846,7 +1857,11 @@ conn_push_schema(Repl_Connection *conn, CSN **remotecsn) * So it could be possible that a second attempt (right now) of update_consumer_schema * would be successful */ + slapi_log_error(SLAPI_LOG_REPL, "schema", + "[S] schema definitions may have been learned\n"); if (!update_consumer_schema(conn)) { + slapi_log_error(SLAPI_LOG_REPL, "schema", + "[S] learned definitions are not suffisant to try to push the schema \n"); return_value = CONN_OPERATION_FAILED; } } @@ -1862,6 +1877,8 @@ conn_push_schema(Repl_Connection *conn, CSN **remotecsn) memcpy(remotecsnstr, remote_schema_csn_bervals[0]->bv_val, remote_schema_csn_bervals[0]->bv_len); remotecsnstr[remote_schema_csn_bervals[0]->bv_len] = '\0'; + slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, + "[S] Reread remotecsn:%s\n", remotecsnstr); *remotecsn = csn_new_by_string(remotecsnstr); if (*remotecsn && (csn_compare(localcsn, *remotecsn) <= 0)) { return_value = CONN_SCHEMA_NO_UPDATE_NEEDED; diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index 52f29c238..e98150ed8 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -139,6 +139,7 @@ typedef struct repl_schema_policy { struct schema_mods_indexes { int index; char *new_value; + char *old_value; struct schema_mods_indexes *next; }; @@ -155,9 +156,9 @@ static int oc_check_required(Slapi_PBlock *, Slapi_Entry *,struct objclass *); static int oc_check_allowed_sv(Slapi_PBlock *, Slapi_Entry *e, const char *type, struct objclass **oclist ); static int schema_delete_objectclasses ( Slapi_Entry *entryBefore, LDAPMod *mod, char *errorbuf, size_t errorbufsize, - int schema_ds4x_compat ); + int schema_ds4x_compat, int is_internal_operation); static int schema_delete_attributes ( Slapi_Entry *entryBefore, - LDAPMod *mod, char *errorbuf, size_t errorbufsize); + LDAPMod *mod, char *errorbuf, size_t errorbufsize, int is_internal_operation); static int schema_add_attribute ( Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize, int schema_ds4x_compat ); static int schema_add_objectclass ( Slapi_PBlock *pb, LDAPMod *mod, @@ -2074,7 +2075,9 @@ modify_schema_dse (Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entr int schema_modify_enabled = config_get_schemamod(); int reapply_mods = 0; int is_replicated_operation = 0; - + int is_internal_operation = 0; + Slapi_Operation *operation = NULL; + if (!schema_modify_enabled) { *returncode = LDAP_UNWILLING_TO_PERFORM; schema_create_errormsg( returntext, SLAPI_DSE_RETURNTEXT_SIZE, @@ -2085,6 +2088,8 @@ modify_schema_dse (Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entr slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &mods ); slapi_pblock_get( pb, SLAPI_IS_REPLICATED_OPERATION, &is_replicated_operation); + slapi_pblock_get( pb, SLAPI_OPERATION, &operation); + is_internal_operation = slapi_operation_is_flag_set(operation, SLAPI_OP_FLAG_INTERNAL); /* In case we receive a schema from a supplier, check if we can accept it * (it is a superset of our own schema). 
@@ -2153,11 +2158,11 @@ modify_schema_dse (Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entr if (SLAPI_IS_MOD_DELETE(mods[i]->mod_op)) { if (strcasecmp (mods[i]->mod_type, "objectclasses") == 0) { *returncode = schema_delete_objectclasses (entryBefore, mods[i], - returntext, SLAPI_DSE_RETURNTEXT_SIZE, schema_ds4x_compat ); + returntext, SLAPI_DSE_RETURNTEXT_SIZE, schema_ds4x_compat, is_internal_operation); } else if (strcasecmp (mods[i]->mod_type, "attributetypes") == 0) { *returncode = schema_delete_attributes (entryBefore, mods[i], - returntext, SLAPI_DSE_RETURNTEXT_SIZE ); + returntext, SLAPI_DSE_RETURNTEXT_SIZE, is_internal_operation); } else { *returncode= LDAP_NO_SUCH_ATTRIBUTE; @@ -2196,6 +2201,7 @@ modify_schema_dse (Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entr schema_create_errormsg( returntext, SLAPI_DSE_RETURNTEXT_SIZE, schema_errprefix_generic, mods[i]->mod_type, "Replace is not allowed on the subschema subentry" ); + slapi_log_error(SLAPI_LOG_REPL, "schema", "modify_schema_dse: Replace is not allowed on the subschema subentry\n"); rc = SLAPI_DSE_CALLBACK_ERROR; } else { if (strcasecmp (mods[i]->mod_type, "attributetypes") == 0) { @@ -2264,7 +2270,7 @@ modify_schema_dse (Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entr * Add a new objectclass */ *returncode = schema_add_objectclass ( pb, mods[i], returntext, - SLAPI_DSE_RETURNTEXT_SIZE, schema_ds4x_compat ); + SLAPI_DSE_RETURNTEXT_SIZE, schema_ds4x_compat); } else { if ( schema_ds4x_compat ) { @@ -2465,16 +2471,20 @@ oc_add_nolock(struct objclass *newoc) */ static int schema_delete_objectclasses( Slapi_Entry *entryBefore, LDAPMod *mod, - char *errorbuf, size_t errorbufsize, int schema_ds4x_compat ) + char *errorbuf, size_t errorbufsize, int schema_ds4x_compat, int is_internal_operation) { int i; int rc = LDAP_SUCCESS; /* optimistic */ struct objclass *poc, *poc2, *delete_oc = NULL; if ( NULL == mod->mod_bvalues ) { - schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_oc, - NULL, "Cannot remove all schema object classes" ); + if (is_internal_operation) { + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_objectclasses: Remove all objectclass in Internal op\n"); + } else { + schema_create_errormsg(errorbuf, errorbufsize, schema_errprefix_oc, + NULL, "Cannot remove all schema object classes"); return LDAP_UNWILLING_TO_PERFORM; + } } for (i = 0; mod->mod_bvalues[i]; i++) { @@ -2492,11 +2502,19 @@ schema_delete_objectclasses( Slapi_Entry *entryBefore, LDAPMod *mod, for (poc2 = g_get_global_oc_nolock(); poc2 != NULL; poc2 = poc2->oc_next) { if (poc2->oc_superior && (strcasecmp (poc2->oc_superior, delete_oc->oc_name) == 0)) { - schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_oc, + if (is_internal_operation) { + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_objectclasses: Should not delete object class (%s) which has child object classes" + ". 
But accept it because it is internal operation\n", + delete_oc->oc_name); + } else { + schema_create_errormsg(errorbuf, errorbufsize, schema_errprefix_oc, delete_oc->oc_name, "Cannot delete an object class" - " which has child object classes" ); - rc = LDAP_UNWILLING_TO_PERFORM; - goto unlock_and_return; + " which has child object classes"); + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_objectclasses: Cannot delete an object class (%s) which has child object classes\n", + delete_oc->oc_name); + rc = LDAP_UNWILLING_TO_PERFORM; + goto unlock_and_return; + } } } @@ -2505,10 +2523,19 @@ schema_delete_objectclasses( Slapi_Entry *entryBefore, LDAPMod *mod, } else { - schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_oc, - delete_oc->oc_name, "Cannot delete a standard object class" ); - rc = LDAP_UNWILLING_TO_PERFORM; - goto unlock_and_return; + if (is_internal_operation) { + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_objectclasses: Should not delete a standard object class (%s)" + ". But accept it because it is internal operation\n", + delete_oc->oc_name); + oc_delete_nolock (poc->oc_name); + } else { + schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_oc, + delete_oc->oc_name, "Cannot delete a standard object class" ); + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_objectclasses: Cannot delete a standard object class (%s)\n", + delete_oc->oc_name); + rc = LDAP_UNWILLING_TO_PERFORM; + goto unlock_and_return; + } } } else { @@ -2552,7 +2579,7 @@ schema_return(int rc,struct sizedbuffer * psb1,struct sizedbuffer *psb2,struct s */ static int schema_delete_attributes ( Slapi_Entry *entryBefore, LDAPMod *mod, - char *errorbuf, size_t errorbufsize) + char *errorbuf, size_t errorbufsize, int is_internal_operation) { char *attr_ldif, *oc_list_type = ""; asyntaxinfo *a; @@ -2563,10 +2590,14 @@ schema_delete_attributes ( Slapi_Entry *entryBefore, LDAPMod *mod, struct sizedbuffer *psbAttrSyntax= sizedbuffer_construct(BUFSIZ); if (NULL == mod->mod_bvalues) { - schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_at, + if (is_internal_operation) { + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_attributes: Remove all attributetypes in Internal op\n"); + } else { + schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_at, NULL, "Cannot remove all schema attribute types" ); - return schema_return(LDAP_UNWILLING_TO_PERFORM,psbAttrOid,psbAttrName, + return schema_return(LDAP_UNWILLING_TO_PERFORM,psbAttrOid,psbAttrName, psbAttrSyntax,NULL); + } } for (i = 0; mod->mod_bvalues[i]; i++) { @@ -2592,12 +2623,20 @@ schema_delete_attributes ( Slapi_Entry *entryBefore, LDAPMod *mod, if ((a = attr_syntax_get_by_name ( psbAttrName->buffer, 0 )) != NULL ) { /* only modify attrs which were user defined */ if (a->asi_flags & SLAPI_ATTR_FLAG_STD_ATTR) { - schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_at, - psbAttrName->buffer, - "Cannot delete a standard attribute type" ); - attr_syntax_return( a ); - return schema_return(LDAP_UNWILLING_TO_PERFORM,psbAttrOid,psbAttrName, - psbAttrSyntax,NULL); + if (is_internal_operation) { + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_attributes: Should not delete a standard attribute type (%s)" + ". 
But accept it because it is internal operation\n", + psbAttrName->buffer); + } else { + schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_at, + psbAttrName->buffer, + "Cannot delete a standard attribute type"); + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_attributes: Cannot delete a standard attribute type (%s)\n", + psbAttrName->buffer); + attr_syntax_return(a); + return schema_return(LDAP_UNWILLING_TO_PERFORM, psbAttrOid, psbAttrName, + psbAttrSyntax, NULL); + } } /* Do not allow deletion if referenced by an object class. */ @@ -2627,17 +2666,32 @@ schema_delete_attributes ( Slapi_Entry *entryBefore, LDAPMod *mod, } if (attr_in_use_by_an_oc) { - schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_at, - psbAttrName->buffer, "Is included in the %s list for object class %s. Cannot delete.", - oc_list_type, oc->oc_name ); - break; + if (is_internal_operation) { + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_attributes: Should not delete an attribute (%s) used in oc (%s)" + ". But accept it because it is internal operation\n", + oc_list_type, oc->oc_name); + } else { + schema_create_errormsg(errorbuf, errorbufsize, schema_errprefix_at, + psbAttrName->buffer, "Is included in the %s list for object class %s. Cannot delete.", + oc_list_type, oc->oc_name); + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_attributes: Could delete an attribute (%s) used in oc (%s)" + ". But accept it because it is internal operation\n", + oc_list_type, oc->oc_name); + break; + } } } oc_unlock(); if (attr_in_use_by_an_oc) { - attr_syntax_return( a ); - return schema_return(LDAP_UNWILLING_TO_PERFORM,psbAttrOid,psbAttrName, - psbAttrSyntax,NULL); + if (is_internal_operation) { + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_delete_attributes: Should not delete an attribute used in oc" + ". But accept it because it is internal operation\n"); + + } else { + attr_syntax_return(a); + return schema_return(LDAP_UNWILLING_TO_PERFORM, psbAttrOid, psbAttrName, + psbAttrSyntax, NULL); + } } /* Delete it. 
*/ @@ -2744,7 +2798,10 @@ add_oc_internal(struct objclass *pnew_oc, char *errorbuf, size_t errorbufsize, } } - /* check to see if the superior oc exists */ + /* check to see if the superior oc exists + * This is not enforced for internal op (when learning new schema + * definitions from a replication session) + */ if (!rc && pnew_oc->oc_superior && ((psup_oc = oc_find_nolock (pnew_oc->oc_superior, NULL, PR_FALSE)) == NULL)) { schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_oc, @@ -2798,7 +2855,10 @@ add_oc_internal(struct objclass *pnew_oc, char *errorbuf, size_t errorbufsize, sizedbuffer_destroy(psbOcOid); } - /* check to see if the oc's attributes are valid */ + /* check to see if the oc's attributes are valid + * This is not checked if this is an internal operation (learning schema + * definitions from a replication session) + */ if (!rc && !(flags & DSE_SCHEMA_NO_CHECK) && schema_check_oc_attrs ( pnew_oc, errorbuf, errorbufsize, 0 /* don't strip options */ ) == 0 ) { @@ -6276,11 +6336,101 @@ schema_oc_superset_check(struct objclass *oc_list1, struct objclass *oc_list2, c return rc; } + +static char * +schema_oc_to_string(struct objclass *oc) +{ + char *oc_str; + int i; + int size = 0; + + /* Compute the size of the string that can contain + * the oc definition and allocates it + */ + if (oc->oc_oid) size += strlen(oc->oc_oid); + if (oc->oc_name) size += strlen(oc->oc_name); + if (oc->oc_desc) size += strlen(oc->oc_desc); + if (oc->oc_orig_required) { + for (i =0; oc->oc_orig_required[i] != NULL; i++) { + size += strlen(oc->oc_orig_required[i]); + size += 3; + } + } + if (oc->oc_orig_allowed) { + for (i =0; oc->oc_orig_allowed[i] != NULL; i++) { + size += strlen(oc->oc_orig_allowed[i]); + size += 3; + } + } + size += strlen(schema_oc_kind_strings_with_spaces[oc->oc_kind]); + + size += 128; /* for all keywords: NAME, DESC, SUP... */ + if ((oc_str = (char *) slapi_ch_calloc(1, size)) == NULL) { + return NULL; + } + + /* OID + name */ + sprintf(oc_str, "( %s NAME '%s'", (oc->oc_oid) ? 
oc->oc_oid : "", oc->oc_name); + + /* description */ + strcat(oc_str, " DESC '"); + if (oc->oc_desc) { + strcat(oc_str, oc->oc_desc); + } + strcat(oc_str, "'"); + + /* SUP */ + if (oc->oc_superior) { + strcat(oc_str, " SUP '"); + strcat(oc_str, oc->oc_superior); + strcat(oc_str, "'"); + } + + /* oc_kind */ + strcat(oc_str, schema_oc_kind_strings_with_spaces[oc->oc_kind]); + + /* MUST */ + if (oc->oc_orig_required) { + strcat(oc_str, " MUST ( "); + for ( i = 0; oc->oc_orig_required[i] != NULL; ++i ) { + if (i > 0) { + strcat(oc_str, " $ "); + } + strcat(oc_str, oc->oc_orig_required[i]); + } + strcat(oc_str, " ) "); + } + + /* MAY */ + if (oc->oc_orig_allowed) { + strcat(oc_str, " MAY ( "); + for ( i = 0; oc->oc_orig_allowed[i] != NULL; ++i ) { + if (i > 0) { + strcat(oc_str, " $ "); + } + strcat(oc_str, oc->oc_orig_allowed[i]); + } + strcat(oc_str, " ) "); + } + + /* flags */ + if (oc->oc_flags & OC_FLAG_USER_OC) { + strcat(oc_str, " X-ORIGIN 'blahblahblah'"); + } + + strcat(oc_str, " )"); + slapi_log_error(SLAPI_LOG_REPL, "schema", "schema_oc_to_string: replace (old[%d]=%s)\n", + size, oc_str); + + return(oc_str); + +} /* call must hold oc_lock at least in read */ static struct schema_mods_indexes * schema_list_oc2learn(struct objclass *oc_remote_list, struct objclass *oc_local_list, int replica_role) { struct objclass *oc_remote, *oc_local; struct schema_mods_indexes *head = NULL, *mods_index; + struct schema_mods_indexes *tail = NULL; int index = 0; int repl_schema_policy; const char *message; @@ -6320,11 +6470,22 @@ schema_list_oc2learn(struct objclass *oc_remote_list, struct objclass *oc_local_ continue; } - /* insert it in the list */ + /* insert it at the end of the list + * to keep the order of the original schema + * For example superior oc should be declared first + */ mods_index->index = index; - mods_index->next = head; + mods_index->next = NULL; mods_index->new_value = NULL; - head = mods_index; + if (oc_local) { + mods_index->old_value = schema_oc_to_string(oc_local); + } + if (head == NULL) { + head = mods_index; + } else { + tail->next = mods_index; + } + tail = mods_index; } } slapi_rwlock_unlock( schema_policy_lock ); @@ -7184,17 +7345,27 @@ modify_schema_internal_mod(Slapi_DN *sdn, Slapi_Mods *smods) /* do modify */ slapi_modify_internal_pb (newpb); slapi_pblock_get (newpb, SLAPI_PLUGIN_INTOP_RESULT, &op_result); - if (op_result == LDAP_SUCCESS) { - /* Update the schema csn if the operation succeeded */ - schema_csn = csn_new(); - if (NULL != schema_csn) { - csn_set_replicaid(schema_csn, 0); - csn_set_time(schema_csn, current_time()); - g_set_global_schema_csn(schema_csn); - } - } + if (op_result == LDAP_SUCCESS) { + char *type; - slapi_pblock_destroy(newpb); + if (smods && smods->mods) { + type = smods->mods[0]->mod_type; + } else { + type = "unknown"; + } + slapi_log_error(SLAPI_LOG_REPL, "schema", "modify_schema_internal_mod: successfully learn %s definitions\n", type); + /* Update the schema csn if the operation succeeded */ + schema_csn = csn_new(); + if (NULL != schema_csn) { + csn_set_replicaid(schema_csn, 0); + csn_set_time(schema_csn, current_time()); + g_set_global_schema_csn(schema_csn); + } + } else { + slapi_log_error(SLAPI_LOG_FATAL, "schema", "modify_schema_internal_mod: fail to learn schema definitions (%d) \n", op_result); + } + + slapi_pblock_destroy(newpb); } /* Prepare slapi_mods for the internal mod @@ -7202,32 +7373,80 @@ modify_schema_internal_mod(Slapi_DN *sdn, Slapi_Mods *smods) */ static void modify_schema_prepare_mods(Slapi_Mods *smods, char 
*type, struct schema_mods_indexes *values) -{ - struct schema_mods_indexes *object; - struct berval *bv; - struct berval **bvps; - int nb_values, i; - - for (object = values, nb_values = 0; object != NULL; object = object->next, nb_values++); - bvps = (struct berval **) slapi_ch_calloc(1, (nb_values + 1) * sizeof(struct berval *)); - - +{ + struct schema_mods_indexes *object; + struct berval *bv; + struct berval **bvps_del = NULL; + struct berval **bvps_add = NULL; + int nb_values_del, nb_values_add, i; + int nb_mods; + + /* Checks the values to delete */ + for (object = values, nb_values_del = 0; object != NULL; object = object->next) { + if (object->old_value) { + nb_values_del++; + } + } + if (nb_values_del) { + bvps_del = (struct berval **) slapi_ch_calloc(1, (nb_values_del + 1) * sizeof (struct berval *)); + + for (i = 0, object = values; object != NULL; object = object->next) { + if (object->old_value) { + bv = (struct berval *) slapi_ch_malloc(sizeof (struct berval)); + bv->bv_len = strlen(object->old_value); + bv->bv_val = (void*) object->old_value; + bvps_del[i] = bv; + i++; + slapi_log_error(SLAPI_LOG_REPL, "schema", "MOD[%d] del (%s): %s\n", i, type, object->old_value); + } + } + bvps_del[nb_values_del] = NULL; + } + + /* Checks the values to add */ + for (object = values, nb_values_add = 0; object != NULL; object = object->next, nb_values_add++); + + if (nb_values_add) { + bvps_add = (struct berval **) slapi_ch_calloc(1, (nb_values_add + 1) * sizeof (struct berval *)); + + for (i = 0, object = values; object != NULL; i++, object = object->next) { - bv = (struct berval *) slapi_ch_malloc(sizeof(struct berval)); - bv->bv_len = strlen(object->new_value); - bv->bv_val = (void*) object->new_value; - bvps[i] = bv; - slapi_log_error(SLAPI_LOG_REPL, "schema", "MOD[%d] add (%s): %s\n", i, type, object->new_value); - } - bvps[nb_values] = NULL; - slapi_mods_init (smods, 2); - slapi_mods_add_modbvps( smods, LDAP_MOD_ADD, type, bvps ); - for (i = 0; bvps[i] != NULL; i++) { - /* bv_val should not be free. It belongs to the incoming MOD */ - slapi_ch_free((void **) &bvps[i]); - } - slapi_ch_free((void **) &bvps); - + bv = (struct berval *) slapi_ch_malloc(sizeof (struct berval)); + bv->bv_len = strlen(object->new_value); + bv->bv_val = (void*) object->new_value; + bvps_add[i] = bv; + slapi_log_error(SLAPI_LOG_REPL, "schema", "MOD[%d] add (%s): %s\n", i, type, object->new_value); + } + bvps_add[nb_values_add] = NULL; + } + + /* Prepare the mods */ + nb_mods = 1; + if (bvps_del) nb_mods++; + if (bvps_add) nb_mods++; + slapi_mods_init(smods, nb_mods); + if (bvps_del) slapi_mods_add_modbvps(smods, LDAP_MOD_DELETE, type, bvps_del); + if (bvps_add) slapi_mods_add_modbvps(smods, LDAP_MOD_ADD, type, bvps_add); + + + /* clean up */ + if (bvps_del) { + + for (i = 0; bvps_del[i] != NULL; i++) { + /* bv_val should not be free. It belongs to the incoming MOD */ + slapi_ch_free((void **) &bvps_del[i]); + } + slapi_ch_free((void **) &bvps_del); + } + + if (bvps_add) { + + for (i = 0; bvps_add[i] != NULL; i++) { + /* bv_val should not be free. It belongs to the incoming MOD */ + slapi_ch_free((void **) &bvps_add[i]); + } + slapi_ch_free((void **) &bvps_add); + } } /* called by modify_schema_dse/supplier_learn_new_definitions to learn new
0
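The fix above replaces an extended definition by sending a MOD_DELETE of the old value together with a MOD_ADD of the new value in a single modify of cn=schema. A minimal client-side sketch of that two-mod modify, written against the OpenLDAP C API with a placeholder URI, placeholder credentials, and an invented example objectclass (this is not the server-internal slapi_mods code):

#include <stdio.h>
#include <string.h>
#include <ldap.h>

int main(void)
{
    LDAP *ld = NULL;
    int version = LDAP_VERSION3;
    int rc = ldap_initialize(&ld, "ldap://localhost:389");   /* placeholder URI */
    if (rc != LDAP_SUCCESS) {
        fprintf(stderr, "ldap_initialize: %s\n", ldap_err2string(rc));
        return 1;
    }
    ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &version);

    struct berval creds;
    creds.bv_val = "secret";                                  /* placeholder password */
    creds.bv_len = strlen(creds.bv_val);
    rc = ldap_sasl_bind_s(ld, "cn=Directory Manager", LDAP_SASL_SIMPLE,
                          &creds, NULL, NULL, NULL);
    if (rc != LDAP_SUCCESS) {
        fprintf(stderr, "bind: %s\n", ldap_err2string(rc));
        goto out;
    }

    /* Old and new forms of the same (invented) objectclass definition. */
    char *old_vals[] = { "( 2.16.840.1.113730.3.2.999 NAME 'exampleOC' SUP top AUXILIARY MAY description )", NULL };
    char *new_vals[] = { "( 2.16.840.1.113730.3.2.999 NAME 'exampleOC' SUP top AUXILIARY MAY ( description $ seeAlso ) )", NULL };

    LDAPMod del_mod, add_mod;
    del_mod.mod_op = LDAP_MOD_DELETE;     /* drop the existing definition ... */
    del_mod.mod_type = "objectClasses";
    del_mod.mod_values = old_vals;
    add_mod.mod_op = LDAP_MOD_ADD;        /* ... and add the extended one in the same modify */
    add_mod.mod_type = "objectClasses";
    add_mod.mod_values = new_vals;
    LDAPMod *mods[] = { &del_mod, &add_mod, NULL };

    rc = ldap_modify_ext_s(ld, "cn=schema", mods, NULL, NULL);
    printf("modify cn=schema: %s\n", ldap_err2string(rc));

out:
    ldap_unbind_ext(ld, NULL, NULL);
    return rc == LDAP_SUCCESS ? 0 : 1;
}

Sending the delete and the add in one modify keeps the schema entry consistent for readers, which is the same reason the patch packs both changes into a single internal operation.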
c6a72a50e6c948647b220e4a44978f2c9c7e8466
389ds/389-ds-base
Ticket #47400 - MMR stress test with dna enabled causes a deadlock Bug description: Under heavy add/delete of posix user entries, dna_update_config_event causes a deadlock. Fix description: dna_update_config_event starts a transaction before updating the shared config entry to avoid the deadlock situation. https://fedorahosted.org/389/ticket/47400 Reviewed by Rich (Thank you!!)
commit c6a72a50e6c948647b220e4a44978f2c9c7e8466 Author: Noriko Hosoi <[email protected]> Date: Wed Jun 19 16:19:28 2013 -0700 Ticket #47400 - MMR stress test with dna enabled causes a deadlock Bug description: Under the heavy add/delete posix user entries, dna_update_config_event causes a deadlock. Fix description: dna_update_config_event starts transaction before updating the shared config entry to avoid the deadlock situation. https://fedorahosted.org/389/ticket/47400 Reviewed by Rich (Thank you!!) diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c index ff12dded8..f5ebec6fb 100644 --- a/ldap/servers/plugins/dna/dna.c +++ b/ldap/servers/plugins/dna/dna.c @@ -1352,6 +1352,22 @@ dna_update_config_event(time_t event_time, void *arg) /* If a shared config dn is set, update the shared config. */ if (config_entry->shared_cfg_dn != NULL) { + int rc = 0; + Slapi_PBlock *dna_pb = NULL; + Slapi_DN *sdn = slapi_sdn_new_normdn_byref(config_entry->shared_cfg_dn); + Slapi_Backend *be = slapi_be_select(sdn); + slapi_sdn_free(&sdn); + if (be) { + dna_pb = slapi_pblock_new(); + slapi_pblock_set(dna_pb, SLAPI_BACKEND, be); + /* We need to start transaction to avoid the deadlock */ + rc = slapi_back_transaction_begin(dna_pb); + if (rc) { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_update_config_event: failed to start transaction\n"); + } + } + slapi_lock_mutex(config_entry->lock); /* First delete the existing shared config entry. This @@ -1367,6 +1383,12 @@ dna_update_config_event(time_t event_time, void *arg) dna_update_shared_config(config_entry); slapi_unlock_mutex(config_entry->lock); + if (dna_pb) { + if (0 == rc) { + slapi_back_transaction_commit(dna_pb); + } + slapi_pblock_destroy(dna_pb); + } slapi_pblock_init(pb); } @@ -1659,7 +1681,7 @@ dna_get_shared_servers(struct configEntry *config_entry, PRCList **servers) } } if(!inserted){ - dna_free_shared_server(&server); + dna_free_shared_server(&server); } } }
0
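The deadlock fix wraps the shared config update in a backend transaction. A condensed sketch of that pattern, assuming a 389-ds plugin build environment (slapi-plugin.h) and using a placeholder update function and config DN rather than the real DNA code:

#include <slapi-plugin.h>

/* Placeholder for the real shared-config delete/re-add logic. */
static void
my_update_shared_config(void)
{
}

static void
update_shared_config_in_txn(const char *shared_cfg_dn)
{
    Slapi_PBlock *txn_pb = NULL;
    int rc = 0;

    /* Find the backend that holds the shared config entry. */
    Slapi_DN *sdn = slapi_sdn_new_normdn_byref(shared_cfg_dn);
    Slapi_Backend *be = slapi_be_select(sdn);
    slapi_sdn_free(&sdn);

    if (be) {
        /* Start a backend transaction so the delete + re-add of the
         * shared config entry cannot interleave with other writes. */
        txn_pb = slapi_pblock_new();
        slapi_pblock_set(txn_pb, SLAPI_BACKEND, be);
        rc = slapi_back_transaction_begin(txn_pb);
    }

    my_update_shared_config();

    if (txn_pb) {
        if (rc == 0) {
            slapi_back_transaction_commit(txn_pb);
        }
        slapi_pblock_destroy(txn_pb);
    }
}

As in the commit, the update still proceeds when the transaction cannot be started; the original code additionally logs a fatal error in that case, which is omitted here for brevity.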
bd5c39b4dd1964bdc3392be7aa492bd40b83e717
389ds/389-ds-base
Issue 4994 - Revert retrocl dependency workaround (#4995) Description: The RetroCL exclude attribute RFE was dependent on the functionality of a commit that didn't make it into the RHEL 8.5 build. A workaround was committed that added the missing methods. Since then the previous commit has been merged, so there now exist two definitions of the same method; these need to be removed. fixes: https://github.com/389ds/389-ds-base/issues/4994 relates: https://github.com/389ds/389-ds-base/issues/4791 Reviewed by: tbordaz (Merci)
commit bd5c39b4dd1964bdc3392be7aa492bd40b83e717 Author: James Chapman <[email protected]> Date: Thu Dec 9 22:22:02 2021 +0000 Issue 4994 - Revert retrocl dependency workaround (#4995) Description: The RetroCL exclude attribute RFE was dependent on the functionality of a commit that didn't make into the rhel 8.5 build. A work around was committed that added the missing methods. Since then the previous commit has been merged, so there now exists two definitions of the same method, these need to be removed. fixes: https://github.com/389ds/389-ds-base/issues/4994 relates: https://github.com/389ds/389-ds-base/issues/4791 Reviewed by: tbordaz (Merci) diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py index c35a0c993..3bdff9415 100644 --- a/dirsrvtests/tests/suites/retrocl/basic_test.py +++ b/dirsrvtests/tests/suites/retrocl/basic_test.py @@ -16,7 +16,7 @@ from lib389.utils import * from lib389.tasks import * from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance from lib389.cli_base.dsrc import dsrc_arg_concat -from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr +from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.domain import Domain from lib389._mapped_object import DSLdapObjects @@ -119,7 +119,7 @@ def test_retrocl_exclude_attr_add(topology_st): args.bindpw = None args.prompt = False args.exclude_attrs = ATTR_HOMEPHONE - args.func = retrochangelog_add_attr + args.func = retrochangelog_add dsrc_inst = dsrc_arg_concat(args, None) inst = connect_instance(dsrc_inst, False, args) result = args.func(inst, None, log, args) @@ -252,7 +252,7 @@ def test_retrocl_exclude_attr_mod(topology_st): args.bindpw = None args.prompt = False args.exclude_attrs = ATTR_CARLICENSE - args.func = retrochangelog_add_attr + args.func = retrochangelog_add dsrc_inst = dsrc_arg_concat(args, None) inst = connect_instance(dsrc_inst, False, args) result = args.func(inst, None, log, args) diff --git a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py index 46cfd6e8f..92527998b 100644 --- a/src/lib389/lib389/cli_conf/plugins/retrochangelog.py +++ b/src/lib389/lib389/cli_conf/plugins/retrochangelog.py @@ -6,13 +6,9 @@ # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- -# JC Work around for missing dependency on https://github.com/389ds/389-ds-base/pull/4344 -import ldap from lib389.plugins import RetroChangelogPlugin -# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344 -# from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add_attr -from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, _args_to_attrs +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add_attr arg_to_attr = { 'is_replicated': 'isReplicated', @@ -29,33 +25,6 @@ def retrochangelog_edit(inst, basedn, log, args): plugin = RetroChangelogPlugin(inst) generic_object_edit(plugin, log, args, arg_to_attr) -# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344 -def retrochangelog_add_attr(inst, basedn, log, args): - log = log.getChild('retrochangelog_add_attr') - plugin = RetroChangelogPlugin(inst) - generic_object_add_attr(plugin, log, args, arg_to_attr) - -# JC Work around for missing dependency https://github.com/389ds/389-ds-base/pull/4344 -def generic_object_add_attr(dsldap_object, log, args, arg_to_attr): - """Add an attribute to the entry. This differs to 'edit' as edit uses replace, - and this allows multivalues to be added. - - dsldap_object should be a single instance of DSLdapObject with a set dn - """ - log = log.getChild('generic_object_add_attr') - # Gather the attributes - attrs = _args_to_attrs(args, arg_to_attr) - - modlist = [] - for attr, value in attrs.items(): - if not isinstance(value, list): - value = [value] - modlist.append((ldap.MOD_ADD, attr, value)) - if len(modlist) > 0: - dsldap_object.apply_mods(modlist) - log.info("Successfully changed the %s", dsldap_object.dn) - else: - raise ValueError("There is nothing to set in the %s plugin entry" % dsldap_object.dn) def retrochangelog_add(inst, basedn, log, args): log = log.getChild('retrochangelog_add')
0
7c9b3a5ac8bf75ed1ac0eed350691d41b9607ff5
389ds/389-ds-base
Bug 613056 - fix Coverity Defect Type: Null pointer dereference issues 11892 - 11939 https://bugzilla.redhat.com/show_bug.cgi?id=613056 Resolves: bug 613056 Bug description: Fix Coverity Defect Type: Null pointer dereference issues 11892 - 11939 description: Catch possible NULL pointer in handle_handshake_done().
commit 7c9b3a5ac8bf75ed1ac0eed350691d41b9607ff5 Author: Endi S. Dewata <[email protected]> Date: Fri Jul 9 20:29:08 2010 -0500 Bug 613056 - fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939 https://bugzilla.redhat.com/show_bug.cgi?id=613056 Resolves: bug 613056 Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939 description: Catch possible NULL pointer in handle_handshake_done(). diff --git a/ldap/servers/slapd/auth.c b/ldap/servers/slapd/auth.c index e483f6e97..4b56e5ebc 100644 --- a/ldap/servers/slapd/auth.c +++ b/ldap/servers/slapd/auth.c @@ -440,6 +440,7 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) char* extraErrorMsg = ""; SSLChannelInfo channelInfo; SSLCipherSuiteInfo cipherInfo; + char* subject = NULL; if ( (slapd_ssl_getChannelInfo (prfd, &channelInfo, sizeof(channelInfo))) != SECSuccess ) { PRErrorCode errorCode = PR_GetError(); @@ -447,7 +448,7 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) "conn=%" NSPRIu64 " SSL failed to obtain channel info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n", conn->c_connid, errorCode, slapd_pr_strerror(errorCode)); - return; + goto done; } if ( (slapd_ssl_getCipherSuiteInfo (channelInfo.cipherSuite, &cipherInfo, sizeof(cipherInfo)) ) != SECSuccess) { @@ -456,7 +457,7 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) "conn=%" NSPRIu64 " SSL failed to obtain cipher info; " SLAPI_COMPONENT_NAME_NSPR " error %i (%s)\n", conn->c_connid, errorCode, slapd_pr_strerror(errorCode)); - return; + goto done; } keySize = cipherInfo.effectiveKeyBits; @@ -468,22 +469,26 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) if ( conn->c_flags & CONN_FLAG_START_TLS ) { if ( cipherInfo.symKeyBits == 0 ) { start_tls_graceful_closure( conn, NULL, 1 ); - slapi_ch_free_string(&cipher); - return ; + goto done; } } if (config_get_SSLclientAuth() == SLAPD_SSLCLIENTAUTH_OFF ) { slapi_log_access (LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " SSL %i-bit %s\n", conn->c_connid, keySize, cipher ? cipher : "NULL" ); - slapi_ch_free_string(&cipher); - return; + goto done; } if (clientCert == NULL) { slapi_log_access (LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " SSL %i-bit %s\n", conn->c_connid, keySize, cipher ? cipher : "NULL" ); } else { - char* subject = subject_of (clientCert); + subject = subject_of (clientCert); + if (!subject) { + slapi_log_access( LDAP_DEBUG_STATS, + "conn=%" NSPRIu64 " SSL %i-bit %s; missing subject\n", + conn->c_connid, keySize, cipher ? cipher : "NULL"); + goto done; + } { char* issuer = issuer_of (clientCert); char sbuf[ BUFSIZ ], ibuf[ BUFSIZ ]; @@ -521,7 +526,6 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) slapi_ch_free_string(&basedn); slapu_msgfree (internal_ld, chain); } - if (subject) free (subject); } if (clientDN != NULL) { @@ -555,7 +559,8 @@ handle_handshake_done (PRFileDesc *prfd, void* clientData) */ bind_credentials_set( conn, SLAPD_AUTH_SSL, clientDN, SLAPD_AUTH_SSL, clientDN, clientCert , NULL); - +done: + slapi_ch_free_string(&subject); slapi_ch_free_string(&cipher); /* clientDN and clientCert will be freed later */ }
0
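The patch converts the early returns in handle_handshake_done() into jumps to a single cleanup label so the cipher and subject strings are always freed. A generic, self-contained illustration of that idiom (the parsing helper below is invented for the example, not server code):

#define _POSIX_C_SOURCE 200809L   /* for strdup */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
process(const char *input)
{
    int rc = -1;
    char *copy = NULL;
    char *subject = NULL;

    copy = strdup(input);
    if (copy == NULL)
        goto done;                    /* early exit still runs the cleanup */

    char *eq = strchr(copy, '=');
    if (eq == NULL)
        goto done;                    /* another early exit, same cleanup */

    subject = strdup(eq + 1);
    if (subject == NULL)
        goto done;

    printf("subject: %s\n", subject);
    rc = 0;

done:
    /* Single cleanup block: no exit path can leak copy or subject. */
    free(subject);
    free(copy);
    return rc;
}

int main(void)
{
    return process("cn=example") == 0 ? 0 : 1;
}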
e394d61b29d8e234ae396deb55aa33e7d6d92a0f
389ds/389-ds-base
port client tools to use openldap API There are some differences between the mozldap and the openldap APIs: openldap has deprecated ldap_init in favor of ldap_initialize; openldap has deprecated the regular ldap op functions in favor of their _ext alternatives. Reviewed by: nkinder (Thanks!) Platforms tested: Fedora 14 (rawhide)
commit e394d61b29d8e234ae396deb55aa33e7d6d92a0f Author: Rich Megginson <[email protected]> Date: Tue Jun 8 16:45:33 2010 -0600 port client tools to use openldap API There are some differences between the mozldap and the openldap apis: openldap has deprecated ldap_init in favor of ldap_initialize openldap has deprecated the regular ldap op functions in favor of their _ext alternatives Reviewed by: nkinder (Thanks!) Platforms tested: Fedora 14 (rawhide) diff --git a/ldap/servers/slapd/tools/rsearch/addthread.c b/ldap/servers/slapd/tools/rsearch/addthread.c index 417a4c193..f01ba1811 100644 --- a/ldap/servers/slapd/tools/rsearch/addthread.c +++ b/ldap/servers/slapd/tools/rsearch/addthread.c @@ -180,16 +180,33 @@ static void at_bind(AddThread *at) { int ret; int retry = 0; +#if defined(USE_OPENLDAP) + char *ldapurl = NULL; + at->ld = NULL; + ldapurl = PR_smprintf("ldap://%s:%d", hostname, port); + ret = ldap_initialize(&at->ld, ldapurl); + PR_smprintf_free(ldapurl); + ldapurl = NULL; + if (ret) { + fprintf(stderr, "T%d: failed to init: %s port %d: %d:%s\n", at->id, hostname, port, + ret, ldap_err2string(ret)); + return; + } +#else at->ld = ldap_init(hostname, port); +#endif if (! at->ld) { fprintf(stderr, "T%d: failed to init: %s port %d\n", at->id, hostname, port); return; } while (retry < 10) { - ret = ldap_simple_bind_s(at->ld, strlen(username) ? username : NULL, - strlen(password) ? password : NULL); + struct berval bvcreds = {0, NULL}; + bvcreds.bv_val = password; + bvcreds.bv_len = password ? strlen(password) : 0; + ret = ldap_sasl_bind_s(at->ld, username, LDAP_SASL_SIMPLE, &bvcreds, + NULL, NULL, NULL); if (LDAP_SUCCESS == ret) { return; /* ok */ } else if (LDAP_CONNECT_ERROR == ret) { @@ -198,7 +215,7 @@ static void at_bind(AddThread *at) break; } } - fprintf(stderr, "T%d: failed to bind, ldap_simple_bind_s returned %d\n", + fprintf(stderr, "T%d: failed to bind, ldap_sasl_bind_s returned %d\n", at->id, ret); } @@ -318,7 +335,7 @@ static int at_add(AddThread *at) fprintf(stderr, "'%s'\n", attrs[i]->mod_values[0]); } #endif - ret = ldap_add_s(at->ld, dn, attrs); + ret = ldap_add_ext_s(at->ld, dn, attrs, NULL, NULL); if (ret != LDAP_SUCCESS) { fprintf(stderr, "T%d: failed to add, error = %d\n", at->id, ret); } diff --git a/ldap/servers/slapd/tools/rsearch/searchthread.c b/ldap/servers/slapd/tools/rsearch/searchthread.c index c229d7593..9e2b0d666 100644 --- a/ldap/servers/slapd/tools/rsearch/searchthread.c +++ b/ldap/servers/slapd/tools/rsearch/searchthread.c @@ -160,7 +160,11 @@ static int st_bind_core(SearchThread *st, LDAP **ld, char *dn, char *pw) int ret = 0; int retry = 0; while (1) { - ret = ldap_simple_bind_s(*ld, dn, pw); + struct berval bvcreds = {0, NULL}; + bvcreds.bv_val = pw; + bvcreds.bv_len = pw ? 
strlen(pw) : 0; + ret = ldap_sasl_bind_s(*ld, dn, LDAP_SASL_SIMPLE, &bvcreds, + NULL, NULL, NULL); if (LDAP_SUCCESS == ret) { break; } else if (LDAP_CONNECT_ERROR == ret && retry < 10) { @@ -179,14 +183,46 @@ static int st_bind_core(SearchThread *st, LDAP **ld, char *dn, char *pw) static int st_bind(SearchThread *st) { if (!st->ld) { +#if defined(USE_OPENLDAP) + int ret = 0; + char *ldapurl = NULL; + + st->ld = NULL; + ldapurl = PR_smprintf("ldap://%s:%d", hostname, port); + ret = ldap_initialize(&st->ld, ldapurl); + PR_smprintf_free(ldapurl); + ldapurl = NULL; + if (ret) { + fprintf(stderr, "T%d: failed to init: %s port %d: %d:%s\n", st->id, hostname, port, + ret, ldap_err2string(ret)); + return 0; + } +#else st->ld = ldap_init(hostname, port); +#endif if (!st->ld) { fprintf(stderr, "T%d: failed to init\n", st->id); return 0; } } if (!st->ld2) { /* aux LDAP handle */ +#if defined(USE_OPENLDAP) + int ret = 0; + char *ldapurl = NULL; + + st->ld2 = NULL; + ldapurl = PR_smprintf("ldap://%s:%d", hostname, port); + ret = ldap_initialize(&st->ld2, ldapurl); + PR_smprintf_free(ldapurl); + ldapurl = NULL; + if (ret) { + fprintf(stderr, "T%d: failed to init: %s port %d: %d:%s\n", st->id, hostname, port, + ret, ldap_err2string(ret)); + return 0; + } +#else st->ld2 = ldap_init(hostname, port); +#endif if (!st->ld2) { fprintf(stderr, "T%d: failed to init 2\n", st->id); return 0; @@ -230,8 +266,9 @@ static int st_bind(SearchThread *st) timeout.tv_sec = 3600; timeout.tv_usec = 0; while (1) { - int ret = ldap_search_st(st->ld2, suffix, scope, pFilter, - NULL, attrsOnly, &timeout, &result); + int ret = ldap_search_ext_s(st->ld2, suffix, scope, pFilter, + NULL, attrsOnly, NULL, NULL, + &timeout, -1, &result); if (LDAP_SUCCESS == ret) { break; } else if ((LDAP_CONNECT_ERROR == ret || @@ -289,7 +326,7 @@ static int st_bind(SearchThread *st) static void st_unbind(SearchThread *st) { - if (ldap_unbind(st->ld) != LDAP_SUCCESS) + if (ldap_unbind_ext(st->ld, NULL, NULL) != LDAP_SUCCESS) fprintf(stderr, "T%d: failed to unbind\n", st->id); st->ld = NULL; st->soc = -1; @@ -335,8 +372,8 @@ static int st_search(SearchThread *st) timeout.tv_usec = 0; timeoutp = &timeout; } - ret = ldap_search_st(st->ld, suffix, scope, pFilter, attrToReturn, - attrsOnly, timeoutp, &result); + ret = ldap_search_ext_s(st->ld, suffix, scope, pFilter, attrToReturn, + attrsOnly, NULL, NULL, timeoutp, -1, &result); if (ret != LDAP_SUCCESS) { fprintf(stderr, "T%d: failed to search 2, error=0x%02X\n", st->id, ret); @@ -388,7 +425,7 @@ static int st_modify_nonidx(SearchThread *st) attr_description.mod_type = "description"; attr_description.mod_values = description_values; - rval = ldap_modify_s(st->ld, dn, attrs); + rval = ldap_modify_ext_s(st->ld, dn, attrs, NULL, NULL); if (rval != LDAP_SUCCESS) { fprintf(stderr, "T%d: Failed to modify error=0x%x\n", st->id, rval); fprintf(stderr, "dn: %s\n", dn); @@ -431,7 +468,7 @@ static int st_modify_idx(SearchThread *st) attr_telephonenumber.mod_type = "telephonenumber"; attr_telephonenumber.mod_values = telephonenumber_values; - rval = ldap_modify_s(st->ld, dn, attrs); + rval = ldap_modify_ext_s(st->ld, dn, attrs, NULL, NULL); if (rval != LDAP_SUCCESS) { fprintf(stderr, "T%d: Failed to modify error=0x%x\n", st->id, rval); fprintf(stderr, "dn: %s\n", dn); @@ -448,6 +485,7 @@ static int st_compare(SearchThread *st) char *dn = NULL; char *uid = NULL; char uid0[100]; + struct berval bvvalue = {0, NULL}; /* Decide what entry to modify, for this we need a table */ if (NULL == sdattable || sdt_getlen(sdattable) 
== 0) { @@ -469,7 +507,9 @@ static int st_compare(SearchThread *st) uid0[0] = '@'; /* make it not matched */ uid = uid0; } - rval = ldap_compare_s(st->ld, dn, "uid", uid); + bvvalue.bv_val = uid; + bvvalue.bv_len = uid ? strlen(uid) : 0; + rval = ldap_compare_ext_s(st->ld, dn, "uid", &bvvalue, NULL, NULL); correct_answer = compare_true ? LDAP_COMPARE_TRUE : LDAP_COMPARE_FALSE; if (rval == correct_answer) { rval = LDAP_SUCCESS; @@ -499,7 +539,7 @@ static int st_delete(SearchThread *st) } while (e < 0); dn = sdt_dn_get(sdattable, e); - rval = ldap_delete_s(st->ld, dn); + rval = ldap_delete_ext_s(st->ld, dn, NULL, NULL); if (rval != LDAP_SUCCESS) { if (rval == LDAP_NO_SUCH_OBJECT) { rval = LDAP_SUCCESS;
0
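The port maps each deprecated mozldap-style call to its OpenLDAP equivalent: ldap_init becomes ldap_initialize, ldap_simple_bind_s becomes ldap_sasl_bind_s with a berval, the plain operation calls become their _ext variants, and ldap_unbind becomes ldap_unbind_ext. A compact sketch collecting those replacement calls in one place (URI, bind DN, password, and search base are placeholders):

#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <ldap.h>

int main(void)
{
    LDAP *ld = NULL;
    int version = LDAP_VERSION3;
    LDAPMessage *result = NULL;
    struct timeval timeout = { 30, 0 };

    /* ldap_initialize() replaces the deprecated ldap_init(host, port). */
    int rc = ldap_initialize(&ld, "ldap://localhost:389");
    if (rc != LDAP_SUCCESS) {
        fprintf(stderr, "ldap_initialize: %s\n", ldap_err2string(rc));
        return 1;
    }
    ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &version);

    /* ldap_sasl_bind_s() with LDAP_SASL_SIMPLE replaces ldap_simple_bind_s(). */
    struct berval creds;
    creds.bv_val = "secret";
    creds.bv_len = strlen(creds.bv_val);
    rc = ldap_sasl_bind_s(ld, "cn=Directory Manager", LDAP_SASL_SIMPLE,
                          &creds, NULL, NULL, NULL);
    if (rc != LDAP_SUCCESS) {
        fprintf(stderr, "bind: %s\n", ldap_err2string(rc));
        goto out;
    }

    /* ldap_search_ext_s() replaces ldap_search_st(); -1 means no size limit. */
    rc = ldap_search_ext_s(ld, "dc=example,dc=com", LDAP_SCOPE_SUBTREE,
                           "(objectClass=*)", NULL, 0, NULL, NULL,
                           &timeout, -1, &result);
    if (rc == LDAP_SUCCESS) {
        printf("entries: %d\n", ldap_count_entries(ld, result));
    }
    ldap_msgfree(result);

out:
    /* ldap_unbind_ext() replaces ldap_unbind(). */
    ldap_unbind_ext(ld, NULL, NULL);
    return rc == LDAP_SUCCESS ? 0 : 1;
}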
a63f491936af473ede47da8bb76c79ae6eb1ae8a
389ds/389-ds-base
185811 - Need to exclude the pwd.h include on Windows
commit a63f491936af473ede47da8bb76c79ae6eb1ae8a Author: Nathan Kinder <[email protected]> Date: Sat Mar 18 22:35:17 2006 +0000 185811 - Need to exclude pwd.h include on windows diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 1c9502f21..072fd5f73 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -48,7 +48,9 @@ #include "log.h" #include "fe.h" +#ifndef _WIN32 #include <pwd.h> /* getpwnam */ +#endif #if defined( XP_WIN32 ) #include <fcntl.h>
0
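The change simply compiles the POSIX-only header out of Windows builds. A tiny stand-alone illustration of the same guard (the lookup helper below is invented for the example):

#include <stdio.h>
#ifndef _WIN32
#include <sys/types.h>
#include <pwd.h>   /* getpwnam */
#endif

static long lookup_uid(const char *name)
{
#ifndef _WIN32
    struct passwd *pw = getpwnam(name);
    return pw ? (long)pw->pw_uid : -1;
#else
    (void)name;    /* no getpwnam on Windows; report "not found" */
    return -1;
#endif
}

int main(void)
{
    printf("uid of root: %ld\n", lookup_uid("root"));
    return 0;
}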
13c0d2f7b7850676042fe05c917a7d498135324f
389ds/389-ds-base
Ticket #47838 - harden the list of ciphers available by default Description: Directory Server used to maintain the supported cipher suite list. This patch abandons the table and retrieves the list dynamically from NSS using its API SSL_GetImplementedCiphers. We still keep a lookup table _lookup_cipher to find out a name defined in NSS from the old cipher name for backward compatibility. E.g. "rc4" --> "SSL_CK_RC4_128_WITH_MD5". Changes on the value of nsSSL3Ciphers: . If "+all" is included in the value of nsSSL3Ciphers, enable all ciphers first, then disable the specified ciphers starting with "-". Otherwise (without "+all"), disable all the ciphers first, then enable the specified ciphers starting with "+". . Introduced a keyword "default" for nsSSL3Ciphers. If the config attribute nsSSL3Ciphers does not exist, the value is empty, or the value is "default", the default cipher set is enabled. The enabled ciphers are logged in the error log as follows: [..] - SSL alert: Configured NSS Ciphers [..] - SSL alert: TLS_RSA_WITH_AES_128_GCM_SHA256: enabled [..] - SSL alert: TLS_RSA_WITH_AES_128_CBC_SHA: enabled If specified ciphers are weak or very weak, the cipher is logged with (WEAK CIPHER) or (MUST BE DISABLED) as follows: [..] - SSL alert: Configured NSS Ciphers [..] - SSL alert: TLS_DHE_DSS_WITH_DES_CBC_SHA: disabled, (WEAK CIPHER) [..] - SSL alert: TLS_RSA_WITH_NULL_SHA: disabled, (MUST BE DISABLED) To log all the available ciphers, set the log level to CONFIG (SLAPI_LOG_CONFIG). Then all the ciphers are logged. See also http://directory.fedoraproject.org/wiki/NSS_Ciphers https://fedorahosted.org/389/ticket/47838 Reviewed by [email protected] (Thank you, Rich!!)
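A minimal sketch of the dynamic enumeration this patch switches to, assuming an NSS development environment (compile against the NSPR/NSS headers and link with -lnss3 -lssl3); NSS_NoDB_Init stands in for the server's full NSS initialization and the weak-cipher test is simplified relative to the patch:

#include <stdio.h>
#include <nss.h>
#include <ssl.h>

int main(void)
{
    if (NSS_NoDB_Init(NULL) != SECSuccess) {
        fprintf(stderr, "NSS init failed\n");
        return 1;
    }

    /* Ask NSS for the implemented cipher suites instead of keeping a table. */
    const PRUint16 *ciphers = SSL_GetImplementedCiphers();
    for (int i = 0; ciphers && i < SSL_NumImplementedCiphers; i++) {
        SSLCipherSuiteInfo info;
        PRBool enabled = PR_FALSE;

        if (SSL_GetCipherSuiteInfo(ciphers[i], &info, sizeof(info)) != SECSuccess)
            continue;
        SSL_CipherPrefGetDefault(ciphers[i], &enabled);

        /* Simplified version of the "weak cipher" test used by the patch. */
        int weak = info.isExportable || info.effectiveKeyBits < 128;
        printf("%-40s %s%s\n", info.cipherSuiteName,
               enabled ? "enabled" : "disabled",
               weak ? " (WEAK CIPHER)" : "");
    }
    return 0;
}

The output mirrors the "enabled"/"disabled ... (WEAK CIPHER)" lines the commit adds to the error log.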
commit 13c0d2f7b7850676042fe05c917a7d498135324f Author: Noriko Hosoi <[email protected]> Date: Thu Aug 7 11:53:55 2014 -0700 Ticket #47838 - harden the list of ciphers available by default Description: Directory Server used to maintain the supported cipher suite list. This patch abandons the table and retrieve the list dynamically from NSS using its API SSL_GetImplementedCiphers. We still keeps a lookup table _lookup_cipher to find out a name defined in NSS from the old cipher name for the backward compatibility. E.g. "rc4" --> "SSL_CK_RC4_128_WITH_MD5". Changes on the value of nsSSL3Ciphers: . If "+all" is included in the value of nsSSL3Ciphers, enable all ciphers first, then disable specified ciphers starting with "-". Otherwise (without "+all"), disable all the ciphers first, then enable specified ciphers starting with "+". . Introduced a keyword "default" for nsSSL3Ciphers. If the config attribute nsSSL3Ciphers does not exist, the value is empty or the value is "default", the default cipher set is enabled. The enabled ciphers are logged in the error log as follows: [..] - SSL alert: Configured NSS Ciphers [..] - SSL alert: TLS_RSA_WITH_AES_128_GCM_SHA256: enabled [..] - SSL alert: TLS_RSA_WITH_AES_128_CBC_SHA: enabled If specified ciphers are weak or very weak, the cipher is logged with (WEAK CIPHER) or (MUST BE DISABLED) as follows: [..] - SSL alert: Configured NSS Ciphers [..] - SSL alert: TLS_DHE_DSS_WITH_DES_CBC_SHA: disabled, (WEAK CIPHER) [..] - SSL alert: TLS_RSA_WITH_NULL_SHA: disabled, (MUST BE DISABLED) To log all the available ciphers, set log level to CONFIG (SLAPI_LOG_CONFIG). Then, all the ciphers are logged. See also http://directory.fedoraproject.org/wiki/NSS_Ciphers https://fedorahosted.org/389/ticket/47838 Reviewed by [email protected] (Thank you, Rich!!) 
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c index 8dc39d277..cf9643fcd 100644 --- a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -121,68 +121,90 @@ static char * configDN = "cn=encryption,cn=config"; /* ----------------------- Multiple cipher support ------------------------ */ +/* flags */ +#define CIPHER_IS_DEFAULT 0x1 +#define CIPHER_MUST_BE_DISABLED 0x2 +#define CIPHER_IS_WEAK 0x4 +#define CIPHER_IS_DEPRECATED 0x8 static char **cipher_names = NULL; typedef struct { - char *version; char *name; int num; + int flags; } cipherstruct; - -static cipherstruct _conf_ciphers[] = { - {"SSL3","rc4", SSL_EN_RC4_128_WITH_MD5}, - {"SSL3","rc4export", SSL_EN_RC4_128_EXPORT40_WITH_MD5}, - {"SSL3","rc2", SSL_EN_RC2_128_CBC_WITH_MD5}, - {"SSL3","rc2export", SSL_EN_RC2_128_CBC_EXPORT40_WITH_MD5}, - /*{"idea", SSL_EN_IDEA_128_CBC_WITH_MD5}, */ - {"SSL3","des", SSL_EN_DES_64_CBC_WITH_MD5}, - {"SSL3","desede3", SSL_EN_DES_192_EDE3_CBC_WITH_MD5}, - {"SSL3","rsa_rc4_128_md5", SSL_RSA_WITH_RC4_128_MD5}, - {"SSL3","rsa_rc4_128_sha", SSL_RSA_WITH_RC4_128_SHA}, - {"SSL3","rsa_3des_sha", SSL_RSA_WITH_3DES_EDE_CBC_SHA}, - {"SSL3","rsa_des_sha", SSL_RSA_WITH_DES_CBC_SHA}, - {"SSL3","rsa_fips_3des_sha", SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA}, - {"SSL3","fips_3des_sha", SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA}, /* ditto */ - {"SSL3","rsa_fips_des_sha", SSL_RSA_FIPS_WITH_DES_CBC_SHA}, - {"SSL3","fips_des_sha", SSL_RSA_FIPS_WITH_DES_CBC_SHA}, /* ditto */ - {"SSL3","rsa_rc4_40_md5", SSL_RSA_EXPORT_WITH_RC4_40_MD5}, - {"SSL3","rsa_rc2_40_md5", SSL_RSA_EXPORT_WITH_RC2_CBC_40_MD5}, - {"SSL3","rsa_null_md5", SSL_RSA_WITH_NULL_MD5}, /* disabled by default */ - {"SSL3","rsa_null_sha", SSL_RSA_WITH_NULL_SHA}, /* disabled by default */ - {"TLS","tls_rsa_export1024_with_rc4_56_sha", TLS_RSA_EXPORT1024_WITH_RC4_56_SHA}, - {"TLS","rsa_rc4_56_sha", TLS_RSA_EXPORT1024_WITH_RC4_56_SHA}, /* ditto */ - {"TLS","tls_rsa_export1024_with_des_cbc_sha", TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA}, - {"TLS","rsa_des_56_sha", TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA}, /* ditto */ - {"SSL3","fortezza", SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA}, /* deprecated */ - {"SSL3","fortezza_rc4_128_sha", SSL_FORTEZZA_DMS_WITH_RC4_128_SHA}, /* deprecated */ - {"SSL3","fortezza_null", SSL_FORTEZZA_DMS_WITH_NULL_SHA}, /* deprecated */ - - /*{"SSL3","dhe_dss_40_sha", SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA}, */ - {"SSL3","dhe_dss_des_sha", SSL_DHE_DSS_WITH_DES_CBC_SHA}, - {"SSL3","dhe_dss_3des_sha", SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA}, - /*{"SSL3","dhe_rsa_40_sha", SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA}, */ - {"SSL3","dhe_rsa_des_sha", SSL_DHE_RSA_WITH_DES_CBC_SHA}, - {"SSL3","dhe_rsa_3des_sha", SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA}, - - {"TLS","tls_rsa_aes_128_sha", TLS_RSA_WITH_AES_128_CBC_SHA}, - {"TLS","rsa_aes_128_sha", TLS_RSA_WITH_AES_128_CBC_SHA}, /* ditto */ - {"TLS","tls_dhe_dss_aes_128_sha", TLS_DHE_DSS_WITH_AES_128_CBC_SHA}, - {"TLS","tls_dhe_rsa_aes_128_sha", TLS_DHE_RSA_WITH_AES_128_CBC_SHA}, - - {"TLS","tls_rsa_aes_256_sha", TLS_RSA_WITH_AES_256_CBC_SHA}, - {"TLS","rsa_aes_256_sha", TLS_RSA_WITH_AES_256_CBC_SHA}, /* ditto */ - {"TLS","tls_dhe_dss_aes_256_sha", TLS_DHE_DSS_WITH_AES_256_CBC_SHA}, - {"TLS","tls_dhe_rsa_aes_256_sha", TLS_DHE_RSA_WITH_AES_256_CBC_SHA}, - /*{"TLS","tls_dhe_dss_1024_des_sha", TLS_DHE_DSS_EXPORT1024_WITH_DES_CBC_SHA}, */ - {"TLS","tls_dhe_dss_1024_rc4_sha", TLS_RSA_EXPORT1024_WITH_RC4_56_SHA}, - {"TLS","tls_dhe_dss_rc4_128_sha", TLS_DHE_DSS_WITH_RC4_128_SHA}, +static cipherstruct *_conf_ciphers = NULL; +static 
void _conf_init_ciphers(); +/* + * This lookup table is for supporting the old cipher name. + * Once swtiching to the NSS cipherSuiteName is done, + * this lookup_cipher table can be removed. + */ +typedef struct { + char *alias; + char *name; +} lookup_cipher; +static lookup_cipher _lookup_cipher[] = { + {"rc4", "SSL_CK_RC4_128_WITH_MD5"}, + {"rc4export", "SSL_CK_RC4_128_EXPORT40_WITH_MD5"}, + {"rc2", "SSL_CK_RC2_128_CBC_WITH_MD5"}, + {"rc2export", "SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5"}, + /*{"idea", "SSL_EN_IDEA_128_CBC_WITH_MD5"}, */ + {"des", "SSL_CK_DES_64_CBC_WITH_MD5"}, + {"desede3", "SSL_CK_DES_192_EDE3_CBC_WITH_MD5"}, + {"rsa_rc4_128_md5", "TLS_RSA_WITH_RC4_128_MD5"}, + {"rsa_rc4_128_sha", "TLS_RSA_WITH_RC4_128_SHA"}, + {"rsa_3des_sha", "TLS_RSA_WITH_3DES_EDE_CBC_SHA"}, + {"tls_rsa_3des_sha", "TLS_RSA_WITH_3DES_EDE_CBC_SHA"}, + {"rsa_fips_3des_sha", "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA"}, + {"fips_3des_sha", "SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA"}, + {"rsa_des_sha", "TLS_RSA_WITH_DES_CBC_SHA"}, + {"rsa_fips_des_sha", "SSL_RSA_FIPS_WITH_DES_CBC_SHA"}, + {"fips_des_sha", "SSL_RSA_FIPS_WITH_DES_CBC_SHA"}, /* ditto */ + {"rsa_rc4_40_md5", "TLS_RSA_EXPORT_WITH_RC4_40_MD5"}, + {"tls_rsa_rc4_40_md5", "TLS_RSA_EXPORT_WITH_RC4_40_MD5"}, + {"rsa_rc2_40_md5", "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5"}, + {"tls_rsa_rc2_40_md5", "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5"}, + {"rsa_null_md5", "TLS_RSA_WITH_NULL_MD5"}, /* disabled by default */ + {"rsa_null_sha", "TLS_RSA_WITH_NULL_SHA"}, /* disabled by default */ + {"tls_rsa_export1024_with_rc4_56_sha", "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA"}, + {"rsa_rc4_56_sha", "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA"}, /* ditto */ + {"tls_rsa_export1024_with_des_cbc_sha", "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA"}, + {"rsa_des_56_sha", "TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA"}, /* ditto */ + {"fortezza", ""}, /* deprecated */ + {"fortezza_rc4_128_sha", ""}, /* deprecated */ + {"fortezza_null", ""}, /* deprecated */ + + /*{"dhe_dss_40_sha", SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, 0}, */ + {"dhe_dss_des_sha", "TLS_DHE_DSS_WITH_DES_CBC_SHA"}, + {"dhe_dss_3des_sha", "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA"}, + {"dhe_rsa_40_sha", "TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA"}, + {"dhe_rsa_des_sha", "TLS_DHE_RSA_WITH_DES_CBC_SHA"}, + {"dhe_rsa_3des_sha", "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA"}, + + {"tls_rsa_aes_128_sha", "TLS_RSA_WITH_AES_128_CBC_SHA"}, + {"rsa_aes_128_sha", "TLS_RSA_WITH_AES_128_CBC_SHA"}, /* ditto */ + {"tls_dh_dss_aes_128_sha", ""}, /* deprecated */ + {"tls_dh_rsa_aes_128_sha", ""}, /* deprecated */ + {"tls_dhe_dss_aes_128_sha", "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"}, + {"tls_dhe_rsa_aes_128_sha", "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"}, + + {"tls_rsa_aes_256_sha", "TLS_RSA_WITH_AES_256_CBC_SHA"}, + {"rsa_aes_256_sha", "TLS_RSA_WITH_AES_256_CBC_SHA"}, /* ditto */ + {"tls_dss_aes_256_sha", ""}, /* deprecated */ + {"tls_rsa_aes_256_sha", ""}, /* deprecated */ + {"tls_dhe_dss_aes_256_sha", "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"}, + {"tls_dhe_rsa_aes_256_sha", "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"}, + /*{"tls_dhe_dss_1024_des_sha", ""}, */ + {"tls_dhe_dss_1024_rc4_sha", "TLS_RSA_EXPORT1024_WITH_RC4_56_SHA"}, + {"tls_dhe_dss_rc4_128_sha", "TLS_DHE_DSS_WITH_RC4_128_SHA"}, #if defined(NSS_TLS12) /* New in NSS 3.15 */ - {"TLS","tls_rsa_aes_128_gcm_sha", TLS_RSA_WITH_AES_128_GCM_SHA256}, - {"TLS","tls_dhe_rsa_aes_128_gcm_sha", TLS_DHE_RSA_WITH_AES_128_GCM_SHA256}, - {"TLS","tls_dhe_dss_aes_128_gcm_sha", TLS_DHE_DSS_WITH_AES_128_GCM_SHA256}, + {"tls_rsa_aes_128_gcm_sha", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, 
+ {"tls_dhe_rsa_aes_128_gcm_sha", "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"}, + {"tls_dhe_dss_aes_128_gcm_sha", NULL}, /* not available */ #endif - {NULL, NULL, 0} + {NULL, NULL} }; static void @@ -217,17 +239,24 @@ char ** getSupportedCiphers() { SSLCipherSuiteInfo info; char *sep = "::"; - int number_of_ciphers = sizeof (_conf_ciphers) /sizeof(cipherstruct); + int number_of_ciphers = SSL_NumImplementedCiphers; int i; int idx = 0; PRBool isFIPS = slapd_pk11_isFIPS(); - if (cipher_names == NULL ) { - cipher_names = (char **) slapi_ch_calloc ((number_of_ciphers +1 ) , sizeof(char *)); + + _conf_init_ciphers(); + + if ((cipher_names == NULL) && (_conf_ciphers)) { + cipher_names = (char **)slapi_ch_calloc((number_of_ciphers + 1), sizeof(char *)); for (i = 0 ; _conf_ciphers[i].name != NULL; i++ ) { SSL_GetCipherSuiteInfo((PRUint16)_conf_ciphers[i].num,&info,sizeof(info)); /* only support FIPS approved ciphers in FIPS mode */ if (!isFIPS || info.isFIPS) { - cipher_names[idx++] = PR_smprintf("%s%s%s%s%s%s%s%s%d",_conf_ciphers[i].version,sep,_conf_ciphers[i].name,sep,info.symCipherName,sep,info.macAlgorithmName,sep,info.symKeyBits); + cipher_names[idx++] = PR_smprintf("%s%s%s%s%s%s%d", + _conf_ciphers[i].name,sep, + info.symCipherName,sep, + info.macAlgorithmName,sep, + info.symKeyBits); } } cipher_names[idx] = NULL; @@ -240,7 +269,7 @@ cipher_check_fips(int idx, char ***suplist, char ***unsuplist) { PRBool rc = PR_TRUE; - if (slapd_pk11_isFIPS()) { + if (_conf_ciphers && slapd_pk11_isFIPS()) { SSLCipherSuiteInfo info; if (SECFailure == SSL_GetCipherSuiteInfo((PRUint16)_conf_ciphers[idx].num, &info, sizeof info)) { @@ -273,24 +302,94 @@ cipher_check_fips(int idx, char ***suplist, char ***unsuplist) return rc; } -void -_conf_setallciphers(int active, char ***suplist, char ***unsuplist) +static void +_conf_init_ciphers() { int x; + SECStatus rc; + SSLCipherSuiteInfo info; + const PRUint16 *implementedCiphers = SSL_GetImplementedCiphers(); + + /* Initialize _conf_ciphers */ + if (_conf_ciphers) { + return; + } + _conf_ciphers = (cipherstruct *)slapi_ch_calloc(SSL_NumImplementedCiphers + 1, sizeof(cipherstruct)); - /* MLM - change: Because null_md5 is NOT encrypted at all, force - * them to activate it by name. */ - for(x = 0; _conf_ciphers[x].name; x++) { - PRBool enabled = active ? 
PR_TRUE : PR_FALSE; - if(active && (!strcmp(_conf_ciphers[x].name, "rsa_null_md5") || - !strcmp(_conf_ciphers[x].name, "rsa_null_sha"))) - { + for (x = 0; implementedCiphers && (x < SSL_NumImplementedCiphers); x++) { + rc = SSL_GetCipherSuiteInfo(implementedCiphers[x], &info, sizeof info); + if (SECFailure == rc) { + slapi_log_error(SLAPI_LOG_FATAL, "SSL Initialization", + "Warning: failed to get the cipher suite info of cipher ID %d\n", + implementedCiphers[x]); continue; } - if (enabled) { - enabled = cipher_check_fips(x, suplist, unsuplist); + if (!_conf_ciphers[x].num) { /* initialize each cipher */ + _conf_ciphers[x].name = slapi_ch_strdup(info.cipherSuiteName); + _conf_ciphers[x].num = implementedCiphers[x]; + if (info.symCipher == ssl_calg_null) { + _conf_ciphers[x].flags |= CIPHER_MUST_BE_DISABLED; + } else { + _conf_ciphers[x].flags |= info.isExportable?CIPHER_IS_WEAK: + (info.symCipher < ssl_calg_3des)?CIPHER_IS_WEAK: + (info.effectiveKeyBits < 128)?CIPHER_IS_WEAK:0; + } } - SSL_CipherPrefSetDefault(_conf_ciphers[x].num, enabled); + } + return; +} + +#define CIPHER_SET_ALL 1 +#define CIPHER_SET_NONE 0 +#define CIPHER_SET_DEFAULT 2 +/* + * flag: 1 -- enable all + * 0 -- disable all + * 2 -- set default ciphers + */ +static void +_conf_setallciphers(int flag, char ***suplist, char ***unsuplist) +{ + int x; + SECStatus rc; + PRBool setdefault = (flag == CIPHER_SET_DEFAULT) ? PR_TRUE : PR_FALSE; + PRBool enabled = (flag == CIPHER_SET_ALL) ? PR_TRUE : PR_FALSE; + PRBool setme; + const PRUint16 *implementedCiphers = SSL_GetImplementedCiphers(); + SSLCipherSuiteInfo info; + + _conf_init_ciphers(); + + for (x = 0; implementedCiphers && (x < SSL_NumImplementedCiphers); x++) { + if (!(_conf_ciphers[x].flags & CIPHER_IS_DEFAULT)) { + /* + * SSL_CipherPrefGetDefault + * If the application has not previously set the default preference, + * SSL_CipherPrefGetDefault returns the factory setting. 
+ */ + rc = SSL_CipherPrefGetDefault(_conf_ciphers[x].num, &setme); + if (SECFailure == rc) { + slapi_log_error(SLAPI_LOG_FATAL, "SSL Initialization", + "Warning: failed to get the default state of cipher %s\n", + _conf_ciphers[x].name); + continue; + } + if (_conf_ciphers[x].flags & CIPHER_IS_WEAK) { + setme = PR_FALSE; + } + _conf_ciphers[x].flags |= setme?CIPHER_IS_DEFAULT:0; + } + if (setdefault) { + /* Use the NSS default settings */ + } else if (enabled && !(_conf_ciphers[x].flags & CIPHER_MUST_BE_DISABLED)) { + setme = PR_TRUE; + } else { + setme = PR_FALSE; + } + if (setme) { + setme = cipher_check_fips(x, suplist, unsuplist); + } + SSL_CipherPrefSetDefault(_conf_ciphers[x].num, setme); } } @@ -309,40 +408,61 @@ charray2str(char **ary, const char *delim) return str; } +void +_conf_dumpciphers() +{ + int x; + PRBool enabled; + /* {"SSL3","rc4", SSL_EN_RC4_128_WITH_MD5}, */ + slapd_SSL_warn("Configured NSS Ciphers"); + for (x = 0; _conf_ciphers[x].name; x++) { + SSL_CipherPrefGetDefault(_conf_ciphers[x].num, &enabled); + if (enabled) { + slapd_SSL_warn("\t%s: enabled%s%s%s", _conf_ciphers[x].name, + (_conf_ciphers[x].flags&CIPHER_IS_WEAK)?", (WEAK CIPHER)":"", + (_conf_ciphers[x].flags&CIPHER_IS_DEPRECATED)?", (DEPRECATED)":"", + (_conf_ciphers[x].flags&CIPHER_MUST_BE_DISABLED)?", (MUST BE DISABLED)":""); + } else if (slapi_is_loglevel_set(SLAPI_LOG_CONFIG)) { + slapd_SSL_warn("\t%s: disabled%s%s%s", _conf_ciphers[x].name, + (_conf_ciphers[x].flags&CIPHER_IS_WEAK)?", (WEAK CIPHER)":"", + (_conf_ciphers[x].flags&CIPHER_IS_DEPRECATED)?", (DEPRECATED)":"", + (_conf_ciphers[x].flags&CIPHER_MUST_BE_DISABLED)?", (MUST BE DISABLED)":""); + } + } +} + char * _conf_setciphers(char *ciphers) { char *t, err[MAGNUS_ERROR_LEN]; - int x, active; + int x, i, active; char *raw = ciphers; char **suplist = NULL; char **unsuplist = NULL; - - /* Default is to activate all of them */ - if(!ciphers || ciphers[0] == '\0') { - _conf_setallciphers(1, &suplist, NULL); - if (suplist && *suplist) { - if (slapi_is_loglevel_set(SLAPI_LOG_CONFIG)) { - char *str = charray2str(suplist, ","); - slapd_SSL_warn("Security Initialization: FIPS mode is enabled - only the following " - "cipher suites are approved for FIPS: [%s] - " - "all other cipher suites are disabled - if " - "you want to use other cipher suites, you must use modutil to " - "disable FIPS in the internal token.", - str ? str : "(none)"); - slapi_ch_free_string(&str); - } - } - slapi_ch_free((void **)&suplist); /* strings inside are static */ + int lookup; + + /* #47838: harden the list of ciphers available by default */ + /* Default is to activate all of them ==> none of them*/ + if (!ciphers || (ciphers[0] == '\0') || !PL_strcasecmp(ciphers, "default")) { + _conf_setallciphers(CIPHER_SET_DEFAULT, NULL, NULL); + slapd_SSL_warn("Security Initialization: Enabling default cipher set."); + _conf_dumpciphers(); return NULL; } - /* - * Enable all the ciphers by default and the following while loop would - * disable the user disabled ones. This is needed because we added a new - * set of ciphers in the table. Right now there is no support for this - * from the console - */ - _conf_setallciphers(1, &suplist, NULL); + + if (PL_strcasestr(ciphers, "+all")) { + /* + * Enable all the ciphers if "+all" and the following while loop would + * disable the user disabled ones. This is needed because we added a new + * set of ciphers in the table. 
Right now there is no support for this + * from the console + */ + _conf_setallciphers(CIPHER_SET_ALL, &suplist, NULL); + } else { + /* If "+all" is not in nsSSL3Ciphers value, disable all first, + * then enable specified ciphers. */ + _conf_setallciphers(0 /* disabled */, NULL, NULL); + } t = ciphers; while(t) { @@ -354,24 +474,45 @@ _conf_setciphers(char *ciphers) case '-': active = 0; break; default: - PR_snprintf(err, sizeof(err), "invalid ciphers <%s>: format is " - "+cipher1,-cipher2...", raw); + PR_snprintf(err, sizeof(err), "invalid ciphers <%s>: format is " + "+cipher1,-cipher2...", raw); return slapi_ch_strdup(err); } if( (t = strchr(ciphers, ',')) ) *t++ = '\0'; - if(!strcasecmp(ciphers, "all")) - _conf_setallciphers(active, NULL, NULL); - else { + if(strcasecmp(ciphers, "all")) { /* if not all */ + PRBool enabled = active ? PR_TRUE : PR_FALSE; + lookup = 1; for(x = 0; _conf_ciphers[x].name; x++) { - if(!strcasecmp(ciphers, _conf_ciphers[x].name)) { - PRBool enabled = active ? PR_TRUE : PR_FALSE; - if (enabled) { - enabled = cipher_check_fips(x, NULL, &unsuplist); - } - SSL_CipherPrefSetDefault(_conf_ciphers[x].num, enabled); - break; + if(!PL_strcasecmp(ciphers, _conf_ciphers[x].name)) { + if (enabled) { + enabled = cipher_check_fips(x, NULL, &unsuplist); + } + SSL_CipherPrefSetDefault(_conf_ciphers[x].num, enabled); + lookup = 0; + break; + } + } + if (lookup) { /* lookup with old cipher name and get NSS cipherSuiteName */ + for (i = 0; _lookup_cipher[i].alias; i++) { + if (!PL_strcasecmp(ciphers, _lookup_cipher[i].alias)) { + if (!_lookup_cipher[i].name[0]) { + slapd_SSL_warn("Cipher suite %s is not available in NSS %d.%d", + ciphers, NSS_VMAJOR, NSS_VMINOR); + break; + } + for (x = 0; _conf_ciphers[x].name; x++) { + if (!PL_strcasecmp(_lookup_cipher[i].name, _conf_ciphers[x].name)) { + if (enabled) { + enabled = cipher_check_fips(x, NULL, &unsuplist); + } + SSL_CipherPrefSetDefault(_conf_ciphers[x].num, enabled); + break; + } + } + break; + } } } if(!_conf_ciphers[x].name) { @@ -399,6 +540,8 @@ _conf_setciphers(char *ciphers) slapi_ch_free((void **)&suplist); /* strings inside are static */ slapi_ch_free((void **)&unsuplist); /* strings inside are static */ + + _conf_dumpciphers(); return NULL; } @@ -855,7 +998,8 @@ svrcore_setup() * on a secure port. 
*/ int -slapd_ssl_init() { +slapd_ssl_init() +{ PRErrorCode errorCode; char ** family_list; char *val = NULL; @@ -919,7 +1063,7 @@ slapd_ssl_init() { } activation = slapi_entry_attr_get_charptr( entry, "nssslactivation" ); - if((!activation) || (!strcasecmp(activation, "off"))) { + if((!activation) || (!PL_strcasecmp(activation, "off"))) { /* this family was turned off, goto next */ slapi_ch_free((void **) &activation); continue; @@ -929,8 +1073,8 @@ slapd_ssl_init() { token = slapi_entry_attr_get_charptr( entry, "nsssltoken" ); if( token ) { - if( !strcasecmp(token, "internal") || - !strcasecmp(token, "internal (software)")) + if( !PL_strcasecmp(token, "internal") || + !PL_strcasecmp(token, "internal (software)")) slot = slapd_pk11_getInternalKeySlot(); else slot = slapd_pk11_findSlotByName(token); @@ -977,7 +1121,7 @@ slapd_ssl_init() { /* Step Three.5: Set SSL cipher preferences */ *cipher_string = 0; - if(ciphers && (*ciphers) && strcmp(ciphers, "blank")) + if(ciphers && (*ciphers) && PL_strcmp(ciphers, "blank")) PL_strncpyz(cipher_string, ciphers, sizeof(cipher_string)); slapi_ch_free((void **) &ciphers); @@ -991,19 +1135,16 @@ slapd_ssl_init() { } freeConfigEntry( &entry ); - - + /* Introduce a way of knowing whether slapd_ssl_init has * already been executed. */ _security_library_initialized = 1; - - if ( rv != 0 ) - return rv; - + if ( rv != 0 ) { + return rv; + } return 0; - } #if !defined(NSS_TLS10) /* NSS_TLS11 or newer */ @@ -1307,7 +1448,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) } activation = slapi_entry_attr_get_charptr( e, "nssslactivation" ); - if((!activation) || (!strcasecmp(activation, "off"))) { + if((!activation) || (!PL_strcasecmp(activation, "off"))) { /* this family was turned off, goto next */ slapi_ch_free((void **) &activation); freeConfigEntry( &e ); @@ -1319,8 +1460,8 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) token = slapi_entry_attr_get_charptr( e, "nsssltoken" ); personality = slapi_entry_attr_get_charptr( e, "nssslpersonalityssl" ); if( token && personality ) { - if( !strcasecmp(token, "internal") || - !strcasecmp(token, "internal (software)") ) + if( !PL_strcasecmp(token, "internal") || + !PL_strcasecmp(token, "internal (software)") ) PL_strncpyz(cert_name, personality, sizeof(cert_name)); else /* external PKCS #11 token - attach token name */ @@ -1538,9 +1679,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) if ( e != NULL ) { val = slapi_entry_attr_get_charptr( e, "nsSSL3" ); if ( val ) { - if ( !strcasecmp( val, "off" ) ) { + if ( !PL_strcasecmp( val, "off" ) ) { enableSSL3 = PR_FALSE; - } else if ( !strcasecmp( val, "on" ) ) { + } else if ( !PL_strcasecmp( val, "on" ) ) { enableSSL3 = PR_TRUE; } else { enableSSL3 = slapi_entry_attr_get_bool( e, "nsSSL3" ); @@ -1557,9 +1698,9 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) slapi_ch_free_string( &val ); val = slapi_entry_attr_get_charptr( e, "nsTLS1" ); if ( val ) { - if ( !strcasecmp( val, "off" ) ) { + if ( !PL_strcasecmp( val, "off" ) ) { enableTLS1 = PR_FALSE; - } else if ( !strcasecmp( val, "on" ) ) { + } else if ( !PL_strcasecmp( val, "on" ) ) { enableTLS1 = PR_TRUE; } else { enableTLS1 = slapi_entry_attr_get_bool( e, "nsTLS1" ); @@ -1718,7 +1859,7 @@ slapd_SSL_client_auth (LDAP* ld) } activation = slapi_entry_attr_get_charptr( entry, "nssslactivation" ); - if((!activation) || (!strcasecmp(activation, "off"))) { + if((!activation) || (!PL_strcasecmp(activation, "off"))) { /* this family was turned off, goto next */ slapi_ch_free((void **) &activation); freeConfigEntry( &entry 
); @@ -1729,7 +1870,7 @@ slapd_SSL_client_auth (LDAP* ld) personality = slapi_entry_attr_get_charptr( entry, "nssslpersonalityssl" ); cipher = slapi_entry_attr_get_charptr( entry, "cn" ); - if ( cipher && !strcasecmp(cipher, "RSA" )) { + if ( cipher && !PL_strcasecmp(cipher, "RSA" )) { char *ssltoken; /* If there already is a token name, use it */ @@ -1742,8 +1883,8 @@ slapd_SSL_client_auth (LDAP* ld) ssltoken = slapi_entry_attr_get_charptr( entry, "nsssltoken" ); if( ssltoken && personality ) { - if( !strcasecmp(ssltoken, "internal") || - !strcasecmp(ssltoken, "internal (software)") ) { + if( !PL_strcasecmp(ssltoken, "internal") || + !PL_strcasecmp(ssltoken, "internal (software)") ) { /* Translate config internal name to more * readable form. Certificate name is just
0
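The diff fragment in the record above (the nsSSL3Ciphers hardening change) accepts three shapes of value: empty or "default" for the curated NSS default set, a list containing "+all" to enable everything before applying individual toggles, and a plain "+name,-name" list that starts from nothing enabled. As a rough illustration of that accepted format only, here is a small Python sketch; the function name and return shape are invented for this example, and the real parsing is the C code in _conf_setciphers().

```python
def parse_cipher_prefs(value):
    """Illustrative parser for the "+cipher1,-cipher2" style value."""
    if not value or value.lower() == "default":
        return {"mode": "default", "toggles": {}}
    mode = "all" if "+all" in value.lower() else "none"
    toggles = {}
    for token in (t.strip() for t in value.split(",")):
        if not token or token.lower() == "+all":
            continue
        if token[0] not in "+-":
            raise ValueError("invalid ciphers: format is +cipher1,-cipher2...")
        toggles[token[1:].lower()] = token.startswith("+")
    return {"mode": mode, "toggles": toggles}

print(parse_cipher_prefs("+all,-rsa_null_md5,-rsa_null_sha"))
```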
d7b56a1ec60dac5da4959cb11b9d9977f40e431a
389ds/389-ds-base
Issue 6181 - RFE - Allow system to manage uid/gid at startup (#6182) Bug Description: We have a user who wishes to implement a non-standard configuration in which the running gid is not the primary gid of the uid that the server runs as. Currently this trips up most of our setup tools. Rather than support dropping to an alternate gid in the server, it is simpler to allow systemd to pre-configure our user and group at start up. This needs a small number of changes. Fix Description: - dscreate needs to correctly set up file ownerships for dse.ldif and friends rather than relying on the server having root access and changing the perms itself - Our unit file needs to enable the CAP_NET_BIND_SERVICE capability so that the service can bind to ports lower than 1024 without being root - The server needs to not attempt to change its uid/gid if we are already running as that user/gid. fixes: https://github.com/389ds/389-ds-base/issues/6181 Author: William Brown <[email protected]> Review by: @mreynolds389 and @progier389 (Thank you!)
commit d7b56a1ec60dac5da4959cb11b9d9977f40e431a Author: Firstyear <[email protected]> Date: Wed Jun 5 10:18:51 2024 +1000 Issue 6181 - RFE - Allow system to manage uid/gid at startup (#6182) Bug Description: We have a user who wishes to implement a non-standard configuration in which the running gid is not the primary gid of the uid that the server runs as. Currently this trips up most of our setup tools. Rather than support dropping to an alternate gid in the server, it is simpler to allow systemd to pre-configure our user and group at start up. This needs a small number of changes. Fix Description: - dscreate needs to correctly setup file ownships for dse.ldif and friends rather than relying on the server having root access and changing the perms itself - Our unit file needs to enable the CAP_NET_BIND privilege so that the service can bind to ports lower than 1024 without being root - The server needs to not attempt to change it's uid/gid if we are already running as that user/gid. fixes: https://github.com/389ds/389-ds-base/issues/6181 Author: William Brown <[email protected]> Review by: @mreynolds389 and @progier389 (Thank you!) diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index ee32c1a6c..055411b6b 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -817,11 +817,17 @@ main(int argc, char **argv) } /* Now, sockets are open, so we can safely change identity now */ - return_value = main_setuid(slapdFrontendConfig->localuser); - if (0 != return_value) { - slapi_log_err(SLAPI_LOG_ERR, "main", "Failed to change user and group identity to that of %s\n", - slapdFrontendConfig->localuser); - exit(1); + /* + * We can only change uid if we are already root - otherwise it's likely + * that our external service manager has setup the uid/gid for us. + */ + if (getuid() == 0) { + return_value = main_setuid(slapdFrontendConfig->localuser); + if (0 != return_value) { + slapi_log_err(SLAPI_LOG_ERR, "main", "Failed to change user and group identity to that of %s\n", + slapdFrontendConfig->localuser); + exit(1); + } } /* diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index b4cee81e8..9279bcc53 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -1734,7 +1734,7 @@ class DirSrv(SimpleLDAPObject, object): if not self.with_systemd(): return False cp = subprocess.run(["systemctl", "is-system-running"], - universal_newlines=True, capture_output=True) + universal_newlines=True, stdout=subprocess.PIPE) # is-system-running can detect the 7 modes (initializing, starting, # running, degraded, maintenance, stopping, offline) or "unknown". # To keep things simple, we assume that anything other than "offline" diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py index 9e41fb5be..5754fba1d 100644 --- a/src/lib389/lib389/instance/remove.py +++ b/src/lib389/lib389/instance/remove.py @@ -81,8 +81,9 @@ def remove_ds_instance(dirsrv, force=False): # Stop the instance (if running) and now we know it really does exist # and hopefully have permission to access it ... - _log.debug("Stopping instance %s" % dirsrv.serverid) - dirsrv.stop() + if dirsrv.status(): + _log.debug("Stopping instance %s" % dirsrv.serverid) + dirsrv.stop() _log.debug("Found instance marker at %s! Proceeding to remove ..." 
% dse_ldif_path) diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py index 01027bc7e..2b89d118e 100644 --- a/src/lib389/lib389/instance/setup.py +++ b/src/lib389/lib389/instance/setup.py @@ -861,6 +861,7 @@ class SetupDs(object): ldapi_autobind="on", ) file_dse.write(dse_fmt) + os.chown(os.path.join(slapd['config_dir'], 'dse.ldif'), slapd['user_uid'], slapd['group_gid']) self.log.info("Create file system structures ...") # Create all the needed paths @@ -977,7 +978,7 @@ class SetupDs(object): # Create a certificate database. tlsdb = NssSsl(dirsrv=ds_instance, dbpath=slapd['cert_dir']) if not tlsdb._db_exists(): - tlsdb.reinit() + tlsdb.reinit(uid=slapd['user_uid'], gid=slapd['group_gid']) if slapd['self_sign_cert']: self.log.info("Create self-signed certificate database ...") diff --git a/src/lib389/lib389/nss_ssl.py b/src/lib389/lib389/nss_ssl.py index 7bb49ad9f..e6c7804ef 100644 --- a/src/lib389/lib389/nss_ssl.py +++ b/src/lib389/lib389/nss_ssl.py @@ -44,7 +44,7 @@ log = logging.getLogger(__name__) class NssSsl(DSLint): - def __init__(self, dirsrv=None, dbpassword=None, dbpath=None): + def __init__(self, dirsrv=None, dbpassword=None, dbpath=None, uid=None, gid=None): self.dirsrv = dirsrv self._certdb = dbpath if self._certdb is None: @@ -150,7 +150,7 @@ class NssSsl(DSLint): finally: prv_mask = os.umask(prv_mask) - def reinit(self): + def reinit(self, uid=None, gid=None): """ Re-init (create) the nss db. """ @@ -190,6 +190,10 @@ only. if not os.path.exists(pwd_text_file): with open(pwd_text_file, 'w') as f: f.write('%s' % self.dbpassword) + + if uid is not None and gid is not None: + os.chown(pin_file, uid, gid) + os.chown(pwd_text_file, uid, gid) finally: prv_mask = os.umask(prv_mask) @@ -203,8 +207,14 @@ only. except subprocess.CalledProcessError as e: raise ValueError(e.output.decode('utf-8').rstrip()) self.log.debug("nss output: %s", result) + + if uid is not None and gid is not None: + for file in self.db_files["sql_backend"]: + os.chown(file, uid, gid) + return True + def _db_exists(self, even_partial=False): """Check that a nss db exists at the certpath""" diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in index bcb203466..2f62a738c 100644 --- a/wrappers/systemd.template.service.in +++ b/wrappers/systemd.template.service.in @@ -15,9 +15,18 @@ NotifyAccess=all EnvironmentFile=-@initconfigdir@/@package_name@ EnvironmentFile=-@initconfigdir@/@package_name@-%i PIDFile=/run/@package_name@/slapd-%i.pid -ExecStartPre=@libexecdir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif -ExecStartPre=@libexecdir@/ds_selinux_restorecon.sh @instconfigdir@/slapd-%i/dse.ldif + +# The =+ denotes that ExecStartPre tasks will be run with privileges in the case the instance +# is configured with a non root User/Group via an override. +# See https://www.freedesktop.org/software/systemd/man/latest/systemd.service.html#Command%20lines +ExecStartPre=+@libexecdir@/ds_systemd_ask_password_acl @instconfigdir@/slapd-%i/dse.ldif +ExecStartPre=+@libexecdir@/ds_selinux_restorecon.sh @instconfigdir@/slapd-%i/dse.ldif ExecStart=@sbindir@/ns-slapd -D @instconfigdir@/slapd-%i -i /run/@package_name@/slapd-%i.pid + +# Allow non-root instances to bind to low ports. +AmbientCapabilities=CAP_NET_BIND_SERVICE +CapabilityBoundingSet=CAP_NET_BIND_SERVICE + PrivateTmp=on # https://en.opensuse.org/openSUSE:Security_Features#Systemd_hardening_effort ProtectSystem=full
0
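The commit above moves privilege handling out of the server: setup (run as root) chowns the generated files to the service account, systemd grants CAP_NET_BIND_SERVICE, and ns-slapd only calls main_setuid() when it is actually root. Below is a minimal Python sketch of those two setup-side ideas; the function names are hypothetical, and the real changes live in lib389's setup.py/nss_ssl.py and in main.c.

```python
import os

def write_config_owned_by(path, content, uid, gid):
    """Write a file while privileged, then hand it to the service account."""
    with open(path, "w") as f:
        f.write(content)
    os.chown(path, uid, gid)  # e.g. dse.ldif owned by the dirsrv user/group

def maybe_drop_privileges(drop_identity):
    # Mirrors the C-side guard: only switch identity when running as root,
    # otherwise assume the service manager already set uid/gid for us.
    if os.getuid() == 0:
        drop_identity()
```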
c08e8772cbff27451ec807a077a65e6058da7f94
389ds/389-ds-base
Trac Ticket #275 - Invalid read reported by valgrind https://fedorahosted.org/389/ticket/275 Fix description: Since the matching rule type could be normalized and the original string could be freed in filter_normalize_ext, the type needs to have a duplicated string (bitwise.c, plugin_mr.c). Filter_ava functions and filter_sub functions in the syntax plugins need to check if the passed pblock is NULL or not before accessing it.
commit c08e8772cbff27451ec807a077a65e6058da7f94 Author: Noriko Hosoi <[email protected]> Date: Thu Feb 2 14:14:15 2012 -0800 Trac Ticket #275 - Invalid read reported by valgrind https://fedorahosted.org/389/ticket/275 Fix description: Since the matching rule type could be normalized and the original string could be freed in filter_normalize_ext, the type needs to have a duplicated string (bitwise.c, plugin_mr.c). Filter_ava functions and filter_sub functions in the syntax plugins need to check if the passed pblock is NULL or not before accessing it. diff --git a/ldap/servers/plugins/bitwise/bitwise.c b/ldap/servers/plugins/bitwise/bitwise.c index 190e26df8..4672541a9 100644 --- a/ldap/servers/plugins/bitwise/bitwise.c +++ b/ldap/servers/plugins/bitwise/bitwise.c @@ -69,7 +69,7 @@ static struct bitwise_match_cb * new_bitwise_match_cb(char *type, struct berval *val) { struct bitwise_match_cb *bmc = (struct bitwise_match_cb *)slapi_ch_calloc(1, sizeof(struct bitwise_match_cb)); - bmc->type = type; + bmc->type = slapi_ch_strdup(type); bmc->val = val; return bmc; @@ -78,6 +78,7 @@ new_bitwise_match_cb(char *type, struct berval *val) static void delete_bitwise_match_cb(struct bitwise_match_cb *bmc) { + slapi_ch_free_string(&bmc->type); slapi_ch_free((void **)&bmc); } diff --git a/ldap/servers/plugins/syntaxes/bitstring.c b/ldap/servers/plugins/syntaxes/bitstring.c index 2366ffd46..15e946bc6 100644 --- a/ldap/servers/plugins/syntaxes/bitstring.c +++ b/ldap/servers/plugins/syntaxes/bitstring.c @@ -153,17 +153,19 @@ bitstring_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_CES; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } - return( string_filter_ava( bvfilter, bvals, syntax, - ftype, retVal ) ); + return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); } diff --git a/ldap/servers/plugins/syntaxes/ces.c b/ldap/servers/plugins/syntaxes/ces.c index 057183670..626cb7c2a 100644 --- a/ldap/servers/plugins/syntaxes/ces.c +++ b/ldap/servers/plugins/syntaxes/ces.c @@ -297,14 +297,17 @@ ces_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_CES; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal) ); diff --git a/ldap/servers/plugins/syntaxes/cis.c b/ldap/servers/plugins/syntaxes/cis.c index 71e7a7c20..c914fd6f5 100644 --- a/ldap/servers/plugins/syntaxes/cis.c +++ b/ldap/servers/plugins/syntaxes/cis.c @@ -561,14 +561,17 @@ cis_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_CIS; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, 
SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/deliverymethod.c b/ldap/servers/plugins/syntaxes/deliverymethod.c index 99ee59a5d..a763874fe 100644 --- a/ldap/servers/plugins/syntaxes/deliverymethod.c +++ b/ldap/servers/plugins/syntaxes/deliverymethod.c @@ -118,14 +118,17 @@ delivery_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_CIS; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/dn.c b/ldap/servers/plugins/syntaxes/dn.c index bcf951a62..37c1fbaa3 100644 --- a/ldap/servers/plugins/syntaxes/dn.c +++ b/ldap/servers/plugins/syntaxes/dn.c @@ -153,9 +153,12 @@ dn_filter_ava( Slapi_PBlock *pb, struct berval *bvfilter, { int filter_normalized = 0; int syntax = SYNTAX_CIS | SYNTAX_DN; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); } diff --git a/ldap/servers/plugins/syntaxes/facsimile.c b/ldap/servers/plugins/syntaxes/facsimile.c index 21db4b1ab..cb57f1e5b 100644 --- a/ldap/servers/plugins/syntaxes/facsimile.c +++ b/ldap/servers/plugins/syntaxes/facsimile.c @@ -118,14 +118,17 @@ facsimile_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_CIS; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/guide.c b/ldap/servers/plugins/syntaxes/guide.c index 3222083d7..a870c8778 100644 --- a/ldap/servers/plugins/syntaxes/guide.c +++ b/ldap/servers/plugins/syntaxes/guide.c @@ -165,14 +165,17 @@ guide_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_CIS; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/int.c b/ldap/servers/plugins/syntaxes/int.c index e8b881716..e6fc09cc5 100644 --- a/ldap/servers/plugins/syntaxes/int.c +++ b/ldap/servers/plugins/syntaxes/int.c @@ -167,9 
+167,12 @@ int_filter_ava( Slapi_PBlock *pb, struct berval *bvfilter, { int filter_normalized = 0; int syntax = SYNTAX_INT | SYNTAX_CES; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/nameoptuid.c b/ldap/servers/plugins/syntaxes/nameoptuid.c index b5a615e33..e6341e133 100644 --- a/ldap/servers/plugins/syntaxes/nameoptuid.c +++ b/ldap/servers/plugins/syntaxes/nameoptuid.c @@ -158,14 +158,17 @@ nameoptuid_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_CIS | SYNTAX_DN; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/numericstring.c b/ldap/servers/plugins/syntaxes/numericstring.c index 4da67a630..93528fb02 100644 --- a/ldap/servers/plugins/syntaxes/numericstring.c +++ b/ldap/servers/plugins/syntaxes/numericstring.c @@ -173,9 +173,12 @@ numstr_filter_ava( Slapi_PBlock *pb, struct berval *bvfilter, { int filter_normalized = 0; int syntax = SYNTAX_SI | SYNTAX_CES; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/sicis.c b/ldap/servers/plugins/syntaxes/sicis.c index 3608d2c24..74074b9b1 100644 --- a/ldap/servers/plugins/syntaxes/sicis.c +++ b/ldap/servers/plugins/syntaxes/sicis.c @@ -121,14 +121,17 @@ sicis_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_SI | SYNTAX_CIS; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/string.c b/ldap/servers/plugins/syntaxes/string.c index 3840c2e9d..80a1a462c 100644 --- a/ldap/servers/plugins/syntaxes/string.c +++ b/ldap/servers/plugins/syntaxes/string.c @@ -236,9 +236,10 @@ string_filter_sub( Slapi_PBlock *pb, char *initial, char **any, char *final, int free_re = 1; struct subfilt *sf = NULL; - LDAPDebug( LDAP_DEBUG_FILTER, "=> string_filter_sub\n", - 0, 0, 0 ); - slapi_pblock_get( pb, SLAPI_OPERATION, &op ); + LDAPDebug( LDAP_DEBUG_FILTER, "=> string_filter_sub\n", 0, 0, 0 ); + if (pb) { + slapi_pblock_get( pb, SLAPI_OPERATION, &op ); + } if (NULL != op) { slapi_pblock_get( pb, 
SLAPI_SEARCH_TIMELIMIT, &timelimit ); slapi_pblock_get( pb, SLAPI_OPINITIATED_TIME, &optime ); @@ -251,8 +252,10 @@ string_filter_sub( Slapi_PBlock *pb, char *initial, char **any, char *final, */ time_up = ( timelimit==-1 ? -1 : optime + timelimit); - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_DATA, &sf ); + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_DATA, &sf ); + } if ( sf ) { re = (Slapi_Regex *)sf->sf_private; if ( re ) { @@ -757,7 +760,9 @@ string_assertion2keys_sub( char *oaltfinal = NULL; int anysize = 0; - slapi_pblock_get(pb, SLAPI_SYNTAX_SUBSTRLENS, &substrlens); + if (pb) { + slapi_pblock_get(pb, SLAPI_SYNTAX_SUBSTRLENS, &substrlens); + } if (NULL == substrlens) { substrlens = localsublens; diff --git a/ldap/servers/plugins/syntaxes/tel.c b/ldap/servers/plugins/syntaxes/tel.c index 65b6ddde2..b1e7da56e 100644 --- a/ldap/servers/plugins/syntaxes/tel.c +++ b/ldap/servers/plugins/syntaxes/tel.c @@ -179,14 +179,17 @@ tel_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_TEL | SYNTAX_CIS; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/teletex.c b/ldap/servers/plugins/syntaxes/teletex.c index 3e328a97d..b70a19335 100644 --- a/ldap/servers/plugins/syntaxes/teletex.c +++ b/ldap/servers/plugins/syntaxes/teletex.c @@ -118,14 +118,17 @@ teletex_filter_ava( struct berval *bvfilter, Slapi_Value **bvals, int ftype, - Slapi_Value **retVal + Slapi_Value **retVal ) { int filter_normalized = 0; int syntax = SYNTAX_CIS; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/plugins/syntaxes/telex.c b/ldap/servers/plugins/syntaxes/telex.c index 6803810c0..1254f8801 100644 --- a/ldap/servers/plugins/syntaxes/telex.c +++ b/ldap/servers/plugins/syntaxes/telex.c @@ -122,9 +122,12 @@ telex_filter_ava( { int filter_normalized = 0; int syntax = SYNTAX_CIS; - slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, &filter_normalized ); - if (filter_normalized) { - syntax |= SYNTAX_NORM_FILT; + if (pb) { + slapi_pblock_get( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); + if (filter_normalized) { + syntax |= SYNTAX_NORM_FILT; + } } return( string_filter_ava( bvfilter, bvals, syntax, ftype, retVal ) ); diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c index 12dece7fb..ff962ef4b 100644 --- a/ldap/servers/slapd/plugin_mr.c +++ b/ldap/servers/slapd/plugin_mr.c @@ -59,7 +59,7 @@ struct mr_private { below are not used */ const struct slapdplugin *pi; /* our plugin */ const char *oid; /* orig oid */ - const char *type; /* orig type from filter */ + char 
*type; /* orig type from filter */ const struct berval *value; /* orig value from filter */ int ftype; /* filter type */ int op; /* query op type */ @@ -244,7 +244,7 @@ mr_private_new(const struct slapdplugin *pi, const char *oid, const char *type, mrpriv = (struct mr_private *)slapi_ch_calloc(1, sizeof(struct mr_private)); mrpriv->pi = pi; mrpriv->oid = oid; /* should be consistent for lifetime of usage - no copy necessary */ - mrpriv->type = type; /* should be consistent for lifetime of usage - no copy necessary */ + mrpriv->type = slapi_ch_strdup(type); /* should be consistent for lifetime of usage - copy it since it could be normalized in filter_normalize_ext */ mrpriv->value = value; /* should be consistent for lifetime of usage - no copy necessary */ mrpriv->ftype = ftype; mrpriv->op = op; @@ -271,7 +271,7 @@ mr_private_done(struct mr_private *mrpriv) if (mrpriv) { mrpriv->pi = NULL; mrpriv->oid = NULL; - mrpriv->type = NULL; + slapi_ch_free_string(&mrpriv->type); mrpriv->value = NULL; mrpriv->ftype = 0; mrpriv->op = 0; @@ -388,7 +388,7 @@ default_mr_filter_match(void *obj, Slapi_Entry *e, Slapi_Attr *attr) for (; (rc == -1) && (attr != NULL); slapi_entry_next_attr(e, attr, &attr)) { char* type = NULL; if (!slapi_attr_get_type (attr, &type) && type != NULL && - !slapi_attr_type_cmp (mrpriv->type, type, 2/*match subtypes*/)) { + !slapi_attr_type_cmp ((const char *)mrpriv->type, type, 2/*match subtypes*/)) { Slapi_Value **vals = attr_get_present_values(attr); #ifdef SUPPORT_MR_SUBSTRING_MATCHING if (mrpriv->ftype == LDAP_FILTER_SUBSTRINGS) { diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c index 5beb8ce64..df3f7ef4c 100644 --- a/ldap/servers/slapd/search.c +++ b/ldap/servers/slapd/search.c @@ -81,6 +81,7 @@ do_search( Slapi_PBlock *pb ) int rc = -1; int strict = 0; int minssf_exclude_rootdse = 0; + int filter_normalized = 0; LDAPDebug( LDAP_DEBUG_TRACE, "do_search\n", 0, 0, 0 ); @@ -386,6 +387,8 @@ do_search( Slapi_PBlock *pb ) slapi_pblock_set( pb, SLAPI_SEARCH_SCOPE, &scope ); slapi_pblock_set( pb, SLAPI_SEARCH_DEREF, &deref ); slapi_pblock_set( pb, SLAPI_SEARCH_FILTER, filter ); + slapi_pblock_set( pb, SLAPI_PLUGIN_SYNTAX_FILTER_NORMALIZED, + &filter_normalized ); slapi_pblock_set( pb, SLAPI_SEARCH_STRFILTER, fstr ); slapi_pblock_set( pb, SLAPI_SEARCH_ATTRS, attrs ); slapi_pblock_set( pb, SLAPI_SEARCH_GERATTRS, gerattrs );
0
802224f2846900c870a780fe7608782792806d85
389ds/389-ds-base
Ticket #48943 - When fine-grained policy is applied, a sub-tree has a priority over a user while changing password Description: If the user entry has a pwdpolicysubentry, the configuration in the pwpolicy should be the strongest and respected. If the entry does not have it, it retrieves the pwpolicy from the CoS Cache, which is the current behaviour. https://fedorahosted.org/389/ticket/48943 Reviewed by [email protected] (Thank you, William!!)
commit 802224f2846900c870a780fe7608782792806d85 Author: Noriko Hosoi <[email protected]> Date: Sat Jul 30 16:56:57 2016 -0700 Ticket #48943 - When fine-grained policy is applied, a sub-tree has a priority over a user while changing password Description: If the user entry has a pwdpolicysubentry, the configuration in the pwpolicy should be the strongest and respected. If the entry does not have it, it retrieves the pwpolicy from the CoS Cache, which is the current behaviour. https://fedorahosted.org/389/ticket/48943 Reviewed by [email protected] (Thank you, William!!) diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c index 498afd416..6b865ece8 100644 --- a/ldap/servers/slapd/pw.c +++ b/ldap/servers/slapd/pw.c @@ -1777,9 +1777,17 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn) attribute in the target entry itself. */ } else { if ( (e = get_entry( pb, dn )) != NULL ) { - rc = slapi_vattr_values_get(e, "pwdpolicysubentry", &values, - &type_name_disposition, &actual_type_name, - SLAPI_VIRTUALATTRS_REQUEST_POINTERS, &attr_free_flags); + Slapi_Attr* attr = NULL; + rc = slapi_entry_attr_find(e, "pwdpolicysubentry", &attr); + if (attr && (0 == rc)) { + /* If the entry has pwdpolicysubentry, use the PwPolicy. */ + values = valueset_dup(&attr->a_present_values); + } else { + /* Otherwise, retrieve the policy from CoS Cache */ + rc = slapi_vattr_values_get(e, "pwdpolicysubentry", &values, + &type_name_disposition, &actual_type_name, + SLAPI_VIRTUALATTRS_REQUEST_POINTERS, &attr_free_flags); + } if (rc) { values = NULL; }
0
18a5f7e213282753e6a613162a0c4e6574fbf79b
389ds/389-ds-base
Issue 5281 - HIGH - basic test does not run Bug Description: test_basic_ldapagent has an incorrect pytestmark that checks for the presence of the 389-ds-base-snmp package, which might not exist on distros other than Fedora/RHEL. It is also a second declaration of pytestmark that overrides the previous one at the beginning of the test. Fix Description: Instead of checking for package presence, check for the `ldap-agent` binary. Fixes: https://github.com/389ds/389-ds-base/issues/5281 Reviewed by: @mreynolds389, @droideck (Thanks!)
commit 18a5f7e213282753e6a613162a0c4e6574fbf79b Author: Viktor Ashirov <[email protected]> Date: Thu May 5 09:48:20 2022 +0200 Issue 5281 - HIGH - basic test does not run Bug Description: test_basic_ldapagent has an incorrect pytestmark that checks for presence of 389-ds-base-snmp package that might not exist on other distros than Fedora/RHEL. It's also a second declaration of the pytestmark that overrides previous one at the beginning of the test. Fix Description: Instead of checking for package presence, check for `ldap-agent` binary. Fixes: https://github.com/389ds/389-ds-base/issues/5281 Reviewed by: @mreynolds389, @droideck (Thanks!) diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py index da5cc8e30..206bfd007 100644 --- a/dirsrvtests/tests/suites/basic/basic_test.py +++ b/dirsrvtests/tests/suites/basic/basic_test.py @@ -985,7 +985,6 @@ def test_basic_systemctl(topology_st, import_example_ldif): log.info('test_basic_systemctl: PASSED') -pytestmark = pytest.mark.skipif(get_rpm_version("389-ds-base-snmp") == "not installed", reason="389-ds-base-snmp package is not present") def test_basic_ldapagent(topology_st, import_example_ldif): """Tests that the ldap agent starts @@ -1004,6 +1003,8 @@ def test_basic_ldapagent(topology_st, import_example_ldif): log.info('Running test_basic_ldapagent...') + if not os.path.exists(os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent')): + pytest.skip("ldap-agent is not present") var_dir = topology_st.standalone.get_local_state_dir() config_file = os.path.join(topology_st.standalone.get_sysconf_dir(), 'dirsrv/config/agent.conf')
0
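For this test change, the distinction is between a module-level pytestmark (a second assignment silently replaces the first list, which is what broke the earlier marks) and a runtime pytest.skip() inside the test, which works regardless of packaging. A small sketch of the runtime-skip pattern follows; the hard-coded sbin path and the tier0 mark are assumptions for illustration, as the real test asks the instance object for its sbin directory.

```python
import os
import pytest

# A second `pytestmark = ...` assignment later in the file would replace
# this list entirely instead of adding to it.
pytestmark = [pytest.mark.tier0]

def test_ldap_agent_starts():
    sbin_dir = "/usr/sbin"  # assumption; the real test uses get_sbin_dir()
    if not os.path.exists(os.path.join(sbin_dir, "ldap-agent")):
        pytest.skip("ldap-agent is not present")
    # ... the rest of the test would exercise the agent here ...
```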
b5e480f4439f9bede93108e76d3ce5d8fd01fbe9
389ds/389-ds-base
Issue 5532 - Make db compaction TOD more robust. Bug Description: The time-of-day compaction setting does not guarantee that the compaction will happen as configured. This is because the compaction interval starts when the server is started. Once it wakes up and we are "past" the TOD setting we compact, but that can happen at any time once the TOD has passed. Fix Description: Once the compaction interval is hit we create an "event" with the exact time the compaction should start. relates: https://github.com/389ds/389-ds-base/issues/5532 Reviewed by: tbordaz & spichugi (Thanks!!)
commit b5e480f4439f9bede93108e76d3ce5d8fd01fbe9 Author: Mark Reynolds <[email protected]> Date: Wed Nov 16 10:58:22 2022 -0500 Issue 5532 - Make db compaction TOD day more robust. Bug Description: The time of day compaction setting does promise that the compaction will happen as configured. This is becuase the compaction interval starts when the server is started. Once it wakes up and we are "past" the TOD setting then we compact, but it can happen at any time once the TOD has passed. Fix Description: Once the compaction interval is hit we create an "event" with the exact time the compaction should start. relates: https://github.com/389ds/389-ds-base/issues/5532 Reviewed by: tbordaz & spichugi(Thanks!!) diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py index 8dbcbab9a..4bbffb748 100644 --- a/dirsrvtests/tests/suites/config/compact_test.py +++ b/dirsrvtests/tests/suites/config/compact_test.py @@ -1,7 +1,16 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + import logging import pytest import os import time +import datetime from lib389.utils import get_default_db_lib from lib389.tasks import DBCompactTask from lib389.backend import DatabaseConfig @@ -72,11 +81,31 @@ def test_compaction_interval_and_time(topo): """ inst = topo.ms["supplier1"] + + # Calculate the compaction time (1 minute from now) + now = datetime.datetime.now() + current_hour = now.hour + current_minute = now.minute + 2 + if current_hour < 10: + hour = "0" + str(current_hour) + else: + hour = str(current_hour) + if current_minute < 10: + minute = "0" + str(current_minute) + else: + minute = str(current_minute) + compact_time = hour + ":" + minute + + # Set compaction TOD config = DatabaseConfig(inst) - config.set([('nsslapd-db-compactdb-interval', '2'), ('nsslapd-db-compactdb-time', '00:01')]) - inst.deleteErrorLogs() + config.set([('nsslapd-db-compactdb-interval', '2'), ('nsslapd-db-compactdb-time', compact_time)]) + inst.deleteErrorLogs(restart=True) + + # Check compaction occurred as expected + time.sleep(60) + assert not inst.searchErrorsLog("Compacting databases") - time.sleep(6) + time.sleep(61) assert inst.searchErrorsLog("Compacting databases") inst.deleteErrorLogs(restart=False) diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c index c4a8b67eb..35623aa25 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c @@ -104,6 +104,7 @@ static int trans_batch_txn_max_sleep = 50; static PRBool log_flush_thread = PR_FALSE; static int txn_in_progress_count = 0; static int *txn_log_flush_pending = NULL; +static PRBool compaction_scheduled = PR_FALSE; static pthread_mutex_t sync_txn_log_flush; static pthread_cond_t sync_txn_log_flush_done; @@ -3732,13 +3733,12 @@ bdb_log_flush_threadmain(void *param) } /* - * This refreshes the TOD expiration. So live changes to the configuration - * will take effect immediately. 
+ * Get the time in seconds when the compaction should occur */ static time_t bdb_get_tod_expiration(char *expire_time) { - time_t start_time, todays_elapsed_time, now = time(NULL); + time_t todays_elapsed_time, now = time(NULL); struct tm *tm_struct = localtime(&now); char hour_str[3] = {0}; char min_str[3] = {0}; @@ -3748,9 +3748,8 @@ bdb_get_tod_expiration(char *expire_time) /* Get today's start time */ todays_elapsed_time = (tm_struct->tm_hour * 3600) + (tm_struct->tm_min * 60) + (tm_struct->tm_sec); - start_time = slapi_current_utc_time() - todays_elapsed_time; - /* Get the hour and minute and calculate the expiring time. The time was + /* Get the hour and minute and calculate the expiring TOD. The time was * already validated in bdb_config.c: HH:MM */ hour_str[0] = *s++; hour_str[1] = *s++; @@ -3761,7 +3760,73 @@ bdb_get_tod_expiration(char *expire_time) min = strtoll(min_str, &endp, 10); expiring_time = (hour * 60 * 60) + (min * 60); - return start_time + expiring_time; + /* Calculate the time in seconds when the compaction should start, midnight + * requires special treatment (for both current time and configured TOD) */ + if (expiring_time == 0) { + /* Compaction TOD configured for midnight */ + if (todays_elapsed_time == 0) { + /* It's currently midnight, compact now! */ + return 0; + } else { + /* Return the time until it's midnight */ + return _SEC_PER_DAY - todays_elapsed_time; + } + } else if (todays_elapsed_time == 0) { + /* It's currently midnight, just use the configured TOD */ + return expiring_time; + } else if (todays_elapsed_time > expiring_time) { + /* We missed TOD today, do it tomorrow */ + return _SEC_PER_DAY - (todays_elapsed_time - expiring_time); + } else { + /* Compaction is coming up later today */ + return expiring_time - todays_elapsed_time; + } +} + +static void +bdb_compact(time_t when, void *arg) +{ + struct ldbminfo *li = (struct ldbminfo *)arg; + Object *inst_obj; + ldbm_instance *inst; + DB *db = NULL; + int rc = 0; + + for (inst_obj = objset_first_obj(li->li_instance_set); + inst_obj; + inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) + { + inst = (ldbm_instance *)object_get_data(inst_obj); + rc = dblayer_get_id2entry(inst->inst_be, (dbi_db_t **)&db); + if (!db || rc) { + continue; + } + slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting DB start: %s\n", + inst->inst_name); + + rc = bdb_db_compact_one_db(db, inst); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", + "Failed to compact id2entry for %s; db error - %d %s\n", + inst->inst_name, rc, db_strerror(rc)); + break; + } + + /* Time to compact the DB's */ + bdb_force_checkpoint(li); + bdb_do_compact(li, PR_FALSE); + bdb_force_checkpoint(li); + + /* Now reset the timer and compacting flag */ + rc = bdb_db_compact_one_db(db, inst); + if (rc) { + slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", + "Failed to compact for %s; db error - %d %s\n", + inst->inst_name, rc, db_strerror(rc)); + break; + } + } + compaction_scheduled = PR_FALSE; } /* @@ -3806,7 +3871,6 @@ bdb_checkpoint_threadmain(void *param) time_t compactdb_interval = 0; time_t checkpoint_interval = 0; int32_t compactdb_time = 0; - PRBool compacting = PR_FALSE; PR_ASSERT(NULL != param); li = (struct ldbminfo *)param; @@ -3848,15 +3912,6 @@ bdb_checkpoint_threadmain(void *param) PR_Lock(li->li_config_mutex); checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval; compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval; - if (!compacting) { - /* Once we know we want to compact we 
need to stop refreshing the - * TOD expiration. Otherwise if the compact time is close to - * midnight we could roll over past midnight during the checkpoint - * sleep interval, and we'd never actually compact the databases. - * We also need to get this value before the sleep. - */ - compactdb_time = bdb_get_tod_expiration((char *)BDB_CONFIG(li)->bdb_compactdb_time); - } PR_Unlock(li->li_config_mutex); if (compactdb_interval_update != compactdb_interval) { @@ -3946,59 +4001,21 @@ bdb_checkpoint_threadmain(void *param) * this could have been a bug in fact, where compactdb_interval * was 0, if you change while running it would never take effect .... */ - if (slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) { - compacting = PR_TRUE; - if (slapi_current_utc_time() < compactdb_time) { - /* We have passed the interval, but we need to wait for a - * particular TOD to pass before compacting */ - continue; - } - } - if (compactdb_interval_update != compactdb_interval || - slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) { - int rc = 0; - Object *inst_obj; - ldbm_instance *inst; - DB *db = NULL; - - for (inst_obj = objset_first_obj(li->li_instance_set); - inst_obj; - inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) { - inst = (ldbm_instance *)object_get_data(inst_obj); - rc = dblayer_get_id2entry(inst->inst_be, (dbi_db_t **)&db); - if (!db || rc) { - continue; - } - slapi_log_err(SLAPI_LOG_NOTICE, "bdb_checkpoint_threadmain", "Compacting DB start: %s\n", - inst->inst_name); - - rc = bdb_db_compact_one_db(db, inst); - if (rc) { - slapi_log_err(SLAPI_LOG_ERR, "bdb_checkpoint_threadmain", - "compactdb: failed to compact id2entry for %s; db error - %d %s\n", - inst->inst_name, rc, db_strerror(rc)); - break; - } + (slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED && !compaction_scheduled)) + { + /* Get the time in second when the compaction should occur */ + PR_Lock(li->li_config_mutex); + compactdb_time = bdb_get_tod_expiration((char *)BDB_CONFIG(li)->bdb_compactdb_time); + PR_Unlock(li->li_config_mutex); - /* Time to compact the DB's */ - bdb_force_checkpoint(li); - bdb_compact(li, PR_FALSE); - bdb_force_checkpoint(li); - - /* Now reset the timer and compacting flag */ - rc = bdb_db_compact_one_db(db, inst); - if (rc) { - slapi_log_err(SLAPI_LOG_ERR, "bdb_checkpoint_threadmain", - "compactdb: failed to compact changelog for %s; db error - %d %s\n", - inst->inst_name, rc, db_strerror(rc)); - break; - } - } + /* Start compaction event */ + compaction_scheduled = PR_TRUE; + slapi_eq_once_rel(bdb_compact, (void *)li, slapi_current_rel_time_t() + compactdb_time); + /* reset interval timer */ compactdb_interval = compactdb_interval_update; slapi_timespec_expire_at(compactdb_interval, &compactdb_expire); - compacting = PR_FALSE; } } slapi_log_err(SLAPI_LOG_TRACE, "bdb_checkpoint_threadmain", "Check point before leaving\n"); @@ -7078,20 +7095,20 @@ bdb_public_dblayer_compact(Slapi_Backend *be, PRBool just_changelog) li = (struct ldbminfo *)be->be_database->plg_private; bdb_force_checkpoint(li); - rc = bdb_compact(li, just_changelog); + rc = bdb_do_compact(li, just_changelog); bdb_force_checkpoint(li); return rc; } int -bdb_compact(struct ldbminfo *li, PRBool just_changelog) +bdb_do_compact(struct ldbminfo *li, PRBool just_changelog) { Object *inst_obj; ldbm_instance *inst; DB *db = NULL; int rc = 0; - slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", + slapi_log_err(SLAPI_LOG_NOTICE, "bdb_do_compact", "Compacting databases ...\n"); for (inst_obj = 
objset_first_obj(li->li_instance_set); inst_obj; @@ -7103,11 +7120,11 @@ bdb_compact(struct ldbminfo *li, PRBool just_changelog) if (!db || rc) { continue; } - slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", + slapi_log_err(SLAPI_LOG_NOTICE, "bdb_do_compact", "Compacting DB: %s\n", inst->inst_name); rc = bdb_db_compact_one_db(db, inst); if (rc) { - slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", + slapi_log_err(SLAPI_LOG_ERR, "bdb_do_compact", "failed to compact id2entry for %s; db error - %d %s\n", inst->inst_name, rc, db_strerror(rc)); break; @@ -7115,13 +7132,13 @@ bdb_compact(struct ldbminfo *li, PRBool just_changelog) } /* Compact changelog db */ - slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", + slapi_log_err(SLAPI_LOG_NOTICE, "bdb_do_compact", "Compacting Replication Changelog: %s\n", inst->inst_name); dblayer_get_changelog(inst->inst_be, (dbi_db_t **)&db, 0); if (db) { rc = bdb_db_compact_one_db(db, inst); if (rc) { - slapi_log_err(SLAPI_LOG_ERR, "bdb_compact", + slapi_log_err(SLAPI_LOG_ERR, "bdb_do_compact", "failed to compact changelog for %s; db error - %d %s\n", inst->inst_name, rc, db_strerror(rc)); break; @@ -7129,7 +7146,7 @@ bdb_compact(struct ldbminfo *li, PRBool just_changelog) } } - slapi_log_err(SLAPI_LOG_NOTICE, "bdb_compact", "Compacting databases finished.\n"); + slapi_log_err(SLAPI_LOG_NOTICE, "bdb_do_compact", "Compacting databases finished.\n"); return rc; } diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h index 92b5f71a9..b1bcd0241 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h @@ -109,7 +109,7 @@ int bdb_db_size(Slapi_PBlock *pb); int bdb_upgradedb(Slapi_PBlock *pb); int bdb_upgradednformat(Slapi_PBlock *pb); int bdb_upgradeddformat(Slapi_PBlock *pb); -int bdb_compact(struct ldbminfo *li, PRBool just_changelog); +int bdb_do_compact(struct ldbminfo *li, PRBool just_changelog); int bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task); int bdb_cleanup(struct ldbminfo *li); int bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
0
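The core of this fix is turning a configured HH:MM time of day into "seconds from now", rolling over to tomorrow if the time has already passed, and handing that delay to a one-shot event (slapi_eq_once_rel) instead of relying on the checkpoint loop's wake-ups. The following Python sketch roughly mirrors the arithmetic in bdb_get_tod_expiration(); it is an approximation for illustration, not the server code.

```python
import datetime

def seconds_until_tod(hhmm, now=None):
    """Seconds from `now` until the next occurrence of an HH:MM time of day."""
    now = now or datetime.datetime.now()
    hour, minute = (int(part) for part in hhmm.split(":"))
    target = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
    if target <= now:
        target += datetime.timedelta(days=1)  # missed it today, run tomorrow
    return int((target - now).total_seconds())

# The server would then schedule the compaction this many seconds from now.
print(seconds_until_tod("00:01"))
```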
b5e708a3e907381c66fbcdba2ed1fd69a99198f3
389ds/389-ds-base
Bug 693503 - matching rules do not inherit from superior attribute type https://bugzilla.redhat.com/show_bug.cgi?id=693503 Resolves: bug 693503 Bug Description: matching rules do not inherit from superior attribute type Reviewed by: nkinder (Thanks!) Branch: master Fix Description: If the attribute definition specifies a superior, use the superior equality, substrings, and ordering matching rule from the superior if not specified in the child. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit b5e708a3e907381c66fbcdba2ed1fd69a99198f3 Author: Rich Megginson <[email protected]> Date: Mon Apr 4 14:20:06 2011 -0600 Bug 693503 - matching rules do not inherit from superior attribute type https://bugzilla.redhat.com/show_bug.cgi?id=693503 Resolves: bug 693503 Bug Description: matching rules do not inherit from superior attribute type Reviewed by: nkinder (Thanks!) Branch: master Fix Description: If the attribute definition specifies a superior, use the superior equality, substrings, and ordering matching rule from the superior if not specified in the child. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index fa90d7f3b..4c8166fa6 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -3441,21 +3441,31 @@ read_at_ldif(const char *input, struct asyntaxinfo **asipp, char *errorbuf, status = invalid_syntax_error; /* We only want to use the parent syntax if a SYNTAX * wasn't explicitly specified for this attribute. */ - } else if (NULL == pSyntax) { + } else if ((NULL == pSyntax) || (NULL == pMREquality) || (NULL == pMRSubstring) || + (NULL == pMROrdering)) { char *pso = asi_parent->asi_plugin->plg_syntax_oid; - if (pso) { - slapi_ch_free ((void **)&pSyntax); + if (pso && (NULL == pSyntax)) { pSyntax = slapi_ch_strdup(pso); LDAPDebug (LDAP_DEBUG_TRACE, "Inheriting syntax %s from parent type %s\n", pSyntax, pSuperior,NULL); - } else { + } else if (NULL == pSyntax) { schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_at, first_attr_name, "Missing parent attribute syntax OID"); status = invalid_syntax_error; } + + if (NULL == pMREquality) { + pMREquality = slapi_ch_strdup(asi_parent->asi_mr_equality); + } + if (NULL == pMRSubstring) { + pMRSubstring = slapi_ch_strdup(asi_parent->asi_mr_substring); + } + if (NULL == pMROrdering) { + pMROrdering = slapi_ch_strdup(asi_parent->asi_mr_ordering); + } attr_syntax_return( asi_parent ); } }
0
75a51aa395018d75b7feca38101cc89fa4627e1c
389ds/389-ds-base
Issue 50872 - dsconf can't create GSSAPI replication agreements Description: An error in python syntax broke the check for bind method vs credentials. Bug was found and fixed by: quentinmit (Thanks!) relates: https://pagure.io/389-ds-base/issue/50872 Reviewed by: mreynolds
commit 75a51aa395018d75b7feca38101cc89fa4627e1c Author: Mark Reynolds <[email protected]> Date: Thu Feb 27 15:01:40 2020 -0500 Issue 50872 - dsconf can't create GSSAPI replication agreements Description: An error in python syntax broke the check for bind method vs credentials. Bug was found and fixed by: quentinmit (Thanks!) relates: https://pagure.io/389-ds-base/issue/50872 Reviewed by: mreynolds diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py index 1c0c9253d..8e77c36b2 100644 --- a/src/lib389/lib389/cli_conf/replication.py +++ b/src/lib389/lib389/cli_conf/replication.py @@ -663,7 +663,7 @@ def add_agmt(inst, basedn, log, args): properties['nsds5replicastripattrs'] = args.strip_list # We do need the bind dn and credentials for none-sasl bind methods - if (bind_method == 'simple' or 'sslclientauth') and (args.bind_dn is None or args.bind_passwd is None): + if (bind_method in ('simple', 'sslclientauth')) and (args.bind_dn is None or args.bind_passwd is None): raise ValueError("You need to set the bind dn (--bind-dn) and the password (--bind-passwd) for bind method ({})".format(bind_method)) # Create the agmt diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py index 0fc1275b8..e3fc7fe1f 100644 --- a/src/lib389/lib389/replica.py +++ b/src/lib389/lib389/replica.py @@ -16,7 +16,6 @@ import logging import uuid import json import copy -from shutil import copyfile from operator import itemgetter from itertools import permutations from lib389._constants import *
0
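The one-line fix above is worth spelling out, because the broken form looks plausible: `bind_method == 'simple' or 'sslclientauth'` parses as `(bind_method == 'simple') or 'sslclientauth'`, and the non-empty string makes the whole expression truthy for every bind method, so GSSAPI agreements were wrongly required to supply a bind DN and password. A short, self-contained demonstration:

```python
bind_method = "sasl/gssapi"

broken = bind_method == "simple" or "sslclientauth"   # always truthy
fixed = bind_method in ("simple", "sslclientauth")    # membership test

print(bool(broken))  # True  -> credentials demanded even for GSSAPI
print(fixed)         # False -> GSSAPI no longer needs bind dn/password
```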
6ee822f1549ff60e1d3a5993930339557aa3ce7b
389ds/389-ds-base
bump version to 1.3.0.rc1
commit 6ee822f1549ff60e1d3a5993930339557aa3ce7b Author: Noriko Hosoi <[email protected]> Date: Tue Dec 11 14:32:48 2012 -0800 bump version to 1.3.0.rc1 diff --git a/VERSION.sh b/VERSION.sh index 487c21deb..d786b37c4 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -14,7 +14,7 @@ VERSION_MAINT=0 # if this is a PRERELEASE, set VERSION_PREREL # otherwise, comment it out # be sure to include the dot prefix in the prerel -VERSION_PREREL=.a1 +VERSION_PREREL=.rc1 # NOTES on VERSION_PREREL # use aN for an alpha release e.g. a1, a2, etc. # use rcN for a release candidate e.g. rc1, rc2, etc.
0
3bf2d596dec57d3ba1496bc1c45bb9d1fc1f815f
389ds/389-ds-base
Ticket #48048 - Fix coverity issues - 2015/2/24 Coverity defect 13074 - Dereference after null check (FORWARD_NULL) Description: Added NULL check for entry. If NULL, continue the loop. modified: write_replog_db in retrocl_po.c
commit 3bf2d596dec57d3ba1496bc1c45bb9d1fc1f815f Author: Noriko Hosoi <[email protected]> Date: Tue Feb 24 12:33:38 2015 -0800 Ticket #48048 - Fix coverity issues - 2015/2/24 Coverity defect 13074 - Dereference after null check (FORWARD_NULL) Description: Added NULL check for entry. If NULL, continue the loop. modified: write_replog_db in retrocl_po.c diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c index 4b2cdda6a..9128fef1e 100644 --- a/ldap/servers/plugins/retrocl/retrocl_po.c +++ b/ldap/servers/plugins/retrocl/retrocl_po.c @@ -231,7 +231,7 @@ write_replog_db( if ( entry == NULL ) { slapi_pblock_get( pb, SLAPI_ENTRY_PRE_OP, &entry ); } - + if ( entry == NULL ) continue; uniqueId = slapi_entry_get_uniqueid( entry ); slapi_log_error( SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME,
0
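The retrocl change above follows a simple defensive pattern: the plugin first tries the post-op entry, falls back to the pre-op entry, and now skips the iteration entirely if neither is available rather than dereferencing NULL. A loose Python analogue of that pattern (the dictionaries and keys are illustrative, not the slapi pblock API):

operations = [
    {"post_op_entry": {"uniqueid": "abc-123"}},
    {"pre_op_entry": {"uniqueid": "def-456"}},
    {},  # neither entry available, e.g. the operation was aborted early
]

for op in operations:
    entry = op.get("post_op_entry") or op.get("pre_op_entry")
    if entry is None:
        continue  # nothing to record for this operation
    print(entry["uniqueid"])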
800aac85273aa44597e44a1935c630486157dc2f
389ds/389-ds-base
Bug 619122 - fix Coverity Defect Type: Resource leak issues CID 11975 - 12053 https://bugzilla.redhat.com/show_bug.cgi?id=619122 Resolves: bug 619122 Bug description: fix Coverity Defect Type: Resource leak issues CID 11983. Description: The pw_rever_encode() has been modified to release pwsp at the end of each loop iteration.
commit 800aac85273aa44597e44a1935c630486157dc2f Author: Endi S. Dewata <[email protected]> Date: Wed Jul 28 16:02:08 2010 -0500 Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053 https://bugzilla.redhat.com/show_bug.cgi?id=619122 Resolves: bug 619122 Bug description: fix coverify Defect Type: Resource leaks issues CID 11983. description: The pw_rever_encode() has been modified to release pwsp at the end of each loop iteration. diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c index c528dfea2..cb494941f 100644 --- a/ldap/servers/slapd/pw.c +++ b/ldap/servers/slapd/pw.c @@ -563,11 +563,11 @@ pw_rever_encode(Slapi_Value **vals, char * attr_name) return (0); } } + free_pw_scheme( pwsp ); } } } - free_pw_scheme( pwsp ); return(-1); }
0
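The leak fixed above comes from allocating a resource on every loop iteration but releasing it only once, after the loop. A loose Python analogy of the before/after shape (C's explicit free_pw_scheme() is modelled with a free() method, and a global list stands in for the allocator; none of these names come from the server code):

allocated = []

class Scheme:
    """Stand-in for the per-iteration password-scheme allocation in C."""
    def __init__(self, name):
        self.name = name
        allocated.append(self)
    def free(self):
        allocated.remove(self)

def encode_all_leaky(names):
    for name in names:
        scheme = Scheme(name)
        # ... use scheme ...
    scheme.free()          # only the last allocation is released

def encode_all_fixed(names):
    for name in names:
        scheme = Scheme(name)
        try:
            pass           # ... use scheme ...
        finally:
            scheme.free()  # every iteration releases what it allocated

encode_all_leaky(["CLEAR", "SSHA", "AES"])
assert len(allocated) == 2      # two schemes leaked
allocated.clear()
encode_all_fixed(["CLEAR", "SSHA", "AES"])
assert len(allocated) == 0      # nothing leaked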
836a31dcdf2bf5f74ee5f93d4ba40f827af34716
389ds/389-ds-base
Ticket #320 - allow most plugins to be betxn plugins This allows plugins to run as betxn plugins instead of regular pre/post op plugins. For plugins that are not preoperation or postoperation in the DSE, e.g. that are "object", I've introduced a boolean attribute nsslapd-pluginbetxn. If this is set to a true value, the internal pre/post operation plugins registered will be made betxn plugins instead. Looks like cos cannot currently be made a betxn plugin - there is a deadlock when deleting from the ldbm and DSE backends at the same time. Everything else passes the acceptance tests. Reviewed by: nhosoi (Thanks!)
commit 836a31dcdf2bf5f74ee5f93d4ba40f827af34716 Author: Rich Megginson <[email protected]> Date: Wed Mar 7 10:47:05 2012 -0700 Ticket #320 - allow most plugins to be betxn plugins This allows plugins to run as betxn plugins instead of regular pre/post op plugins. For plugins that are not preoperation or postoperation in the DSE, e.g. that are "object", I've introduced a boolean attribute nsslapd-pluginbetxn. If this is set to a true value, the internal pre/post operation plugins registered will be made betxn plugins instead. Looks like cos cannot currently be made a betxn plugin - there is a deadlock when deleting from the ldbm and DSE backends at the same time. Everything else passes the acceptance tests. Reviewed by: nhosoi (Thanks!) diff --git a/ldap/servers/plugins/cos/cos.c b/ldap/servers/plugins/cos/cos.c index 78ae5d660..6dfd857cb 100644 --- a/ldap/servers/plugins/cos/cos.c +++ b/ldap/servers/plugins/cos/cos.c @@ -170,17 +170,30 @@ int cos_postop_init ( Slapi_PBlock *pb ) { int rc = 0; + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int postadd = SLAPI_PLUGIN_POST_ADD_FN; + int postmod = SLAPI_PLUGIN_POST_MODIFY_FN; + int postmdn = SLAPI_PLUGIN_POST_MODRDN_FN; + int postdel = SLAPI_PLUGIN_POST_DELETE_FN; + + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + postadd = SLAPI_PLUGIN_BE_TXN_POST_ADD_FN; + postmod = SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN; + postmdn = SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN; + postdel = SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN; + } + slapi_ch_free_string(&plugin_type); if ( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01 ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_MODIFY_FN, - (void *)cos_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_MODRDN_FN, - (void *)cos_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_ADD_FN, - (void *) cos_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_DELETE_FN, - (void *) cos_post_op ) != 0 ) + slapi_pblock_set(pb, postmod, (void *)cos_post_op ) != 0 || + slapi_pblock_set(pb, postmdn, (void *)cos_post_op ) != 0 || + slapi_pblock_set(pb, postadd, (void *) cos_post_op ) != 0 || + slapi_pblock_set(pb, postdel, (void *) cos_post_op ) != 0 ) { slapi_log_error( SLAPI_LOG_FATAL, COS_PLUGIN_SUBSYSTEM, "cos_postop_init: failed to register plugin\n" ); @@ -221,9 +234,17 @@ int cos_init( Slapi_PBlock *pb ) { int ret = 0; void * plugin_identity=NULL; + Slapi_Entry *plugin_entry = NULL; + int is_betxn = 0; + const char *plugintype = "postoperation"; LDAPDebug( LDAP_DEBUG_TRACE, "--> cos_init\n",0,0,0); + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry) { + is_betxn = slapi_entry_attr_get_bool(plugin_entry, "nsslapd-pluginbetxn"); + } + /* ** Store the plugin identity for later use. 
** Used for internal operations @@ -248,7 +269,10 @@ int cos_init( Slapi_PBlock *pb ) goto bailout; } - ret = slapi_register_plugin("postoperation", 1 /* Enabled */, + if (is_betxn) { + plugintype = "betxnpostoperation"; + } + ret = slapi_register_plugin(plugintype, 1 /* Enabled */, "cos_postop_init", cos_postop_init, "Class of Service postoperation plugin", NULL, plugin_identity); @@ -256,10 +280,12 @@ int cos_init( Slapi_PBlock *pb ) goto bailout; } - ret = slapi_register_plugin("internalpostoperation", 1 /* Enabled */, - "cos_internalpostop_init", cos_internalpostop_init, - "Class of Service internalpostoperation plugin", NULL, - plugin_identity); + if (!is_betxn) { + ret = slapi_register_plugin("internalpostoperation", 1 /* Enabled */, + "cos_internalpostop_init", cos_internalpostop_init, + "Class of Service internalpostoperation plugin", NULL, + plugin_identity); + } bailout: LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_init\n",0,0,0); diff --git a/ldap/servers/plugins/retrocl/retrocl.c b/ldap/servers/plugins/retrocl/retrocl.c index aedd61652..90c34556e 100644 --- a/ldap/servers/plugins/retrocl/retrocl.c +++ b/ldap/servers/plugins/retrocl/retrocl.c @@ -118,14 +118,31 @@ int retrocl_postop_modrdn (Slapi_PBlock *pb) { return retrocl_postob(pb,OP_MODRD int retrocl_postop_init( Slapi_PBlock *pb ) { - int rc= 0; /* OK */ + int rc= 0; /* OK */ + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int postadd = SLAPI_PLUGIN_POST_ADD_FN; + int postmod = SLAPI_PLUGIN_POST_MODIFY_FN; + int postmdn = SLAPI_PLUGIN_POST_MODRDN_FN; + int postdel = SLAPI_PLUGIN_POST_DELETE_FN; + + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + postadd = SLAPI_PLUGIN_BE_TXN_POST_ADD_FN; + postmod = SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN; + postmdn = SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN; + postdel = SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN; + } + slapi_ch_free_string(&plugin_type); if( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01 ) != 0 || - slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&retroclpostopdesc ) != 0 || - slapi_pblock_set( pb, SLAPI_PLUGIN_POST_ADD_FN, (void *) retrocl_postop_add ) != 0 || - slapi_pblock_set( pb, SLAPI_PLUGIN_POST_DELETE_FN, (void *) retrocl_postop_delete ) != 0 || - slapi_pblock_set( pb, SLAPI_PLUGIN_POST_MODIFY_FN, (void *) retrocl_postop_modify ) != 0 || - slapi_pblock_set( pb, SLAPI_PLUGIN_POST_MODRDN_FN, (void *) retrocl_postop_modrdn ) != 0 ) + slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&retroclpostopdesc ) != 0 || + slapi_pblock_set( pb, postadd, (void *) retrocl_postop_add ) != 0 || + slapi_pblock_set( pb, postdel, (void *) retrocl_postop_delete ) != 0 || + slapi_pblock_set( pb, postmod, (void *) retrocl_postop_modify ) != 0 || + slapi_pblock_set( pb, postmdn, (void *) retrocl_postop_modrdn ) != 0 ) { slapi_log_error( SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, "retrocl_postop_init failed\n" ); rc= -1; @@ -417,9 +434,12 @@ int retrocl_plugin_init(Slapi_PBlock *pb) { static int legacy_initialised= 0; - int rc = 0; + int rc = 0; int precedence = 0; void *identity = NULL; + Slapi_Entry *plugin_entry = NULL; + int is_betxn = 0; + const char *plugintype = "postoperation"; slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &identity); PR_ASSERT (identity); @@ -427,14 +447,24 @@ retrocl_plugin_init(Slapi_PBlock *pb) slapi_pblock_get( pb, SLAPI_PLUGIN_PRECEDENCE, &precedence ); + 
if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry) { + is_betxn = slapi_entry_attr_get_bool(plugin_entry, "nsslapd-pluginbetxn"); + } + if (!legacy_initialised) { rc= slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01 ); rc= slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&retrocldesc ); rc= slapi_pblock_set( pb, SLAPI_PLUGIN_START_FN, (void *) retrocl_start ); rc= slapi_pblock_set( pb, SLAPI_PLUGIN_CLOSE_FN, (void *) retrocl_stop ); - rc= slapi_register_plugin_ext("postoperation", 1 /* Enabled */, "retrocl_postop_init", retrocl_postop_init, "Retrocl postoperation plugin", NULL, identity, precedence); - rc= slapi_register_plugin_ext("internalpostoperation", 1 /* Enabled */, "retrocl_internalpostop_init", retrocl_internalpostop_init, "Retrocl internal postoperation plugin", NULL, identity, precedence); + if (is_betxn) { + plugintype = "betxnpostoperation"; + } + rc= slapi_register_plugin_ext(plugintype, 1 /* Enabled */, "retrocl_postop_init", retrocl_postop_init, "Retrocl postoperation plugin", NULL, identity, precedence); + if (!is_betxn) { + rc= slapi_register_plugin_ext("internalpostoperation", 1 /* Enabled */, "retrocl_internalpostop_init", retrocl_internalpostop_init, "Retrocl internal postoperation plugin", NULL, identity, precedence); + } retrocl_internal_lock = PR_NewLock(); if (retrocl_internal_lock == NULL) return -1; diff --git a/ldap/servers/plugins/roles/roles_plugin.c b/ldap/servers/plugins/roles/roles_plugin.c index 89bcdb56e..eab773cab 100644 --- a/ldap/servers/plugins/roles/roles_plugin.c +++ b/ldap/servers/plugins/roles/roles_plugin.c @@ -81,17 +81,30 @@ int roles_postop_init ( Slapi_PBlock *pb ) { int rc = 0; + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int postadd = SLAPI_PLUGIN_POST_ADD_FN; + int postmod = SLAPI_PLUGIN_POST_MODIFY_FN; + int postmdn = SLAPI_PLUGIN_POST_MODRDN_FN; + int postdel = SLAPI_PLUGIN_POST_DELETE_FN; + + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + postadd = SLAPI_PLUGIN_BE_TXN_POST_ADD_FN; + postmod = SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN; + postmdn = SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN; + postdel = SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN; + } + slapi_ch_free_string(&plugin_type); if ( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01 ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_MODIFY_FN, - (void *)roles_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_MODRDN_FN, - (void *)roles_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_ADD_FN, - (void *) roles_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_DELETE_FN, - (void *) roles_post_op ) != 0 ) + slapi_pblock_set(pb, postmod, (void *)roles_post_op ) != 0 || + slapi_pblock_set(pb, postmdn, (void *)roles_post_op ) != 0 || + slapi_pblock_set(pb, postadd, (void *) roles_post_op ) != 0 || + slapi_pblock_set(pb, postdel, (void *) roles_post_op ) != 0 ) { slapi_log_error( SLAPI_LOG_FATAL, ROLES_PLUGIN_SUBSYSTEM, "roles_postop_init: failed to register plugin\n" ); @@ -131,6 +144,9 @@ int roles_init( Slapi_PBlock *pb ) { int rc = 0; void *plugin_identity = NULL; + Slapi_Entry *plugin_entry = NULL; + int is_betxn = 0; + const char *plugintype = "postoperation"; slapi_log_error( SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "=> roles_init\n" ); @@ -139,6 +155,11 @@ int roles_init( Slapi_PBlock *pb 
) PR_ASSERT (plugin_identity); roles_set_plugin_identity(plugin_identity); + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry) { + is_betxn = slapi_entry_attr_get_bool(plugin_entry, "nsslapd-pluginbetxn"); + } + if ( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_VERSION_01 ) != 0 || slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, @@ -154,7 +175,10 @@ int roles_init( Slapi_PBlock *pb ) goto bailout; } - rc = slapi_register_plugin("postoperation", 1 /* Enabled */, + if (is_betxn) { + plugintype = "betxnpostoperation"; + } + rc = slapi_register_plugin(plugintype, 1 /* Enabled */, "roles_postop_init", roles_postop_init, "Roles postoperation plugin", NULL, plugin_identity); @@ -162,10 +186,12 @@ int roles_init( Slapi_PBlock *pb ) goto bailout; } - rc = slapi_register_plugin("internalpostoperation", 1 /* Enabled */, - "roles_internalpostop_init", roles_internalpostop_init, - "Roles internalpostoperation plugin", NULL, - plugin_identity); + if (!is_betxn) { + rc = slapi_register_plugin("internalpostoperation", 1 /* Enabled */, + "roles_internalpostop_init", roles_internalpostop_init, + "Roles internalpostoperation plugin", NULL, + plugin_identity); + } bailout: slapi_log_error( SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "<= roles_init %d\n", rc ); diff --git a/ldap/servers/plugins/statechange/statechange.c b/ldap/servers/plugins/statechange/statechange.c index 0653eedf4..0b2b73701 100644 --- a/ldap/servers/plugins/statechange/statechange.c +++ b/ldap/servers/plugins/statechange/statechange.c @@ -111,23 +111,36 @@ void plugin_init_debug_level(int *level_ptr) int statechange_init( Slapi_PBlock *pb ) { int ret = 0; + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int postadd = SLAPI_PLUGIN_POST_ADD_FN; + int postmod = SLAPI_PLUGIN_POST_MODIFY_FN; + int postmdn = SLAPI_PLUGIN_POST_MODRDN_FN; + int postdel = SLAPI_PLUGIN_POST_DELETE_FN; slapi_log_error( SLAPI_LOG_TRACE, SCN_PLUGIN_SUBSYSTEM, "--> statechange_init\n"); + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + postadd = SLAPI_PLUGIN_BE_TXN_POST_ADD_FN; + postmod = SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN; + postmdn = SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN; + postdel = SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN; + } + slapi_ch_free_string(&plugin_type); + head = 0; if ( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01 ) != 0 || slapi_pblock_set(pb, SLAPI_PLUGIN_START_FN, (void *) statechange_start ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_MODIFY_FN, - (void *) statechange_mod_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_MODRDN_FN, - (void *) statechange_modrdn_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_ADD_FN, - (void *) statechange_add_post_op ) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_POST_DELETE_FN, - (void *) statechange_delete_post_op ) != 0 || + slapi_pblock_set(pb, postmod, (void *) statechange_mod_post_op ) != 0 || + slapi_pblock_set(pb, postmdn, (void *) statechange_modrdn_post_op ) != 0 || + slapi_pblock_set(pb, postadd, (void *) statechange_add_post_op ) != 0 || + slapi_pblock_set(pb, postdel, (void *) statechange_delete_post_op ) != 0 || slapi_pblock_set(pb, SLAPI_PLUGIN_CLOSE_FN, (void *) statechange_close ) != 0 || slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, diff --git a/ldap/servers/plugins/uiduniq/7bit.c 
b/ldap/servers/plugins/uiduniq/7bit.c index 3e0b9215b..1b263f807 100644 --- a/ldap/servers/plugins/uiduniq/7bit.c +++ b/ldap/servers/plugins/uiduniq/7bit.c @@ -692,6 +692,11 @@ int NS7bitAttr_Init(Slapi_PBlock *pb) { int err = 0; + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int preadd = SLAPI_PLUGIN_PRE_ADD_FN; + int premod = SLAPI_PLUGIN_PRE_MODIFY_FN; + int premdn = SLAPI_PLUGIN_PRE_MODRDN_FN; BEGIN int argc; @@ -703,6 +708,16 @@ NS7bitAttr_Init(Slapi_PBlock *pb) SLAPI_PLUGIN_VERSION_01); if (err) break; + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + preadd = SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN; + premod = SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN; + premdn = SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN; + } + slapi_ch_free_string(&plugin_type); + /* * Get and normalize arguments */ @@ -734,16 +749,13 @@ NS7bitAttr_Init(Slapi_PBlock *pb) if (err) break; /* Register functions */ - err = slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_ADD_FN, - (void*)preop_add); + err = slapi_pblock_set(pb, preadd, (void*)preop_add); if (err) break; - err = slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_MODIFY_FN, - (void*)preop_modify); + err = slapi_pblock_set(pb, premod, (void*)preop_modify); if (err) break; - err = slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_MODRDN_FN, - (void*)preop_modrdn); + err = slapi_pblock_set(pb, premdn, (void*)preop_modrdn); if (err) break; END diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c index 8bd5e17b0..25bc4e947 100644 --- a/ldap/servers/plugins/uiduniq/uid.c +++ b/ldap/servers/plugins/uiduniq/uid.c @@ -1013,6 +1013,11 @@ int NSUniqueAttr_Init(Slapi_PBlock *pb) { int err = 0; + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int preadd = SLAPI_PLUGIN_PRE_ADD_FN; + int premod = SLAPI_PLUGIN_PRE_MODIFY_FN; + int premdn = SLAPI_PLUGIN_PRE_MODRDN_FN; BEGIN int argc; @@ -1031,6 +1036,16 @@ NSUniqueAttr_Init(Slapi_PBlock *pb) slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &plugin_identity); /* PR_ASSERT (plugin_identity); */ + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + preadd = SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN; + premod = SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN; + premdn = SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN; + } + slapi_ch_free_string(&plugin_type); + /* * Get and normalize arguments */ @@ -1056,16 +1071,13 @@ NSUniqueAttr_Init(Slapi_PBlock *pb) if (err) break; /* Register functions */ - err = slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_ADD_FN, - (void*)preop_add); + err = slapi_pblock_set(pb, preadd, (void*)preop_add); if (err) break; - err = slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_MODIFY_FN, - (void*)preop_modify); + err = slapi_pblock_set(pb, premod, (void*)preop_modify); if (err) break; - err = slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_MODRDN_FN, - (void*)preop_modrdn); + err = slapi_pblock_set(pb, premdn, (void*)preop_modrdn); if (err) break; END diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c index faa737c5e..617589e56 100644 --- a/ldap/servers/plugins/usn/usn.c +++ b/ldap/servers/plugins/usn/usn.c @@ -78,12 +78,20 @@ usn_init(Slapi_PBlock *pb) { int rc = 0; void *identity = NULL; + Slapi_Entry *plugin_entry = NULL; + int is_betxn = 0; + const char *plugintype; 
slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM, "--> usn_init\n"); slapi_pblock_get(pb, SLAPI_PLUGIN_IDENTITY, &identity); + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry) { + is_betxn = slapi_entry_attr_get_bool(plugin_entry, "nsslapd-pluginbetxn"); + } + /* slapi_register_plugin always returns SUCCESS (0) */ if (slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01) != 0 || @@ -104,13 +112,25 @@ usn_init(Slapi_PBlock *pb) goto bail; } - rc = slapi_register_plugin("preoperation", 1 /* Enabled */, + plugintype = "preoperation"; + if (is_betxn) { + plugintype = "betxnpreoperation"; + } + rc = slapi_register_plugin(plugintype, 1 /* Enabled */, "usn_preop_init", usn_preop_init, "USN preoperation plugin", NULL, identity); - rc |= slapi_register_plugin("bepreoperation", 1 /* Enabled */, + plugintype = "bepreoperation"; + if (is_betxn) { + plugintype = "betxnpreoperation"; + } + rc |= slapi_register_plugin(plugintype, 1 /* Enabled */, "usn_bepreop_init", usn_bepreop_init, "USN bepreoperation plugin", NULL, identity); - rc |= slapi_register_plugin("bepostoperation", 1 /* Enabled */, + plugintype = "bepostoperation"; + if (is_betxn) { + plugintype = "betxnpostoperation"; + } + rc |= slapi_register_plugin(plugintype, 1 /* Enabled */, "usn_bepostop_init", usn_bepostop_init, "USN bepostoperation plugin", NULL, identity); usn_set_identity(identity); @@ -124,6 +144,17 @@ static int usn_preop_init(Slapi_PBlock *pb) { int rc = 0; + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int predel = SLAPI_PLUGIN_PRE_DELETE_FN; + + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + predel = SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN; + } + slapi_ch_free_string(&plugin_type); /* set up csn generator for tombstone */ _usn_csngen = csngen_new(USN_CSNGEN_ID, NULL); @@ -133,8 +164,7 @@ usn_preop_init(Slapi_PBlock *pb) rc = -1; } - if (slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_DELETE_FN, - (void *)usn_preop_delete) != 0) { + if (slapi_pblock_set(pb, predel, (void *)usn_preop_delete) != 0) { slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM, "usn_preop_init: failed to register preop plugin\n"); rc = -1; @@ -147,17 +177,30 @@ static int usn_bepreop_init(Slapi_PBlock *pb) { int rc = 0; - - if (slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_ADD_FN, - (void *)usn_bepreop_add) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_DELETE_FN, - (void *)usn_bepreop_delete) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_MODIFY_FN, - (void *)usn_bepreop_modify) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_MODRDN_FN, - (void *)usn_bepreop_modify) != 0) { + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int preadd = SLAPI_PLUGIN_BE_PRE_ADD_FN; + int premod = SLAPI_PLUGIN_BE_PRE_MODIFY_FN; + int premdn = SLAPI_PLUGIN_BE_PRE_MODRDN_FN; + int predel = SLAPI_PLUGIN_BE_PRE_DELETE_FN; + + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + preadd = SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN; + premod = SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN; + premdn = SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN; + predel = SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN; + } + slapi_ch_free_string(&plugin_type); + + if (slapi_pblock_set(pb, preadd, 
(void *)usn_bepreop_add) != 0 || + slapi_pblock_set(pb, predel, (void *)usn_bepreop_delete) != 0 || + slapi_pblock_set(pb, premod, (void *)usn_bepreop_modify) != 0 || + slapi_pblock_set(pb, premdn, (void *)usn_bepreop_modify) != 0) { slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM, - "usn_bepreop_init: failed to register bepreop plugin\n"); + "usn_bepreop_init: failed to register bepreop plugin\n"); rc = -1; } @@ -168,17 +211,30 @@ static int usn_bepostop_init(Slapi_PBlock *pb) { int rc = 0; - - if (slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_ADD_FN, - (void *)usn_bepostop) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_DELETE_FN, - (void *)usn_bepostop_delete) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_MODIFY_FN, - (void *)usn_bepostop_modify) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_MODRDN_FN, - (void *)usn_bepostop) != 0) { + Slapi_Entry *plugin_entry = NULL; + char *plugin_type = NULL; + int postadd = SLAPI_PLUGIN_BE_POST_ADD_FN; + int postmod = SLAPI_PLUGIN_BE_POST_MODIFY_FN; + int postmdn = SLAPI_PLUGIN_BE_POST_MODRDN_FN; + int postdel = SLAPI_PLUGIN_BE_POST_DELETE_FN; + + if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && + plugin_entry && + (plugin_type = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-plugintype")) && + plugin_type && strstr(plugin_type, "betxn")) { + postadd = SLAPI_PLUGIN_BE_TXN_POST_ADD_FN; + postmod = SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN; + postmdn = SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN; + postdel = SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN; + } + slapi_ch_free_string(&plugin_type); + + if (slapi_pblock_set(pb, postadd, (void *)usn_bepostop) != 0 || + slapi_pblock_set(pb, postdel, (void *)usn_bepostop_delete) != 0 || + slapi_pblock_set(pb, postmod, (void *)usn_bepostop_modify) != 0 || + slapi_pblock_set(pb, postmdn, (void *)usn_bepostop) != 0) { slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM, - "usn_bepostop_init: failed to register bepostop plugin\n"); + "usn_bepostop_init: failed to register bepostop plugin\n"); rc = -1; }
0
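The pattern repeated across every plugin in the diff above is the same: read the plugin's own config entry, and if nsslapd-plugintype contains "betxn", register the transaction-aware callback slots instead of the plain post-op ones. A compact Python sketch of just that selection step (the slot names mirror the constants in the diff, but the real code passes integer SLAPI constants to slapi_pblock_set(); the config dictionary is illustrative):

# Plain post-op slots and their transaction-aware equivalents, as in the diff.
POST_OP_SLOTS = {
    "SLAPI_PLUGIN_POST_ADD_FN":    "SLAPI_PLUGIN_BE_TXN_POST_ADD_FN",
    "SLAPI_PLUGIN_POST_MODIFY_FN": "SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN",
    "SLAPI_PLUGIN_POST_MODRDN_FN": "SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN",
    "SLAPI_PLUGIN_POST_DELETE_FN": "SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN",
}

def select_slots(plugin_config):
    """Pick the betxn slots when the plugin type asks for them."""
    plugin_type = plugin_config.get("nsslapd-plugintype", "")
    if "betxn" in plugin_type:
        return list(POST_OP_SLOTS.values())
    return list(POST_OP_SLOTS.keys())

print(select_slots({"nsslapd-plugintype": "betxnpostoperation"}))
print(select_slots({"nsslapd-plugintype": "postoperation"}))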
b66b7126ac363890ad715b5bb855c018050e3a6b
389ds/389-ds-base
Issue 49443 - Add CI test case Description: Add a test case to suites/filter/filter_test.py. Test that ldapsearch with scope one returns only one entry. https://pagure.io/389-ds-base/issue/49443 Reviewed by: wibrown (Thanks!) spichugi Signed-off-by: Simon Pichugin <[email protected]>
commit b66b7126ac363890ad715b5bb855c018050e3a6b Author: Amita Sharma <[email protected]> Date: Wed Dec 6 16:13:19 2017 +0530 Issue 49443 - Add CI test case Description: Add a test case to suites/filter/filter_test.py. Test that ldapsearch with scope one returns only one entry. https://pagure.io/389-ds-base/issue/49443 Reviewed by: wibrown (Thanks!) spichugi Signed-off-by: Simon Pichugin <[email protected]> diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py index a6007e058..a2fb9330f 100644 --- a/dirsrvtests/tests/suites/filter/filter_test.py +++ b/dirsrvtests/tests/suites/filter/filter_test.py @@ -100,6 +100,30 @@ def test_filter_search_original_attrs(topology_st): log.info('test_filter_search_original_attrs: PASSED') [email protected] +def test_filter_scope_one(topology_st): + """Test ldapsearch with scope one gives only single entry + + :id: cf5a6078-bbe6-4d43-ac71-553c45923f91 + :setup: Standalone instance + :steps: + 1. Search cn=Directory Administrators,dc=example,dc=com using ldapsearch with + scope one using base as dc=example,dc=com + 2. Check that search should return only one entry + :expectedresults: + 1. This should pass + 2. This should pass + """ + + parent_dn="dn: dc=example,dc=com" + child_dn="dn: cn=Directory Administrators,dc=example,dc=com" + + log.info('Search user using ldapsearch with scope one') + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL,'cn=Directory Administrators',['cn'] ) + log.info(results) + + log.info('Search should only have one entry') + assert len(results) == 1 if __name__ == '__main__': # Run isolated
0
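The test above relies on search-scope semantics: with a one-level scope the base entry itself is excluded and only its immediate children are returned, so the search for the single child entry must yield exactly one result. A minimal python-ldap sketch of the same check outside the lib389 topology fixture (the server URL, bind credentials, and directory contents are assumptions):

import ldap

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

# SCOPE_ONELEVEL: only entries directly under the base, not the base itself
# and not deeper descendants.
results = conn.search_s("dc=example,dc=com", ldap.SCOPE_ONELEVEL,
                        "(cn=Directory Administrators)", ["cn"])
assert len(results) == 1
print(results)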
6ec27bc8b5253e21daa2b47888486a6e0d27f7af
389ds/389-ds-base
Ticket 49325 - Proof of concept rust tqueue in sds Bug Description: Rust is a modern systems programming language in the style of C and C++. It has strict compile-time guarantees about the correctness of applications, and promises to reduce many classes of security and stability issues, including out-of-bounds access, null dereferences, use-after-free and more. This is achieved without a runtime, instead using compile-time ownership and lifetime checks. Fix Description: This ticket adds a proof of concept that we can use Rust as an FFI language with existing C components. It adds an optional configure argument to enable a Rust thread-safe queue which is used by nunc-stans for event dispatch. My tests already show it is safe (it passes all the existing tests), and the server when built with this option passes the basic suite. Importantly it shows how we can integrate cargo with autotools, and how to expose a C-compatible API from Rust. To use this, at configure time add "--enable-rust". There are no other changes to the server without this flag, and it is not a requirement to build DS; it's optional. https://pagure.io/389-ds-base/issue/49325 Author: wibrown Review by: mreynolds, tbordaz (Thank you!)
commit 6ec27bc8b5253e21daa2b47888486a6e0d27f7af Author: William Brown <[email protected]> Date: Fri Jul 14 17:13:10 2017 +1000 Ticket 49325 - Proof of concept rust tqueue in sds Bug Description: Rust is a modern systems programming language in the style of C and C++. It has strict compile time guarantees to the correctness of applications, and promises the potential to reduce many times of security and stability issues including bounds checking, null dereferences, use-after free and more. This is achieved without a run time, instead using compile time ownership and lifetime checks. Fix Description: This ticket is to add a proof of concept that we can use Rust as an FFI language with existing C components. This adds an optional configure argument to enable a rust thread safe queue which is used by nunc-stans for event dispatch. My tests already show it is safe (it passes all the existing test) and the server when built with this option passes the basic suite. Importantly it shows how we can integrate cargo with autotools, and how to expose and C compatible apis from rust. To use this, at configure time add "--enable-rust". There are no other changes to the server without this flag, and it is not a requirement to build DS, it's optional. https://pagure.io/389-ds-base/issue/49325 Author: wibrown Review by: mreynolds, tbordaz (Thank you!) diff --git a/Makefile.am b/Makefile.am index b8ed7e9ae..e3b30b6da 100644 --- a/Makefile.am +++ b/Makefile.am @@ -29,8 +29,21 @@ NSPR_INCLUDES = @nspr_inc@ SVRCORE_INCLUDES = @svrcore_inc@ SASL_INCLUDES = @sasl_inc@ EVENT_INCLUDES = @event_inc@ -# Not used currently -# TCMALLOC_INCLUDES = @tcmalloc_inc@ + +# Rust inclusions. +if RUST_ENABLE +RUST_ON = 1 +CARGO_FLAGS = @cargo_defs@ +RUSTC_FLAGS = @asan_rust_defs@ @debug_rust_defs@ +RUST_LDFLAGS = -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil +RUST_DEFINES = -DRUST_ENABLE +else +RUST_ON = 0 +CARGO_FLAGS = +RUSTC_FLAGS = +RUST_LDFLAGS = +RUST_DEFINES = +endif # We can't add the lfds includes all the time as they have a "bomb" in them that # prevents compilation on unsupported hardware arches. @@ -80,7 +93,7 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd # Now that we have all our defines in place, setup the CPPFLAGS # These flags are the "must have" for all components -AM_CPPFLAGS = $(DEBUG_DEFINES) $(GCCSEC_DEFINES) $(ASAN_DEFINES) $(PROFILING_DEFINES) +AM_CPPFLAGS = $(DEBUG_DEFINES) $(GCCSEC_DEFINES) $(ASAN_DEFINES) $(PROFILING_DEFINES) $(RUST_DEFINES) # Flags for Directory Server # WARNING: This needs a clean up, because slap.h is a horrible mess and is publically exposed! DSPLUGIN_CPPFLAGS = $(DS_DEFINES) $(DS_INCLUDES) $(PATH_DEFINES) $(SYSTEMD_DEFINES) $(NUNCSTANS_INCLUDES) @openldap_inc@ @ldapsdk_inc@ @nss_inc@ $(NSPR_INCLUDES) @systemd_inc@ @@ -138,7 +151,7 @@ AM_LDFLAGS = -lpthread else #AM_LDFLAGS = -Wl,-z,defs # Provide the tcmalloc links if needed -AM_LDFLAGS = $(ASAN_DEFINES) $(PROFILING_LINKS) $(TCMALLOC_LINK) -latomic +AM_LDFLAGS = $(RUST_LDFLAGS) $(ASAN_DEFINES) $(PROFILING_LINKS) $(TCMALLOC_LINK) -latomic endif #end hpux # https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html#Updating-version-info @@ -236,6 +249,7 @@ clean-local: -rm -rf dist -rm -rf $(abs_top_builddir)/html -rm -rf $(abs_top_builddir)/man + CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo clean --manifest-path=$(srcdir)/src/libsds/Cargo.toml dberrstrs.h: Makefile perl $(srcdir)/ldap/servers/slapd/mkDBErrStrs.pl -i @db_incdir@ -o . 
@@ -288,7 +302,18 @@ bin_PROGRAMS = dbscan \ pwdhash \ rsearch -server_LTLIBRARIES = libsds.la libnunc-stans.la libldaputil.la libslapd.la libns-dshttpd.la +# ---------------------------------------------------------------------------------------- +# This odd looking definition is to keep the libraries in ORDER that they are needed. rsds +# is needed by sds, which is needed by ns. So we have a blank LTLIB, then append in order +# based on defines +# ---------------------------------------------------------------------------------------- + +server_LTLIBRARIES = +if RUST_ENABLE +server_LTLIBRARIES += librsds.la +endif +server_LTLIBRARIES += libsds.la libnunc-stans.la libldaputil.la libslapd.la libns-dshttpd.la + # this is how to add optional plugins if enable_pam_passthru @@ -1047,7 +1072,6 @@ libsds_la_SOURCES = src/libsds/sds/core/utils.c \ src/libsds/sds/bpt_cow/txn.c \ src/libsds/sds/bpt_cow/verify.c \ src/libsds/sds/queue/queue.c \ - src/libsds/sds/queue/tqueue.c \ src/libsds/sds/queue/lqueue.c \ src/libsds/external/csiphash/csiphash.c \ src/libsds/sds/ht/ht.c \ @@ -1070,6 +1094,32 @@ endif libsds_la_CPPFLAGS = $(AM_CPPFLAGS) $(SDS_CPPFLAGS) libsds_la_LDFLAGS = $(AM_LDFLAGS) $(SDS_LDFLAGS) +if RUST_ENABLE +libsds_la_LIBADD = librsds.la + +librsdspatha = $(abs_top_builddir)/rs/@rust_target_dir@/librsds.a +librsdspatho = $(abs_top_builddir)/rs/@rust_target_dir@/librsds.o + +# Remember, these emit to cargo_target_dir/<at>rust_target_dir<at>/emit target +$(librsdspatha): Makefile src/libsds/Cargo.toml src/libsds/sds/lib.rs src/libsds/sds/tqueue.rs + CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo rustc $(CARGO_FLAGS) --verbose --manifest-path=$(srcdir)/src/libsds/Cargo.toml -- $(RUSTC_FLAGS) --emit link=$(librsdspatha) + +$(librsdspatho): Makefile src/libsds/Cargo.toml src/libsds/sds/lib.rs src/libsds/sds/tqueue.rs + CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo rustc $(CARGO_FLAGS) --verbose --manifest-path=$(srcdir)/src/libsds/Cargo.toml -- $(RUSTC_FLAGS) --emit obj=$(librsdspatho) + +am_librsds_la_OBJECTS = $(librsdspatho) +librsds_la_LIBADD = $(librsdspatha) +librsds_la_SOURCES = + +dist_noinst_DATA += $(srcdir)/src/libsds/Cargo.toml \ + $(srcdir)/src/libsds/sds/*.rs + +else +# Just build the tqueue in C. +libsds_la_SOURCES += \ + src/libsds/sds/queue/tqueue.c +endif + #------------------------ # libnunc-stans #------------------------ @@ -2242,7 +2292,12 @@ rpmbrprep: dist-bzip2 rpmroot cp $(distdir).tar.bz2 $(RPMBUILD)/SOURCES cp $(srcdir)/rpm/389-ds-base-git.sh $(RPMBUILD)/SOURCES cp $(srcdir)/rpm/389-ds-base-devel.README $(RPMBUILD)/SOURCES - sed -e "s/__VERSION__/$(RPM_VERSION)/" -e "s/__RELEASE__/$(RPM_RELEASE)/" -e "s/__VERSION_PREREL__/$(VERSION_PREREL)/" -e "s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/" -e "s/__ASAN_ON__/$(ASAN_ON)/" < $(abs_builddir)/rpm/389-ds-base.spec > $(RPMBUILD)/SPECS/389-ds-base.spec + sed -e "s/__VERSION__/$(RPM_VERSION)/" \ + -e "s/__RELEASE__/$(RPM_RELEASE)/" \ + -e "s/__VERSION_PREREL__/$(VERSION_PREREL)/" \ + -e "s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/" \ + -e "s/__RUST_ON__/$(RUST_ON)/" \ + -e "s/__ASAN_ON__/$(ASAN_ON)/" < $(abs_builddir)/rpm/389-ds-base.spec > $(RPMBUILD)/SPECS/389-ds-base.spec # Requires rpmdevtools. Consider making this a dependancy of rpms. rpmsources: rpmbrprep diff --git a/configure.ac b/configure.ac index 91d6d398b..cb6c52882 100644 --- a/configure.ac +++ b/configure.ac @@ -24,6 +24,8 @@ AC_SUBST([CONSOLE_VERSION]) AM_MAINTAINER_MODE AC_CANONICAL_HOST +AC_CONFIG_MACRO_DIRS([m4]) + # Checks for programs. 
AC_PROG_CXX AC_PROG_CC @@ -78,29 +80,61 @@ AC_CHECK_FUNCS([clock_gettime], [], AC_MSG_ERROR([unable to locate required symb # This will detect if we need to add the LIBADD_DL value for us. LT_LIB_DLLOAD +# Optional rust component support. +AC_MSG_CHECKING(for --enable-rust) +AC_ARG_ENABLE(rust, AS_HELP_STRING([--enable-rust], [Enable rust language features (default: no)]), +[ + AC_CHECK_PROG(CARGO, [cargo], [yes], [no]) + AC_CHECK_PROG(RUSTC, [rustc], [yes], [no]) + + AS_IF([test "$CARGO" != "yes" -o "$RUSTC" != "yes"], [ + AC_MSG_FAILURE("Rust based plugins cannot be built cargo=$CARGO rustc=$RUSTC") + ]) + with_rust=yes + AC_MSG_RESULT(yes) +], +[ + AC_MSG_RESULT(no) +]) +AM_CONDITIONAL([RUST_ENABLE],[test -n "$with_rust"]) + AC_MSG_CHECKING(for --enable-debug) AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug features (default: no)]), [ AC_MSG_RESULT(yes) debug_defs="-g3 -DDEBUG -DMCC_DEBUG -O0" + debug_rust_defs="-C debuginfo=2" + cargo_defs="" + rust_target_dir="debug" + with_debug=yes ], [ AC_MSG_RESULT(no) debug_defs="" + debug_rust_defs="-C debuginfo=2" + cargo_defs="--release" + rust_target_dir="release" ]) AC_SUBST([debug_defs]) +AC_SUBST([debug_rust_defs]) +AC_SUBST([cargo_defs]) +AC_SUBST([rust_target_dir]) +AM_CONDITIONAL([DEBUG],[test -n "$with_debug"]) AC_MSG_CHECKING(for --enable-asan) AC_ARG_ENABLE(asan, AS_HELP_STRING([--enable-asan], [Enable gcc address sanitizer options (default: no)]), [ AC_MSG_RESULT(yes) asan_defs="-fsanitize=address -fno-omit-frame-pointer" + asan_rust_defs="-Z sanitizer=address" ], [ AC_MSG_RESULT(no) asan_defs="" + asan_rust_defs="" ]) AC_SUBST([asan_defs]) +AC_SUBST([asan_rust_defs]) AM_CONDITIONAL(enable_asan,test "$enable_asan" = "yes") if test -z "$enable_perl" ; then diff --git a/rpm.mk b/rpm.mk index 378a46960..425480828 100644 --- a/rpm.mk +++ b/rpm.mk @@ -10,6 +10,7 @@ NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)$(VERSION_PREREL) TARBALL = $(NAME_VERSION).tar.bz2 NUNC_STANS_ON = 1 ASAN_ON = 0 +RUST_ON = 0 clean: rm -rf dist @@ -35,6 +36,7 @@ rpmroot: sed -e s/__VERSION__/$(RPM_VERSION)/ -e s/__RELEASE__/$(RPM_RELEASE)/ \ -e s/__VERSION_PREREL__/$(VERSION_PREREL)/ \ -e s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/ \ + -e s/__RUST_ON__/$(RUST_ON)/ \ -e s/__ASAN_ON__/$(ASAN_ON)/ \ rpm/$(PACKAGE).spec.in > $(RPMBUILD)/SPECS/$(PACKAGE).spec diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 2fac56032..d16ab5a5e 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -16,11 +16,15 @@ # This enables an ASAN build. This should not go to production, so we rename. %global use_asan __ASAN_ON__ + +# This enables rust in the build. +%global use_rust __RUST_ON__ + %if %{use_asan} %global use_tcmalloc 0 %global variant base-asan %else -%if %{_arch} != "s390x" && %{_arch} != "s390" +%if %{_arch} != "s390x" && %{_arch} != "s390" && !%{use_rust} %global use_tcmalloc 1 %else %global use_tcmalloc 0 @@ -84,6 +88,11 @@ BuildRequires: systemd-devel %if %{use_asan} BuildRequires: libasan %endif +# If rust is enabled +%if %{use_rust} +BuildRequires: cargo +BuildRequires: rust +%endif # Needed to support regeneration of the autotool artifacts. 
BuildRequires: autoconf BuildRequires: automake @@ -263,7 +272,7 @@ export CXX=clang++ # hack hack hack https://bugzilla.redhat.com/show_bug.cgi?id=833529 NSSARGS="--with-svrcore-inc=%{_includedir} --with-svrcore-lib=%{_libdir} --with-nss-lib=%{_libdir} --with-nss-inc=%{_includedir}/nss3" -%if %{use_asan} +%if %{use_asan} && !%{use_rust} ASAN_FLAGS="--enable-asan --enable-debug" %endif @@ -271,6 +280,10 @@ ASAN_FLAGS="--enable-asan --enable-debug" TCMALLOC_FLAGS="--enable-tcmalloc" %endif +%if %{use_rust} +RUST_FLAGS="--enable-rust" +%endif + # Rebuild the autotool artifacts now. autoreconf -fiv @@ -279,7 +292,7 @@ autoreconf -fiv --with-systemdsystemunitdir=%{_unitdir} \ --with-systemdsystemconfdir=%{_sysconfdir}/systemd/system \ --with-systemdgroupname=%{groupname} \ - $NSSARGS $TCMALLOC_FLAGS $ASAN_FLAGS \ + $NSSARGS $TCMALLOC_FLAGS $ASAN_FLAGS $RUST_FLAGS \ --enable-cmocka %if 0%{?rhel} >= 8 || 0%{?fedora} @@ -526,6 +539,9 @@ fi %{_libdir}/%{pkgname}/libnunc-stans.so %{_libdir}/%{pkgname}/libsds.so %{_libdir}/%{pkgname}/libldaputil.so +%if %{use_rust} +%{_libdir}/%{pkgname}/librsds.so +%endif %{_libdir}/pkgconfig/* %files libs @@ -537,6 +553,9 @@ fi %{_libdir}/%{pkgname}/libnunc-stans.so.* %{_libdir}/%{pkgname}/libsds.so.* %{_libdir}/%{pkgname}/libldaputil.so.* +%if %{use_rust} +%{_libdir}/%{pkgname}/librsds.so.* +%endif %files snmp %defattr(-,root,root,-) diff --git a/src/libsds/Cargo.toml b/src/libsds/Cargo.toml new file mode 100644 index 000000000..594326c7b --- /dev/null +++ b/src/libsds/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "rsds" +version = "0.1.0" +authors = ["William Brown <[email protected]>"] + +[dependencies] + +[lib] +path = "sds/lib.rs" +name = "rsds" +crate-type = ["dylib"] + +[profile.release] +panic = "abort" +# lto = true + diff --git a/src/libsds/sds/lib.rs b/src/libsds/sds/lib.rs new file mode 100644 index 000000000..aa70c7a8e --- /dev/null +++ b/src/libsds/sds/lib.rs @@ -0,0 +1,32 @@ +// BEGIN COPYRIGHT BLOCK +// Copyright (c) 2017, Red Hat, Inc +// All rights reserved. +// +// License: GPL (version 3 or any later version). +// See LICENSE for details. +// END COPYRIGHT BLOCK + +#![warn(missing_docs)] + +//! sds is a collection of datastructures used in the slapi api. This contains +//! a thread safe queue and others implemented in C. + +/// Implementation of a thread safe queue. +pub mod tqueue; + +#[repr(C)] +/// Slapi Data Structure Result types. Indicates the status of the operation +/// for C compatability (instead of Result<T> +pub enum sds_result { + /// The operation was a success + Success = 0, + /// An unknown error occured. This indicates a fault in the API. + UnknownError = 1, + /// A null pointer was provided as an argument to a function. This is + /// invalid. + NullPointer = 2, + /// The list is exhausted, no more elements can be returned. + ListExhausted = 16, +} + + diff --git a/src/libsds/sds/tqueue.rs b/src/libsds/sds/tqueue.rs new file mode 100644 index 000000000..b7042e514 --- /dev/null +++ b/src/libsds/sds/tqueue.rs @@ -0,0 +1,131 @@ +// BEGIN COPYRIGHT BLOCK +// Copyright (c) 2017, Red Hat, Inc +// All rights reserved. +// +// License: GPL (version 3 or any later version). +// See LICENSE for details. +// END COPYRIGHT BLOCK + +#![warn(missing_docs)] + +use super::sds_result; +use std::sync::Mutex; +use std::collections::LinkedList; + +// Borrow from libc +#[doc(hidden)] +#[allow(non_camel_case_types)] +#[repr(u8)] +pub enum c_void { + // Two dummy variants so the #[repr] attribute can be used. 
+ #[doc(hidden)] + __Variant1, + #[doc(hidden)] + __Variant2, +} + +/// A thread safe queue. This is made to be compatible with the tqueue api +/// provided by libsds as a proof of concept. As a result it contains some C-isms +/// like holding a free function pointer (rather than drop trait). +pub struct TQueue { + q: Mutex<LinkedList<*const c_void>>, + free_fn: Option<extern "C" fn(*const c_void)>, +} + +impl TQueue { + /// Allocate a new thread safe queue. If the free function is provided + /// on drop of the TQueue, this function will be called on all remaining + /// elements of the queue. + pub fn new(free_fn: Option<extern "C" fn(*const c_void)>) -> Self { + TQueue { + q: Mutex::new(LinkedList::new()), + free_fn: free_fn, + } + } + + /// Push a pointer into the tail of the queue. + pub fn enqueue(&self, elem: *const c_void) { + let mut q_inner = self.q.lock().unwrap(); + q_inner.push_back(elem); + } + + /// Dequeue the head element of the queue. If not element + /// exists return None. + pub fn dequeue(&self) -> Option<*const c_void> { + let mut q_inner = self.q.lock().unwrap(); + q_inner.pop_front() + } +} + +impl Drop for TQueue { + fn drop(&mut self) { + println!("droping tqueue"); + if let Some(f) = self.free_fn { + let mut q_inner = self.q.lock().unwrap(); + let mut elem = (*q_inner).pop_front(); + while elem.is_some() { + (f)(elem.unwrap()); + elem = (*q_inner).pop_front(); + } + } + } +} + +#[no_mangle] +/// C compatible wrapper around the TQueue. Given a valid point, a TQueue pointer +/// is allocated on the heap and referenced in retq. free_fn_ptr may be NULL +/// but if it references a function, this will be called during drop of the TQueue. +pub extern fn sds_tqueue_init(retq: *mut *mut TQueue, free_fn_ptr: Option<extern "C" fn(*const c_void)>) -> sds_result { + // This piece of type signature magic is because in rust types that extern C, + // with option has None resolve to null. What this causes is we can wrap + // our fn ptr with Option in rust, but the C side gives us fn ptr or NULL, and + // it *works*. It makes the result complete safe on the rust side too! + if retq.is_null() { + return sds_result::NullPointer; + } + + let q = Box::new(TQueue::new(free_fn_ptr)); + unsafe { + *retq = Box::into_raw(q); + } + sds_result::Success +} + +#[no_mangle] +/// Push an element to the tail of the queue. The element may be NULL +pub extern fn sds_tqueue_enqueue(q: *const TQueue, elem: *const c_void) -> sds_result { + // Check for null .... + unsafe { (*q).enqueue(elem) }; + sds_result::Success +} + +#[no_mangle] +/// Dequeue from the head of the queue. The result will be placed into elem. +/// if elem is NULL no dequeue is attempted. If there are no more items +/// ListExhausted is returned. +pub extern fn sds_tqueue_dequeue(q: *const TQueue, elem: *mut *const c_void) -> sds_result { + if elem.is_null() { + return sds_result::NullPointer; + } + match unsafe { (*q).dequeue() } { + Some(e) => { + unsafe { *elem = e; }; + sds_result::Success + } + None => { + sds_result::ListExhausted + } + } +} + +#[no_mangle] +/// Free the queue and all remaining elements. After this point it is +/// not safe to access the queue. 
+pub extern fn sds_tqueue_destroy(q: *mut TQueue) -> sds_result { + // This will drop the queue and free it's content + // mem::drop(q); + let _q = unsafe { Box::from_raw(q) }; + sds_result::Success +} + + diff --git a/src/libsds/test/test_sds_tqueue.c b/src/libsds/test/test_sds_tqueue.c index cd14f6ecd..69bc0b74b 100644 --- a/src/libsds/test/test_sds_tqueue.c +++ b/src/libsds/test/test_sds_tqueue.c @@ -17,6 +17,7 @@ test_1_tqueue_invalid_create(void **state __attribute__((unused))) assert_int_equal(result, SDS_NULL_POINTER); } +#ifndef RUST_ENABLE static void test_2_tqueue_enqueue(void **state) { @@ -113,6 +114,7 @@ test_6_tqueue_dequeue_multiple(void **state) assert_ptr_equal(q->uq->head, NULL); assert_ptr_equal(q->uq->tail, NULL); } +#endif /* RUST_ENABLE */ static void test_7_tqueue_random(void **state) @@ -178,6 +180,7 @@ run_tqueue_tests(void) { const struct CMUnitTest tests[] = { cmocka_unit_test(test_1_tqueue_invalid_create), +#ifndef RUST_ENABLE cmocka_unit_test_setup_teardown(test_2_tqueue_enqueue, tqueue_test_setup, tqueue_test_teardown), @@ -193,6 +196,7 @@ run_tqueue_tests(void) cmocka_unit_test_setup_teardown(test_6_tqueue_dequeue_multiple, tqueue_test_setup, tqueue_test_teardown), +#endif cmocka_unit_test_setup_teardown(test_7_tqueue_random, tqueue_test_setup, tqueue_test_teardown),
0
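Functionally, the TQueue in the diff above is a linked list guarded by a mutex, with an optional destructor run over any leftover elements when the queue is dropped. A rough Python analogue of that behaviour, useful only to show the semantics (Python already has queue.Queue; this sketch instead mirrors the enqueue/dequeue/destroy shape of the C-facing API so the effect of the Rust Drop handler is visible):

import threading
from collections import deque

class TQueue:
    """Mutex-guarded FIFO with an optional free function, like the Rust TQueue."""
    def __init__(self, free_fn=None):
        self._lock = threading.Lock()
        self._items = deque()
        self._free_fn = free_fn

    def enqueue(self, elem):
        with self._lock:
            self._items.append(elem)

    def dequeue(self):
        """Return the head element, or None when the queue is exhausted."""
        with self._lock:
            return self._items.popleft() if self._items else None

    def destroy(self):
        """Run the free function over anything still queued, as Drop does."""
        with self._lock:
            if self._free_fn is not None:
                while self._items:
                    self._free_fn(self._items.popleft())
            self._items.clear()

q = TQueue(free_fn=lambda e: print("freeing", e))
q.enqueue("a"); q.enqueue("b")
assert q.dequeue() == "a"
q.destroy()   # prints: freeing b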
734a2144fd59135e0bdf576c3f5070f8cb214b69
389ds/389-ds-base
Ticket 48923 - serverCmd timeout not working as expected Bug Description: When trying to start a server and the startup fails, the start-dirsrv command will hang for quite a while, and the current timeout implementation does not apply to this scenario. Fix Description: Instead of calling "os.system()", use subprocess and a timer that will kill the process if it times out. https://fedorahosted.org/389/ticket/48923 Reviewed by: nhosoi (Thanks!)
commit 734a2144fd59135e0bdf576c3f5070f8cb214b69 Author: Mark Reynolds <[email protected]> Date: Wed Jul 13 12:02:02 2016 -0400 Ticket 48923 - serverCmd timeout not working as expected Bug Description: When trying to start a server, and the startup fails, the start-dirsrv command will actually hang for quite a while, and the current timeout implementation does not apply to this scenario. Fix Description: Instead of calling "os.system()", use subprocess and a timer that will kill the process if it timeouts. https://fedorahosted.org/389/ticket/48923 Reviewed by: nhosoi(Thanks!) diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py index 34c01877e..9781dd3f2 100644 --- a/src/lib389/lib389/tools.py +++ b/src/lib389/lib389/tools.py @@ -25,11 +25,12 @@ import pwd import grp import logging import ldap - +import shlex import socket import getpass -# from .nss_ssl import nss_create_new_database +# from .nss_ssl import nss_create_new_database +from threading import Timer from lib389._constants import * from lib389._ldifconn import LDIFConn from lib389.properties import * @@ -95,6 +96,31 @@ USERADD = "/usr/sbin/useradd" NOLOGIN = "/sbin/nologin" +def kill_proc(proc, timeout): + """Kill a process after the timeout is reached + @param proc - The subprocess process + @param timeout - timeout in seconds + """ + timeout["value"] = True + proc.kill() + + +def runCmd(cmd, timeout_sec): + """Run a system command with a timeout + @param cmd - The full system command + @param timeout_sec - The timeoput value in seconds + @return - The result code + """ + proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + timeout = {"value": False} + timer = Timer(timeout_sec, kill_proc, [proc, timeout]) + timer.start() + stdout, stderr = proc.communicate() + timer.cancel() + return proc.returncode + + class DirSrvTools(object): """DirSrv mix-in.""" @@ -232,7 +258,9 @@ class DirSrvTools(object): if "USE_GDB" in os.environ or "USE_VALGRIND" in os.environ: timeout = timeout * 3 - timeout += int(time.time()) + + full_timeout = int(time.time()) + timeout + if cmd == 'stop': log.warn("unbinding before stop") try: @@ -258,9 +286,9 @@ class DirSrvTools(object): else: done = True - log.warn("Running command: %r" % (fullCmd)) - rc = os.system("%s" % (fullCmd)) - while not done and int(time.time()) < timeout: + log.warn("Running command: %r - timeout(%d)" % (fullCmd, timeout)) + rc = runCmd("%s" % fullCmd, timeout) + while not done and int(time.time()) < full_timeout: line = logfp.readline() while not done and line: lastLine = line @@ -272,7 +300,7 @@ class DirSrvTools(object): done = True elif line.find("Initialization Failed") >= 0: # sometimes the server fails to start - try again - rc = os.system("%s" % (fullCmd)) + rc = runCmd("%s" % (fullCmd), timeout) pos = logfp.tell() break elif line.find("exiting.") >= 0:
0
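The core of the fix above is replacing os.system() with subprocess plus a threading.Timer that kills the child when the deadline expires. A self-contained sketch of that mechanism, close to the runCmd()/kill_proc() pair in the diff (the example command is illustrative):

import shlex
import subprocess
from threading import Timer

def kill_proc(proc, timed_out):
    timed_out["value"] = True
    proc.kill()

def run_cmd(cmd, timeout_sec):
    """Run a command, killing it if it does not finish within timeout_sec."""
    proc = subprocess.Popen(shlex.split(cmd),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    timed_out = {"value": False}
    timer = Timer(timeout_sec, kill_proc, [proc, timed_out])
    timer.start()
    try:
        stdout, stderr = proc.communicate()
    finally:
        timer.cancel()
    return proc.returncode, timed_out["value"]

rc, timed_out = run_cmd("sleep 5", 1)
print(rc, timed_out)   # non-zero return code, timed_out is True

On Python 3.3 and later, proc.communicate(timeout=...) with subprocess.TimeoutExpired offers a simpler route, but the Timer approach shown in the diff also works on the older Python versions the code had to support.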
545e9a6936cbd280eb767e3b41897f3ad812c66b
389ds/389-ds-base
Need to terminate dna plugin config entry with a newline
commit 545e9a6936cbd280eb767e3b41897f3ad812c66b Author: Rich Megginson <[email protected]> Date: Fri Feb 23 04:10:50 2007 +0000 Need to terminate dna plugin config entry with a newline diff --git a/ldap/admin/src/create_instance.c b/ldap/admin/src/create_instance.c index cf5c16b34..620ab4a86 100644 --- a/ldap/admin/src/create_instance.c +++ b/ldap/admin/src/create_instance.c @@ -3236,6 +3236,7 @@ char *ds_gen_confs(char *sroot, server_config_s *cf, char *cs_path) fprintf(f, "nsslapd-plugintype: preoperation\n"); fprintf(f, "nsslapd-pluginenabled: off\n"); fprintf(f, "nsslapd-pluginPath: %s/libdna-plugin%s\n", cf->plugin_dir, shared_lib); + fprintf(f, "\n"); #endif /* ENABLE_DNA */ fprintf(f, "dn: cn=ldbm database,cn=plugins,cn=config\n");
0
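The one-line fix above matters because the generated config is LDIF-like: each entry must be terminated by a blank line, otherwise the DNA plugin entry and the following "cn=ldbm database" entry run together and parse as a single, invalid entry. A tiny illustration of the separator rule (the parser is a simplified stand-in, not the server's LDIF reader, and the entry attributes are abbreviated):

def split_entries(text):
    """Split LDIF-style text into entries on blank lines."""
    return [block for block in text.split("\n\n") if block.strip()]

without_newline = (
    "dn: cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config\n"
    "nsslapd-pluginenabled: off\n"          # no blank-line terminator
    "dn: cn=ldbm database,cn=plugins,cn=config\n"
    "objectclass: top\n"
)
with_newline = without_newline.replace("off\n", "off\n\n", 1)

assert len(split_entries(without_newline)) == 1   # two entries merged
assert len(split_entries(with_newline)) == 2      # properly separated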
cdb83c417b28299b88e2a9ad896b78ef0f1a8fa2
389ds/389-ds-base
Ticket 47640 - Linked attributes transaction not aborted when linked entry does not exist Bug Description: When adding an entry whose linked attribute points to a target entry that does not exist, the operation is still allowed. Fix Description: Return an error and a useful error message to the client. https://fedorahosted.org/389/ticket/47640 Reviewed by: nhosoi (Thanks!)
commit cdb83c417b28299b88e2a9ad896b78ef0f1a8fa2 Author: Mark Reynolds <[email protected]> Date: Tue Jun 9 15:32:57 2015 -0400 Ticket 47640 - Linked attributes transaction not aborted when linked entry does not exit Bug Description: When adding an entry and the target entry does not exist, the operation is still allowed. Fix Description: Return an error and useful error message to the client. https://fedorahosted.org/389/ticket/47640 Reviewed by: nhosoi(Thanks!) diff --git a/dirsrvtests/tickets/ticket47640_test.py b/dirsrvtests/tickets/ticket47640_test.py new file mode 100644 index 000000000..648337e1c --- /dev/null +++ b/dirsrvtests/tickets/ticket47640_test.py @@ -0,0 +1,122 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + [email protected](scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + +def test_ticket47640(topology): + ''' + Linked Attrs Plugins - verify that if the plugin fails to update the link entry + that the entire operation is aborted + ''' + + # Enable Dynamic plugins, and the linked Attrs plugin + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + except ldap.LDAPError, e: + ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc']) + assert False + + try: + topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + except ValueError, e: + ldap.fatal('Failed to enable linked attributes plugin!' 
+ e.message['desc']) + assert False + + # Add the plugin config entry + try: + topology.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': 'seeAlso', + 'managedType': 'seeAlso' + }))) + except ldap.LDAPError, e: + log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) + assert False + + # Add an entry who has a link to an entry that does not exist + OP_REJECTED = False + try: + topology.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager', + 'seeAlso': 'uid=user,dc=example,dc=com' + }))) + except ldap.UNWILLING_TO_PERFORM: + # Success + log.info('Add operation correctly rejected.') + OP_REJECTED = True + except ldap.LDAPError, e: + log.fatal('Add operation incorrectly rejected: error %s - ' + + 'expected "unwilling to perform"' % e.message['desc']) + assert False + if not OP_REJECTED: + log.fatal('Add operation incorrectly allowed') + assert False + + log.info('Test complete') + + +def test_ticket47640_final(topology): + topology.standalone.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47640(topo) + test_ticket47640_final(topo) + + +if __name__ == '__main__': + run_isolated() + diff --git a/ldap/servers/plugins/linkedattrs/linked_attrs.c b/ldap/servers/plugins/linkedattrs/linked_attrs.c index e30286772..27984a9b2 100644 --- a/ldap/servers/plugins/linkedattrs/linked_attrs.c +++ b/ldap/servers/plugins/linkedattrs/linked_attrs.c @@ -55,8 +55,6 @@ static Slapi_RWLock *g_config_lock; static void *_PluginID = NULL; static char *_PluginDN = NULL; int plugin_is_betxn = 0; -/* For future use - enforce all linked attribute operations succeed */ -static int strict_results = 0; static Slapi_PluginDesc pdesc = { LINK_FEATURE_DESC, VENDOR, @@ -108,14 +106,14 @@ static int linked_attrs_config_exists_reverse(struct configEntry *entry); static int linked_attrs_oktodo(Slapi_PBlock *pb); void linked_attrs_load_array(Slapi_Value **array, Slapi_Attr *attr); int linked_attrs_compare(const void *a, const void *b); -static int linked_attrs_add_backpointers(char *linkdn, struct configEntry *config, +static int linked_attrs_add_backpointers(Slapi_PBlock *pb, char *linkdn, struct configEntry *config, Slapi_Mod *smod); static int linked_attrs_del_backpointers(Slapi_PBlock *pb, char *linkdn, struct configEntry *config, Slapi_Mod *smod); static int linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn, struct configEntry *config, Slapi_Mod *smod); -static int linked_attrs_mod_backpointers(char *linkdn, char *type, char *scope, - int modop, Slapi_ValueSet *targetvals); +static int linked_attrs_mod_backpointers(Slapi_PBlock *pb, char *linkdn, char *type, + char *scope, int modop, Slapi_ValueSet *targetvals); /* * Config cache locking functions @@ -1252,14 +1250,14 @@ linked_attrs_compare(const void *a, const void *b) * by the values in smod. 
*/ static int -linked_attrs_add_backpointers(char *linkdn, struct configEntry *config, +linked_attrs_add_backpointers(Slapi_PBlock *pb, char *linkdn, struct configEntry *config, Slapi_Mod *smod) { Slapi_ValueSet *vals = slapi_valueset_new(); int rc = LDAP_SUCCESS; slapi_valueset_set_from_smod(vals, smod); - rc = linked_attrs_mod_backpointers(linkdn, config->managedtype, config->scope, + rc = linked_attrs_mod_backpointers(pb, linkdn, config->managedtype, config->scope, LDAP_MOD_ADD, vals); slapi_valueset_free(vals); @@ -1294,7 +1292,7 @@ linked_attrs_del_backpointers(Slapi_PBlock *pb, char *linkdn, slapi_valueset_set_from_smod(vals, smod); } - rc = linked_attrs_mod_backpointers(linkdn, config->managedtype, config->scope, + rc = linked_attrs_mod_backpointers(pb, linkdn, config->managedtype, config->scope, LDAP_MOD_DELETE, vals); slapi_valueset_free(vals); @@ -1413,13 +1411,13 @@ linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn, /* Perform the actual updates to the target entries. */ if (delvals) { - rc = linked_attrs_mod_backpointers(linkdn, config->managedtype, + rc = linked_attrs_mod_backpointers(pb, linkdn, config->managedtype, config->scope, LDAP_MOD_DELETE, delvals); slapi_valueset_free(delvals); } if (rc == LDAP_SUCCESS && addvals) { - rc = linked_attrs_mod_backpointers(linkdn, config->managedtype, + rc = linked_attrs_mod_backpointers(pb, linkdn, config->managedtype, config->scope, LDAP_MOD_ADD, addvals); slapi_valueset_free(addvals); } @@ -1437,7 +1435,7 @@ linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn, * Performs backpointer management. */ static int -linked_attrs_mod_backpointers(char *linkdn, char *type, +linked_attrs_mod_backpointers(Slapi_PBlock *pb, char *linkdn, char *type, char *scope, int modop, Slapi_ValueSet *targetvals) { char *val[2]; @@ -1493,13 +1491,19 @@ linked_attrs_mod_backpointers(char *linkdn, char *type, slapi_modify_internal_set_pb_ext(mod_pb, targetsdn, mods, 0, 0, linked_attrs_get_plugin_id(), 0); slapi_modify_internal_pb(mod_pb); - if (strict_results){ - /* we are enforcing strict results, so return the error */ - slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); - if(rc != LDAP_SUCCESS){ - slapi_sdn_free(&targetsdn); - break; - } + slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + if(rc != LDAP_SUCCESS){ + char *err_msg = NULL; + + err_msg = PR_smprintf("Linked Attrs Plugin: Failed to update " + "link to target entry (%s) error %d", + targetdn, rc); + slapi_log_error(SLAPI_LOG_PLUGIN, LINK_PLUGIN_SUBSYSTEM, "%s\n", err_msg); + slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, err_msg); + PR_smprintf_free(err_msg); + slapi_sdn_free(&targetsdn); + rc = LDAP_UNWILLING_TO_PERFORM; + break; } /* Initialize the pblock so we can reuse it. */ slapi_pblock_init(mod_pb); @@ -1684,7 +1688,7 @@ linked_attrs_mod_post_op(Slapi_PBlock *pb) case LDAP_MOD_ADD: /* Find the entries pointed to by the new * values and add the backpointers. 
*/ - rc = linked_attrs_add_backpointers(dn, config, smod); + rc = linked_attrs_add_backpointers(pb, dn, config, smod); break; case LDAP_MOD_DELETE: /* Find the entries pointed to by the deleted @@ -1786,7 +1790,7 @@ linked_attrs_add_post_op(Slapi_PBlock *pb) slapi_attr_get_valueset(attr, &vals); slapi_lock_mutex(config->lock); - rc = linked_attrs_mod_backpointers(dn, config->managedtype, + rc = linked_attrs_mod_backpointers(pb, dn, config->managedtype, config->scope, LDAP_MOD_ADD, vals); slapi_unlock_mutex(config->lock); @@ -1875,7 +1879,7 @@ linked_attrs_del_post_op(Slapi_PBlock *pb) slapi_attr_get_valueset(attr, &vals); slapi_lock_mutex(config->lock); - rc = linked_attrs_mod_backpointers(dn, config->managedtype, + rc = linked_attrs_mod_backpointers(pb, dn, config->managedtype, config->scope, LDAP_MOD_DELETE, vals); slapi_unlock_mutex(config->lock); @@ -1909,7 +1913,7 @@ linked_attrs_del_post_op(Slapi_PBlock *pb) slapi_lock_mutex(config->lock); /* Delete forward link value. */ - rc = linked_attrs_mod_backpointers(dn, config->linktype, + rc = linked_attrs_mod_backpointers(pb, dn, config->linktype, config->scope, LDAP_MOD_DELETE, vals); slapi_unlock_mutex(config->lock); @@ -2018,7 +2022,7 @@ linked_attrs_modrdn_post_op(Slapi_PBlock *pb) slapi_lock_mutex(config->lock); /* Delete old dn value. */ - rc = linked_attrs_mod_backpointers(old_dn, config->managedtype, + rc = linked_attrs_mod_backpointers(pb, old_dn, config->managedtype, config->scope, LDAP_MOD_DELETE, vals); slapi_unlock_mutex(config->lock); @@ -2045,7 +2049,7 @@ linked_attrs_modrdn_post_op(Slapi_PBlock *pb) slapi_lock_mutex(config->lock); /* Add new dn value. */ - rc = linked_attrs_mod_backpointers(new_dn, config->managedtype, + rc = linked_attrs_mod_backpointers(pb, new_dn, config->managedtype, config->scope, LDAP_MOD_ADD, vals); slapi_unlock_mutex(config->lock); @@ -2079,7 +2083,7 @@ linked_attrs_modrdn_post_op(Slapi_PBlock *pb) slapi_lock_mutex(config->lock); /* Delete old dn value. */ - rc = linked_attrs_mod_backpointers(old_dn, config->linktype, + rc = linked_attrs_mod_backpointers(pb, old_dn, config->linktype, config->scope, LDAP_MOD_DELETE, vals); if(rc != LDAP_SUCCESS){ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM, @@ -2091,7 +2095,7 @@ linked_attrs_modrdn_post_op(Slapi_PBlock *pb) } /* Add new dn value. */ - rc = linked_attrs_mod_backpointers(new_dn, config->linktype, + rc = linked_attrs_mod_backpointers(pb, new_dn, config->linktype, config->scope, LDAP_MOD_ADD, vals); slapi_unlock_mutex(config->lock);
0
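The Ticket 47640 change above makes the Linked Attributes plugin return the failure of the internal backpointer update to the client instead of silently accepting the add. A minimal client-side sketch of that expectation, assuming a standalone instance with the plugin configured to manage seeAlso; the URL, credentials, suffix, and DNs are illustrative, not taken from the commit:

```python
import ldap
import ldap.modlist

# Bind as an administrative user (illustrative credentials).
conn = ldap.initialize('ldap://localhost:389')
conn.simple_bind_s('cn=Directory Manager', 'password')

# The seeAlso target does not exist, so the backpointer update will fail.
entry = ldap.modlist.addModlist({
    'objectClass': [b'top', b'extensibleObject'],
    'uid': [b'manager'],
    'seeAlso': [b'uid=missing,dc=example,dc=com'],
})

try:
    conn.add_s('uid=manager,dc=example,dc=com', entry)
except ldap.UNWILLING_TO_PERFORM:
    # With the fix, the whole add is rejected rather than leaving a
    # dangling link behind.
    pass
```

This mirrors what the new ticket47640_test.py asserts, condensed to the raw python-ldap calls.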
24f8b6d93aaca127221432062ba276398c4859b2
389ds/389-ds-base
Issue:50112 - Port ACI test suite from TET to python3 (misc and syntax) Port ACI test suite from TET to python3 (misc and syntax) https://pagure.io/389-ds-base/issue/50112 Reviewed by: thierry bordaz, William Brown, Matus Honek, Ludwig Krispenz, Simon Pichugin
commit 24f8b6d93aaca127221432062ba276398c4859b2 Author: Anuj Borah <[email protected]> Date: Thu Jan 24 10:24:31 2019 +0530 Issue:50112 - Port ACI test suit from TET to python3(misc and syntax) Port ACI test suit from TET to python3(misc and syntax) https://pagure.io/389-ds-base/issue/50112 Reviewed by: thierry bordaz, William Brown, Matus Honek, Ludwig Krispenz, Simon Pichugin diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py new file mode 100644 index 000000000..8394c92aa --- /dev/null +++ b/dirsrvtests/tests/suites/acl/misc_test.py @@ -0,0 +1,413 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 RED Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389._mapped_object import DSLdapObject +from lib389.idm.account import Accounts, Anonymous +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.idm.group import Group, Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.plugins import ACLPlugin + +import ldap + + +PEOPLE = "ou=PEOPLE,{}".format(DEFAULT_SUFFIX) +DYNGROUP = "cn=DYNGROUP,{}".format(PEOPLE) +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) + + [email protected](scope="function") +def aci_of_user(request, topo): + """ + :param request: + :param topo: + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + [email protected](scope="function") +def clean(request, topo): + """ + :param request: + :param topo: + """ + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + try: + for i in ['Product Development', 'Accounting']: + ous.create(properties={'ou': i}) + except ldap.ALREADY_EXISTS as eoor_eoor: + topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__) + + def fin(): + """ + Deletes entries after the test. + """ + for scope_scope in [CONTAINER_1_DELADD, CONTAINER_2_DELADD, PEOPLE]: + try: + DSLdapObject(topo.standalone, scope_scope).delete() + except ldap.ALREADY_EXISTS as eoor_eoor: + topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__) + + request.addfinalizer(fin) + + +def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): + """ + Misc Test 2 accept aci in addition to acl + :id:8e9408fa-7db8-11e8-adaa-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=product development') + user = uas.create_test_user() + for i in [('mail', '[email protected]'), ('givenname', 'Anuj'), ('userPassword', PW_DM)]: + user.set(i[0], i[1]) + + aci_target = "(targetattr=givenname)" + aci_allow = ('(version 3.0; acl "Name of the ACI"; deny (read, search, compare, write)') + aci_subject = 'userdn="ldap:///anyone";)' + Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_target + aci_allow + aci_subject) + + conn = Anonymous(topo.standalone).bind() + # aci will block targetattr=givenname to anyone + user = UserAccount(conn, user.dn) + with pytest.raises(AssertionError): + assert user.get_attr_val_utf8('givenname') == 'Anuj' + # aci will allow targetattr=uid to anyone + assert user.get_attr_val_utf8('uid') == 'test_user_1000' + + for i in uas.list(): + i.delete() + + [email protected] +def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): + """ + bug 334451 : more then 40 acl will crash slapd + superseded by Bug 772778 - acl cache overflown problem with > 200 acis + :id:93a44c60-7db8-11e8-9439-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + user = uas.create_test_user() + + aci_target = '(target ="ldap:///{}")(targetattr !="userPassword")'.format(CONTAINER_1_DELADD) + # more_then_40_acl_will not crash_slapd + for i in range(40): + aci_allow = '(version 3.0;acl "ACI_{}";allow (read, search, compare)'.format(i) + aci_subject = 'userdn="ldap:///anyone";)' + aci_body = aci_target + aci_allow + aci_subject + Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_body) + conn = Anonymous(topo.standalone).bind() + assert UserAccount(conn, user.dn).get_attr_val_utf8('uid') == 'test_user_1000' + + for i in uas.list(): + i.delete() + [email protected] +def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): + """ + bug 345643 + Misc Test 4 search access should not include read access + :id:98ab173e-7db8-11e8-a309-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + assert Domain(topo.standalone, DEFAULT_SUFFIX).present('aci') + Domain(topo.standalone, DEFAULT_SUFFIX)\ + .add("aci", [f'(target ="ldap:///{DEFAULT_SUFFIX}")(targetattr !="userPassword")' + '(version 3.0;acl "anonymous access";allow (search)' + '(userdn = "ldap:///anyone");)', + f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' + 'acl "allow self write";allow(write) ' + 'userdn = "ldap:///self";)', + f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' + 'acl "Allow all admin group"; allow(all) groupdn = "ldap:///cn=Directory ' + 'Administrators, {}";)']) + + conn = Anonymous(topo.standalone).bind() + # search_access_should_not_include_read_access + suffix = Domain(conn, DEFAULT_SUFFIX) + with pytest.raises(AssertionError): + assert suffix.present('aci') + + +def test_only_allow_some_targetattr(topo, clean, aci_of_user): + """ + Misc Test 5 only allow some targetattr (1/2) + :id:9d27f048-7db8-11e8-a71c-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. 
User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for i in range(1, 3): + user = uas.create_test_user(uid=i, gid=i) + user.replace_many(('cn', 'Anuj1'), ('mail', '[email protected]')) + + Domain(topo.standalone, DEFAULT_SUFFIX).\ + replace("aci", '(target="ldap:///{}")(targetattr="mail||objectClass")' + '(version 3.0; acl "Test";allow (read,search,compare) ' + '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)) + + conn = Anonymous(topo.standalone).bind() + accounts = Accounts(conn, DEFAULT_SUFFIX) + + # aci will allow only mail targetattr + assert len(accounts.filter('(mail=*)')) == 2 + # aci will allow only mail targetattr + assert not accounts.filter('(cn=*)') + # with root no , blockage + assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) == 2 + + for i in uas.list(): + i.delete() + + +def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): + """ + Misc Test 6 only allow some targetattr (2/2)" + :id:a188239c-7db8-11e8-903e-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for i in range(5): + user = uas.create_test_user(uid=i, gid=i) + user.replace_many(('mail', '[email protected]'), + ('cn', 'Anuj'), ('userPassword', PW_DM)) + + user1 = uas.create_test_user() + user1.replace_many(('mail', '[email protected]'), ('userPassword', PW_DM)) + + Domain(topo.standalone, DEFAULT_SUFFIX).\ + replace("aci", '(target="ldap:///{}") (targetattr="mail||objectClass")' + '(targetfilter="cn=Anuj") (version 3.0; acl "$tet_thistest"; ' + 'allow (compare,read,search) ' + '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)) + + conn = UserAccount(topo.standalone, user.dn).bind(PW_DM) + # aci will allow only mail targetattr but only for cn=Anuj + account = Accounts(conn, DEFAULT_SUFFIX) + assert len(account.filter('(mail=*)')) == 5 + assert not account.filter('(cn=*)') + + for i in account.filter('(mail=*)'): + assert i.get_attr_val_utf8('mail') == '[email protected]' + + + conn = Anonymous(topo.standalone).bind() + # aci will allow only mail targetattr but only for cn=Anuj + account = Accounts(conn, DEFAULT_SUFFIX) + assert len(account.filter('(mail=*)')) == 5 + assert not account.filter('(cn=*)') + + for i in account.filter('(mail=*)'): + assert i.get_attr_val_utf8('mail') == '[email protected]' + + # with root no blockage + assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(mail=*)')) == 6 + + for i in uas.list(): + i.delete() + + + [email protected] +def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): + """ + Non-regression test for BUG 326000: MemberURL needs to be normalized + :id:a5d172e6-7db8-11e8-aca7-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ou_ou = OrganizationalUnit(topo.standalone, "ou=PEOPLE,{}".format(DEFAULT_SUFFIX)) + ou_ou.set('aci', '(targetattr= *)' + '(version 3.0; acl "tester"; allow(all) ' + 'groupdn = "ldap:///cn =DYNGROUP,ou=PEOPLE, {}";)'.format(DEFAULT_SUFFIX)) + + groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=PEOPLE') + groups.create(properties={"cn": "DYNGROUP", + "description": "DYNGROUP", + 'objectClass': 'groupOfURLS', + 'memberURL': "ldap:///ou=PEOPLE,{}??sub?" + "(uid=test_user_2)".format(DEFAULT_SUFFIX)}) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for demo1 in [(1, "Entry to test rights on."), (2, "Member of DYNGROUP")]: + user = uas.create_test_user(uid=demo1[0], gid=demo1[0]) + user.replace_many(('description', demo1[1]), ('userPassword', PW_DM)) + + ##with normal aci + conn = UserAccount(topo.standalone, uas.list()[1].dn).bind(PW_DM) + harry = UserAccount(conn, uas.list()[1].dn) + harry.add('sn', 'FRED') + + ##with abnomal aci + dygrp = Group(topo.standalone, DYNGROUP) + dygrp.remove('memberurl', "ldap:///ou=PEOPLE,{}??sub?(uid=test_user_2)".format(DEFAULT_SUFFIX)) + dygrp.add('memberurl', "ldap:///ou=PEOPLE,{}??sub?(uid=tesT_UsEr_2)".format(DEFAULT_SUFFIX)) + harry.add('sn', 'Not FRED') + + for i in uas.list(): + i.delete() + [email protected] +def test_greater_than_200_acls_can_be_created(topo, clean, aci_of_user): + """ + Misc 10, check that greater than 200 ACLs can be created. Bug 624370 + :id:ac020252-7db8-11e8-8652-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # greater_than_200_acls_can_be_created + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(200): + user = uas.create_test_user(uid=i, gid=i) + user.set('aci', '(targetattr = "description")' + '(version 3.0;acl "foo{}"; allow (read, search, compare)' + '(userdn="ldap:///anyone");)'.format(i)) + + assert user.\ + get_attr_val_utf8('aci') == '(targetattr = "description")' \ + '(version 3.0;acl "foo{}"; allow ' \ + '(read, search, compare)' \ + '(userdn="ldap:///anyone");)'.format(i) + for i in uas.list(): + i.delete() + + [email protected] +def test_server_bahaves_properly_with_very_long_attribute_names(topo, clean, aci_of_user): + """ + Make sure the server bahaves properly with very long attribute names. Bug 624453. + :id:b0d31942-7db8-11e8-a833-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + users.create_test_user() + users.list()[0].set('userpassword', PW_DM) + + user = UserAccount(topo.standalone, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)) + with pytest.raises(ldap.INVALID_SYNTAX): + user.add("aci", "a" * 9000) + + +def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): + """ + Do bind as 201 distinct users + Increase the nsslapd-aclpb-max-selected-acls in cn=ACL Plugin,cn=plugins,cn=config + Restart the server + Do bind as 201 distinct users + :id:c0060532-7db8-11e8-a124-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(50): + user = uas.create_test_user(uid=i, gid=i) + user.set('userPassword', PW_DM) + + for i in range(len(uas.list())): + uas.list()[i].bind(PW_DM) + + ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220') + topo.standalone.restart() + + for i in range(len(uas.list())): + uas.list()[i].bind(PW_DM) + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py new file mode 100644 index 000000000..1f48f973e --- /dev/null +++ b/dirsrvtests/tests/suites/acl/syntax_test.py @@ -0,0 +1,258 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.domain import Domain +from lib389.topologies import topology_st as topo + +import ldap + +INVALID = [('test_targattrfilters_1', + f'(targattrfilters ="add=title:title=fred),del=cn:(cn!=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_2', + f'(targattrfilters ="add=:(title=fred),del=cn:(cn!=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_3', + f'(targattrfilters ="add=:(title=fred),del=cn:(cn!=harry))' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_4', + f'(targattrfilters ="add=title:(title=fred),=cn:(cn!=harry")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_5', + f'(targattrfilters ="add=title:(|(title=fred)(cn=harry)),del=cn:(cn=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_6', + f'(targattrfilters ="add=title:(|(title=fred)(title=harry)),del=cn:(title=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_7', + f'(targattrfilters ="add=title:(cn=architect), ' + f'del=title:(title=architect) && l:(l=cn=Meylan,dc=example,dc=com")")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_8', + f'(targattrfilters ="add=title:(cn=architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_9', + f'(targattrfilters ="add=title:(cn=arch*)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_10', + f'(targattrfilters ="add=title:(cn >= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_11', + f'(targattrfilters ="add=title:(cn <= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_12', + f'(targattrfilters ="add=title:(cn ~= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_13', + f'(targattrfilters ="add=title:(!(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + 
('test_targattrfilters_14', + f'(targattrfilters ="add=title:(&(cn=fred)(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_15', + f'(targattrfilters ="add=title:(|(cn=fred)(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_16', + f'(targattrfilters ="add=title:(&(|(title=fred)(title=harry))(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_17', + f'\(targattrfilters ="add=title:(&(|(&(title=harry)(title=fred))' + f'(title=harry))(title ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_19', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny(write)gropdn="ldap:///anyone";)'), + ('test_targattrfilters_21', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny(rite)userdn="ldap:///anyone";)'), + ('test_targattrfilters_22', + f'(targt = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_23', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_acl_mispel', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; alc "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_acl_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Wrong_version_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 2.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_version_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Authenticate_statement', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr != "uid")' + f'(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn="ldap:///anyone";)'), + ('test_Multiple_targets', + f'(target = ldap:///ou=Product Development,{DEFAULT_SUFFIX})' + f'(target = ldap:///ou=Product Testing,{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Target_set_to_self', + f'(target = ldap:///self)(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_ldap_instead_of_ldap', + f'(target = ldap:\\\{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_more_than_three', + f'(target = ldap:////{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + 
('test_target_set_with_less_than_three', + f'(target = ldap://{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_bind_rule_set_with_less_than_three', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:/anyone";)'), + ('test_Use_semicolon_instead_of_comma_in_permission', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny ' + f'(read; search; compare; write)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_target', + f'(target == ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_use_double_equal_instead_of_equal_in_user_and_group_access', + f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn == "ldap:///anyone";)'), + ('test_donot_cote_the_name_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(version 3.0; acl Name of the ACI ; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_extra_parentheses_case_1', + f'( )(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_extra_parentheses_case_2', + f'(((((target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn == "ldap:///anyone";)'), + ('test_extra_parentheses_case_3', + f'(((target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn = "ldap:///anyone";)))'), + ('test_no_semicolon_at_the_end_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone")'), + ('test_a_character_different_of_a_semicolon_at_the_end_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone"%)'), + ('test_bad_filter', + f'(target = ldap:///{DEFAULT_SUFFIX}) ' + f'(targetattr="cn |&| sn |(|) uid")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_targattrfilters', + f'(target = ldap:///{DEFAULT_SUFFIX})(targattrfilters== "add=title:(title=architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_inside_the_targattrfilters', + f'(target = ldap:///{DEFAULT_SUFFIX})(targattrfilters="add==title:(title==architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'),] + + +FAILED = [('test_targattrfilters_18', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny(write)userdn="ldap:///{"123" * 300}";)'), + ('test_targattrfilters_20', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny(write)userdns="ldap:///anyone";)'), + ('test_bind_rule_set_with_more_than_three', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn="ldap:////////anyone";)'), + 
('test_Use_double_equal_instead_of_equal_in_the_targetattr', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr==*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_targetfilter', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetfilter==*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn="ldap:///anyone";)'), ] + + [email protected](reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473') [email protected]("real_value", [a[1] for a in FAILED], + ids=[a[0] for a in FAILED]) +def test_aci_invalid_syntax_fail(topo, real_value): + """ + + Try to set wrong ACI syntax. + + :id: d544d09a-6ed1-11e8-8872-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. It should not pass + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", real_value) + + [email protected]("real_value", [a[1] for a in INVALID], + ids=[a[0] for a in INVALID]) +def test_aci_invalid_syntax(topo, real_value): + """ + + Try to set wrong ACI syntax. + + :id: d544d09a-6ed1-11e8-8872-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. It should not pass + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", real_value) + + +def test_target_set_above_the_entry_test(topo): + """ + Try to set wrong ACI syntax. + + :id: d544d09a-6ed1-11e8-8872-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. It should not pass + """ + domain = Domain(topo.standalone, "ou=People,{}".format(DEFAULT_SUFFIX)) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn="ldap:///anyone";)') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE)
0
7ba8a80cfbaed9f6d727f98ed8c284943b3295e1
389ds/389-ds-base
Ticket 50260 - backend txn plugins can corrupt entry cache Bug Description: If a nested backend txn plugin fails, any updates it made that went into the entry cache still persist after the database transaction is aborted. Fix Description: To be sure the entry cache is not corrupted after a backend txn plugin failure, we need to flush all the cache entries that were added to the cache after the parent operation was started. To do this we record the start time of the original (parent) operation, and we record the time any entry is added to the cache. Then on failure we do a comparison and remove the entry from the cache if it is not in use. If it is in use, we add an "invalid" flag which triggers the entry to be removed when the cache entry is returned by the owner. https://pagure.io/389-ds-base/issue/50260 CI tested and ASAN approved. Reviewed by: firstyear, tbordaz, and lkrispen (Thanks!!!)
commit 7ba8a80cfbaed9f6d727f98ed8c284943b3295e1 Author: Mark Reynolds <[email protected]> Date: Thu Mar 7 15:38:25 2019 -0500 Ticket 50260 - backend txn plugins can corrupt entry cache Bug Description: If a nested backend txn plugin fails, any updates it made that went into the entry cache still persist after the database transaction is aborted. Fix Description: In order to be sure the entry cache is not corrupted after a backend txn plugin failure we need to flush all the cache entries that were added to the cache after the parent operation was started. To do this we record the start time the original operation, (or parent operation), and we record the time any entry is added to the cache. Then on failure we do a comparision and remove the entry from the cache if it's not in use. If it is in use we add a "invalid" flag which triggers the entry to be removed when the cache entry is returned by the owner. https://pagure.io/389-ds-base/issue/50260 CI tested and ASAN approved. Reviewed by: firstyear, tbordaz, and lkrispen (Thanks!!!) diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py index 3b814348d..2aadddef2 100644 --- a/dirsrvtests/tests/suites/betxns/betxn_test.py +++ b/dirsrvtests/tests/suites/betxns/betxn_test.py @@ -7,22 +7,23 @@ # --- END COPYRIGHT BLOCK --- # import pytest -import six import ldap from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st - -from lib389.plugins import SevenBitCheckPlugin, AttributeUniquenessPlugin, MemberOfPlugin - +from lib389.plugins import (SevenBitCheckPlugin, AttributeUniquenessPlugin, + MemberOfPlugin, ManagedEntriesPlugin, + ReferentialIntegrityPlugin, MEPTemplates, + MEPConfigs) from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES -from lib389.idm.group import Groups - -from lib389._constants import DEFAULT_SUFFIX, PLUGIN_7_BIT_CHECK, PLUGIN_ATTR_UNIQUENESS, PLUGIN_MEMBER_OF +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.group import Groups, Group +from lib389._constants import DEFAULT_SUFFIX logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) + def test_betxt_7bit(topology_st): """Test that the 7-bit plugin correctly rejects an invalid update @@ -52,7 +53,6 @@ def test_betxt_7bit(topology_st): sevenbc.enable() topology_st.standalone.restart() - users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) user = users.create(properties=TEST_USER_PROPERTIES) @@ -69,7 +69,7 @@ def test_betxt_7bit(topology_st): user_check = users.get("testuser") - assert user_check.dn == user.dn + assert user_check.dn.lower() == user.dn.lower() # # Cleanup - remove the user @@ -100,9 +100,6 @@ def test_betxn_attr_uniqueness(topology_st): 5. 
Test user entry should be removed """ - USER1_DN = 'uid=test_entry1,' + DEFAULT_SUFFIX - USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX - attruniq = AttributeUniquenessPlugin(topology_st.standalone) attruniq.enable() topology_st.standalone.restart() @@ -110,26 +107,22 @@ def test_betxn_attr_uniqueness(topology_st): users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) user1 = users.create(properties={ 'uid': 'testuser1', - 'cn' : 'testuser1', - 'sn' : 'user1', - 'uidNumber' : '1001', - 'gidNumber' : '2001', - 'homeDirectory' : '/home/testuser1' + 'cn': 'testuser1', + 'sn': 'user1', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'homeDirectory': '/home/testuser1' }) - try: - user2 = users.create(properties={ + with pytest.raises(ldap.LDAPError): + users.create(properties={ 'uid': ['testuser2', 'testuser1'], - 'cn' : 'testuser2', - 'sn' : 'user2', - 'uidNumber' : '1002', - 'gidNumber' : '2002', - 'homeDirectory' : '/home/testuser2' + 'cn': 'testuser2', + 'sn': 'user2', + 'uidNumber': '1002', + 'gidNumber': '2002', + 'homeDirectory': '/home/testuser2' }) - log.fatal('test_betxn_attr_uniqueness: The second entry was incorrectly added.') - assert False - except ldap.LDAPError as e: - log.error('test_betxn_attr_uniqueness: Failed to add test user as expected:') user1.delete() @@ -191,8 +184,8 @@ def test_betxn_memberof(topology_st): log.info('test_betxn_memberof: PASSED') -def test_betxn_modrdn_memberof(topology_st): - """Test modrdn operartions and memberOf +def test_betxn_modrdn_memberof_cache_corruption(topology_st): + """Test modrdn operations and memberOf :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5994 @@ -227,18 +220,18 @@ def test_betxn_modrdn_memberof(topology_st): # Create user and add it to group users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) - user = users.create(properties=TEST_USER_PROPERTIES) + user = users.ensure_state(properties=TEST_USER_PROPERTIES) if not ds_is_older('1.3.7'): user.remove('objectClass', 'nsMemberOf') group.add_member(user.dn) # Attempt modrdn that should fail, but the original entry should stay in the cache - with pytest.raises(ldap.OBJECTCLASS_VIOLATION): + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): group.rename('cn=group_to_people', newsuperior=peoplebase) # Should fail, but not with NO_SUCH_OBJECT as the original entry should still be in the cache - with pytest.raises(ldap.OBJECTCLASS_VIOLATION): + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): group.rename('cn=group_to_people', newsuperior=peoplebase) # @@ -247,6 +240,108 @@ def test_betxn_modrdn_memberof(topology_st): log.info('test_betxn_modrdn_memberof: PASSED') +def test_ri_and_mep_cache_corruption(topology_st): + """Test RI plugin aborts change after MEP plugin fails. + This is really testing the entry cache for corruption + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995 + + :setup: Standalone instance + + :steps: 1. Enable and configure mep and ri plugins + 2. Add user and add it to a group + 3. Disable MEP plugin and remove MEP group + 4. Delete user + 5. Check that user is still a member of the group + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. It fails with NO_SUCH_OBJECT + 5. 
Success + + """ + # Start plugins + topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') + mep_plugin = ManagedEntriesPlugin(topology_st.standalone) + mep_plugin.enable() + ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone) + ri_plugin.enable() + + # Add our org units + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.create(properties={'ou': 'managed_people'}) + ou_groups = ous.create(properties={'ou': 'managed_groups'}) + + # Configure MEP + mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX) + mep_template1 = mep_templates.create(properties={ + 'cn': 'MEP template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), + 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') + }) + mep_configs = MEPConfigs(topology_st.standalone) + mep_configs.create(properties={'cn': 'config', + 'originScope': ou_people.dn, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': ou_groups.dn, + 'managedTemplate': mep_template1.dn}) + + # Add an entry that meets the MEP scope + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, + rdn='ou={}'.format(ou_people.rdn)) + user = users.create(properties={ + 'uid': 'test-user1', + 'cn': 'test-user', + 'sn': 'test-user', + 'uidNumber': '10011', + 'gidNumber': '20011', + 'homeDirectory': '/home/test-user1' + }) + + # Add group + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + user_group = groups.ensure_state(properties={'cn': 'group', 'member': user.dn}) + + # Check if a managed group entry was created + mep_group = Group(topology_st.standalone, dn='cn={},{}'.format(user.rdn, ou_groups.dn)) + if not mep_group.exists(): + log.fatal("MEP group was not created for the user") + assert False + + # Mess with MEP so it fails + mep_plugin.disable() + mep_group.delete() + mep_plugin.enable() + + # Add another group for verify entry cache is not corrupted + test_group = groups.create(properties={'cn': 'test_group'}) + + # Delete user, should fail, and user should still be a member + with pytest.raises(ldap.NO_SUCH_OBJECT): + user.delete() + + # Verify membership is intact + if not user_group.is_member(user.dn): + log.fatal("Member was incorrectly removed from the group!! 
Or so it seems") + + # Restart server and test again in case this was a cache issue + topology_st.standalone.restart() + if user_group.is_member(user.dn): + log.info("The entry cache was corrupted") + assert False + + assert False + + # Verify test group is still found in entry cache by deleting it + test_group.delete() + + # Success + log.info("Test PASSED") + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode diff --git a/dirsrvtests/tests/suites/plugins/acceptance_test.py b/dirsrvtests/tests/suites/plugins/acceptance_test.py index 894c0ff25..f44b684ce 100644 --- a/dirsrvtests/tests/suites/plugins/acceptance_test.py +++ b/dirsrvtests/tests/suites/plugins/acceptance_test.py @@ -18,7 +18,7 @@ from lib389.utils import * from lib389.plugins import * from lib389._constants import * from lib389.dseldif import DSEldif -from lib389.idm.user import UserAccounts, UserAccount +from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.idm.organizationalunit import OrganizationalUnits from lib389.idm.domain import Domain diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h index 79115fe12..f690ad99a 100644 --- a/ldap/servers/slapd/back-ldbm/back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h @@ -312,48 +312,52 @@ typedef struct struct backcommon { - int ep_type; /* to distinguish backdn from backentry */ - struct backcommon *ep_lrunext; /* for the cache */ - struct backcommon *ep_lruprev; /* for the cache */ - ID ep_id; /* entry id */ - char ep_state; /* state in the cache */ -#define ENTRY_STATE_DELETED 0x1 /* entry is marked as deleted */ -#define ENTRY_STATE_CREATING 0x2 /* entry is being created; don't touch it */ -#define ENTRY_STATE_NOTINCACHE 0x4 /* cache_add failed; not in the cache */ - int ep_refcnt; /* entry reference cnt */ - size_t ep_size; /* for cache tracking */ + int32_t ep_type; /* to distinguish backdn from backentry */ + struct backcommon *ep_lrunext; /* for the cache */ + struct backcommon *ep_lruprev; /* for the cache */ + ID ep_id; /* entry id */ + uint8_t ep_state; /* state in the cache */ +#define ENTRY_STATE_DELETED 0x1 /* entry is marked as deleted */ +#define ENTRY_STATE_CREATING 0x2 /* entry is being created; don't touch it */ +#define ENTRY_STATE_NOTINCACHE 0x4 /* cache_add failed; not in the cache */ +#define ENTRY_STATE_INVALID 0x8 /* cache entry is invalid and needs to be removed */ + int32_t ep_refcnt; /* entry reference cnt */ + size_t ep_size; /* for cache tracking */ + struct timespec ep_create_time; /* the time the entry was added to the cache */ }; -/* From ep_type through ep_size MUST be identical to backcommon */ +/* From ep_type through ep_create_time MUST be identical to backcommon */ struct backentry { - int ep_type; /* to distinguish backdn from backentry */ - struct backcommon *ep_lrunext; /* for the cache */ - struct backcommon *ep_lruprev; /* for the cache */ - ID ep_id; /* entry id */ - char ep_state; /* state in the cache */ - int ep_refcnt; /* entry reference cnt */ - size_t ep_size; /* for cache tracking */ - Slapi_Entry *ep_entry; /* real entry */ + int32_t ep_type; /* to distinguish backdn from backentry */ + struct backcommon *ep_lrunext; /* for the cache */ + struct backcommon *ep_lruprev; /* for the cache */ + ID ep_id; /* entry id */ + uint8_t ep_state; /* state in the cache */ + int32_t ep_refcnt; /* entry reference cnt */ + size_t ep_size; /* for cache tracking */ + struct timespec ep_create_time; /* the time the entry was added to the cache */ + 
Slapi_Entry *ep_entry; /* real entry */ Slapi_Entry *ep_vlventry; - void *ep_dn_link; /* linkage for the 3 hash */ - void *ep_id_link; /* tables used for */ - void *ep_uuid_link; /* looking up entries */ - PRMonitor *ep_mutexp; /* protection for mods; make it reentrant */ + void *ep_dn_link; /* linkage for the 3 hash */ + void *ep_id_link; /* tables used for */ + void *ep_uuid_link; /* looking up entries */ + PRMonitor *ep_mutexp; /* protection for mods; make it reentrant */ }; -/* From ep_type through ep_size MUST be identical to backcommon */ +/* From ep_type through ep_create_time MUST be identical to backcommon */ struct backdn { - int ep_type; /* to distinguish backdn from backentry */ - struct backcommon *ep_lrunext; /* for the cache */ - struct backcommon *ep_lruprev; /* for the cache */ - ID ep_id; /* entry id */ - char ep_state; /* state in the cache; share ENTRY_STATE_* */ - int ep_refcnt; /* entry reference cnt */ - uint64_t ep_size; /* for cache tracking */ + int32_t ep_type; /* to distinguish backdn from backentry */ + struct backcommon *ep_lrunext; /* for the cache */ + struct backcommon *ep_lruprev; /* for the cache */ + ID ep_id; /* entry id */ + uint8_t ep_state; /* state in the cache; share ENTRY_STATE_* */ + int32_t ep_refcnt; /* entry reference cnt */ + uint64_t ep_size; /* for cache tracking */ + struct timespec ep_create_time; /* the time the entry was added to the cache */ Slapi_DN *dn_sdn; - void *dn_id_link; /* for hash table */ + void *dn_id_link; /* for hash table */ }; /* for the in-core cache of entries */ diff --git a/ldap/servers/slapd/back-ldbm/backentry.c b/ldap/servers/slapd/back-ldbm/backentry.c index f2fe780db..972842bcb 100644 --- a/ldap/servers/slapd/back-ldbm/backentry.c +++ b/ldap/servers/slapd/back-ldbm/backentry.c @@ -23,7 +23,8 @@ backentry_free(struct backentry **bep) return; } ep = *bep; - PR_ASSERT(ep->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)); + + PR_ASSERT(ep->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE | ENTRY_STATE_INVALID)); if (ep->ep_entry != NULL) { slapi_entry_free(ep->ep_entry); } diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c index a27505c4b..ba9d26f13 100644 --- a/ldap/servers/slapd/back-ldbm/cache.c +++ b/ldap/servers/slapd/back-ldbm/cache.c @@ -56,11 +56,14 @@ #define LOG(...) #endif -#define LRU_DETACH(cache, e) lru_detach((cache), (void *)(e)) +typedef enum { + ENTRY_CACHE, + DN_CACHE, +} CacheType; +#define LRU_DETACH(cache, e) lru_detach((cache), (void *)(e)) #define CACHE_LRU_HEAD(cache, type) ((type)((cache)->c_lruhead)) #define CACHE_LRU_TAIL(cache, type) ((type)((cache)->c_lrutail)) - #define BACK_LRU_NEXT(entry, type) ((type)((entry)->ep_lrunext)) #define BACK_LRU_PREV(entry, type) ((type)((entry)->ep_lruprev)) @@ -185,6 +188,7 @@ new_hash(u_long size, u_long offset, HashFn hfn, HashTestFn tfn) int add_hash(Hashtable *ht, void *key, uint32_t keylen, void *entry, void **alt) { + struct backcommon *back_entry = (struct backcommon *)entry; u_long val, slot; void *e; @@ -202,6 +206,7 @@ add_hash(Hashtable *ht, void *key, uint32_t keylen, void *entry, void **alt) e = HASH_NEXT(ht, e); } /* ok, it's not already there, so add it */ + back_entry->ep_create_time = slapi_current_rel_time_hr(); HASH_NEXT(ht, entry) = ht->slot[slot]; ht->slot[slot] = entry; return 1; @@ -492,6 +497,89 @@ cache_make_hashes(struct cache *cache, int type) } } +/* + * Helper function for flush_hash() to calculate if the entry should be + * removed from the cache. 
+ */ +static int32_t +flush_remove_entry(struct timespec *entry_time, struct timespec *start_time) +{ + struct timespec diff; + + slapi_timespec_diff(entry_time, start_time, &diff); + if (diff.tv_sec >= 0) { + return 1; + } else { + return 0; + } +} + +/* + * Flush all the cache entries that were added after the "start time" + * This is called when a backend transaction plugin fails, and we need + * to remove all the possible invalid entries in the cache. + * + * If the ref count is 0, we can straight up remove it from the cache, but + * if the ref count is greater than 1, then the entry is currently in use. + * In the later case we set the entry state to ENTRY_STATE_INVALID, and + * when the owning thread cache_returns() the cache entry is automatically + * removed so another thread can not use/lock the invalid cache entry. + */ +static void +flush_hash(struct cache *cache, struct timespec *start_time, int32_t type) +{ + void *e, *laste = NULL; + Hashtable *ht = cache->c_idtable; + + cache_lock(cache); + + for (size_t i = 0; i < ht->size; i++) { + e = ht->slot[i]; + while (e) { + struct backcommon *entry = (struct backcommon *)e; + uint64_t remove_it = 0; + if (flush_remove_entry(&entry->ep_create_time, start_time)) { + /* Mark the entry to be removed */ + slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", "[%s] Removing entry id (%d)\n", + type ? "DN CACHE" : "ENTRY CACHE", entry->ep_id); + remove_it = 1; + } + laste = e; + e = HASH_NEXT(ht, e); + + if (remove_it) { + /* since we have the cache lock we know we can trust refcnt */ + entry->ep_state |= ENTRY_STATE_INVALID; + if (entry->ep_refcnt == 0) { + entry->ep_refcnt++; + lru_delete(cache, laste); + if (type == ENTRY_CACHE) { + entrycache_remove_int(cache, laste); + entrycache_return(cache, (struct backentry **)&laste); + } else { + dncache_remove_int(cache, laste); + dncache_return(cache, (struct backdn **)&laste); + } + } else { + /* Entry flagged for removal */ + slapi_log_err(SLAPI_LOG_CACHE, "flush_hash", + "[%s] Flagging entry to be removed later: id (%d) refcnt: %d\n", + type ? 
"DN CACHE" : "ENTRY CACHE", entry->ep_id, entry->ep_refcnt); + } + } + } + } + + cache_unlock(cache); +} + +void +revert_cache(ldbm_instance *inst, struct timespec *start_time) +{ + flush_hash(&inst->inst_cache, start_time, ENTRY_CACHE); + flush_hash(&inst->inst_dncache, start_time, DN_CACHE); +} + /* initialize the cache */ int cache_init(struct cache *cache, uint64_t maxsize, int64_t maxentries, int type) @@ -1142,7 +1230,7 @@ entrycache_return(struct cache *cache, struct backentry **bep) } else { ASSERT(e->ep_refcnt > 0); if (!--e->ep_refcnt) { - if (e->ep_state & ENTRY_STATE_DELETED) { + if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_INVALID)) { const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(e)); if (ndn) { /* @@ -1154,6 +1242,13 @@ entrycache_return(struct cache *cache, struct backentry **bep) LOG("entrycache_return -Failed to remove %s from dn table\n", ndn); } } + if (e->ep_state & ENTRY_STATE_INVALID) { + /* Remove it from the hash table before we free the back entry */ + slapi_log_err(SLAPI_LOG_CACHE, "entrycache_return", + "Finally flushing invalid entry: %d (%s)\n", + e->ep_id, backentry_get_ndn(e)); + entrycache_remove_int(cache, e); + } backentry_free(bep); } else { lru_add(cache, e); @@ -1535,7 +1630,7 @@ cache_lock_entry(struct cache *cache, struct backentry *e) /* make sure entry hasn't been deleted now */ cache_lock(cache); - if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)) { + if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE | ENTRY_STATE_INVALID)) { cache_unlock(cache); PR_ExitMonitor(e->ep_mutexp); LOG("<= cache_lock_entry (DELETED)\n"); @@ -1696,7 +1791,14 @@ dncache_return(struct cache *cache, struct backdn **bdn) } else { ASSERT((*bdn)->ep_refcnt > 0); if (!--(*bdn)->ep_refcnt) { - if ((*bdn)->ep_state & ENTRY_STATE_DELETED) { + if ((*bdn)->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_INVALID)) { + if ((*bdn)->ep_state & ENTRY_STATE_INVALID) { + /* Remove it from the hash table before we free the back dn */ + slapi_log_err(SLAPI_LOG_CACHE, "dncache_return", + "Finally flushing invalid entry: %d (%s)\n", + (*bdn)->ep_id, slapi_sdn_get_dn((*bdn)->dn_sdn)); + dncache_remove_int(cache, (*bdn)); + } backdn_free(bdn); } else { lru_add(cache, (void *)*bdn); diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c index f26911595..8c0439c5e 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_add.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c @@ -97,6 +97,8 @@ ldbm_back_add(Slapi_PBlock *pb) PRUint64 conn_id; int op_id; int result_sent = 0; + int32_t parent_op = 0; + struct timespec parent_time; if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { conn_id = 0; /* connection is NULL */ @@ -147,6 +149,13 @@ ldbm_back_add(Slapi_PBlock *pb) slapi_entry_delete_values(e, numsubordinates, NULL); dblayer_txn_init(li, &txn); + + if (txn.back_txn_txn == NULL) { + /* This is the parent operation, get the time */ + parent_op = 1; + parent_time = slapi_current_rel_time_hr(); + } + /* the calls to perform searches require the parent txn if any so set txn to the parent_txn until we begin the child transaction */ if (parent_txn) { @@ -1212,6 +1221,11 @@ ldbm_back_add(Slapi_PBlock *pb) slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? 
&ldap_result_code : &retval); } slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); + + /* Revert the caches if this is the parent operation */ + if (parent_op) { + revert_cache(inst, &parent_time); + } goto error_return; } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index 3a27fd071..98b3d82be 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -79,6 +79,8 @@ ldbm_back_delete(Slapi_PBlock *pb) ID tomb_ep_id = 0; int result_sent = 0; Connection *pb_conn; + int32_t parent_op = 0; + struct timespec parent_time; if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { conn_id = 0; /* connection is NULL */ @@ -100,6 +102,13 @@ ldbm_back_delete(Slapi_PBlock *pb) dblayer_txn_init(li, &txn); /* the calls to perform searches require the parent txn if any so set txn to the parent_txn until we begin the child transaction */ + + if (txn.back_txn_txn == NULL) { + /* This is the parent operation, get the time */ + parent_op = 1; + parent_time = slapi_current_rel_time_hr(); + } + if (parent_txn) { txn.back_txn_txn = parent_txn; } else { @@ -1270,6 +1279,11 @@ replace_entry: slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &retval); } slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); + + /* Revert the caches if this is the parent operation */ + if (parent_op) { + revert_cache(inst, &parent_time); + } goto error_return; } if (parent_found) { diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c index cc4319e5f..b90b3e0f0 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c @@ -412,6 +412,8 @@ ldbm_back_modify(Slapi_PBlock *pb) int fixup_tombstone = 0; int ec_locked = 0; int result_sent = 0; + int32_t parent_op = 0; + struct timespec parent_time; slapi_pblock_get(pb, SLAPI_BACKEND, &be); slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); @@ -426,6 +428,13 @@ ldbm_back_modify(Slapi_PBlock *pb) dblayer_txn_init(li, &txn); /* must do this before first goto error_return */ /* the calls to perform searches require the parent txn if any so set txn to the parent_txn until we begin the child transaction */ + + if (txn.back_txn_txn == NULL) { + /* This is the parent operation, get the time */ + parent_op = 1; + parent_time = slapi_current_rel_time_hr(); + } + if (parent_txn) { txn.back_txn_txn = parent_txn; } else { @@ -864,6 +873,11 @@ ldbm_back_modify(Slapi_PBlock *pb) slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? 
&ldap_result_code : &retval); } slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); + + /* Revert the caches if this is the parent operation */ + if (parent_op) { + revert_cache(inst, &parent_time); + } goto error_return; } retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN); diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c index e4d0337d4..73e50ebcc 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c @@ -97,6 +97,8 @@ ldbm_back_modrdn(Slapi_PBlock *pb) int op_id; int result_sent = 0; Connection *pb_conn = NULL; + int32_t parent_op = 0; + struct timespec parent_time; if (slapi_pblock_get(pb, SLAPI_CONN_ID, &conn_id) < 0) { conn_id = 0; /* connection is NULL */ @@ -134,6 +136,13 @@ ldbm_back_modrdn(Slapi_PBlock *pb) /* dblayer_txn_init needs to be called before "goto error_return" */ dblayer_txn_init(li, &txn); + + if (txn.back_txn_txn == NULL) { + /* This is the parent operation, get the time */ + parent_op = 1; + parent_time = slapi_current_rel_time_hr(); + } + /* the calls to perform searches require the parent txn if any so set txn to the parent_txn until we begin the child transaction */ if (parent_txn) { @@ -1208,6 +1217,11 @@ ldbm_back_modrdn(Slapi_PBlock *pb) slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval); } slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); + + /* Revert the caches if this is the parent operation */ + if (parent_op) { + revert_cache(inst, &parent_time); + } goto error_return; } retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN); @@ -1353,8 +1367,13 @@ error_return: slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, ldap_result_code ? &ldap_result_code : &retval); } slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message); + + /* Revert the caches if this is the parent operation */ + if (parent_op) { + revert_cache(inst, &parent_time); + } } - retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN); + retval = plugin_call_mmr_plugin_postop(pb, NULL,SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN); /* Release SERIAL LOCK */ dblayer_txn_abort(be, &txn); /* abort crashes in case disk full */ @@ -1411,17 +1430,6 @@ common_return: "operation failed, the target entry is cleared from dncache (%s)\n", slapi_entry_get_dn(ec->ep_entry)); CACHE_REMOVE(&inst->inst_dncache, bdn); CACHE_RETURN(&inst->inst_dncache, &bdn); - /* - * If the new/invalid entry (ec) is in the cache, that means we need to - * swap it out with the original entry (e) --> to undo the swap that - * modrdn_rename_entry_update_indexes() did. 
- */ - if (cache_is_in_cache(&inst->inst_cache, ec)) { - if (cache_replace(&inst->inst_cache, ec, e) != 0) { - slapi_log_err(SLAPI_LOG_ALERT, "ldbm_back_modrdn", - "failed to replace cache entry after error\n"); - } - } } if (ec && inst) { diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h index 5749e2676..00d4aea7c 100644 --- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h @@ -55,6 +55,7 @@ void cache_unlock_entry(struct cache *cache, struct backentry *e); int cache_replace(struct cache *cache, void *oldptr, void *newptr); int cache_has_otherref(struct cache *cache, void *bep); int cache_is_in_cache(struct cache *cache, void *ptr); +void revert_cache(ldbm_instance *inst, struct timespec *start_time); #ifdef CACHE_DEBUG void check_entry_cache(struct cache *cache, struct backentry *e); diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 4bf226882..9135a12c4 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -6747,6 +6747,12 @@ time_t slapi_current_time(void) __attribute__((deprecated)); * \return timespec of the current relative system time. */ struct timespec slapi_current_time_hr(void); +/** + * Returns the current system time as a hr clock + * + * \return timespec of the current monotonic time. + */ +struct timespec slapi_current_rel_time_hr(void); /** * Returns the current system time as a hr clock in UTC timezone. * This clock adjusts with ntp steps, and should NOT be
0
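The patch that ends above threads a parent_op flag and a parent_time timestamp (taken with slapi_current_rel_time_hr()) through the ldbm delete/modify/modrdn error paths so that revert_cache(inst, &parent_time) can undo whatever the failed parent operation put into the entry caches. A rough Python model of that idea follows; it is a toy in-memory cache with an undo log, not the slapd implementation, and every name in it is illustrative.

import time

class RevertableCache:
    """Toy model: remember each entry's pre-image when it is modified,
    stamped with a monotonic time, so a failed parent operation can undo
    everything it touched since it started."""

    def __init__(self):
        self._entries = {}   # key -> current value
        self._undo = []      # chronological list of (timestamp, key, old_value_or_None)

    def put(self, key, value):
        old = self._entries.get(key)
        self._undo.append((time.monotonic(), key, old))
        self._entries[key] = value

    def revert(self, start_time):
        # Walk the undo log backwards, restoring every change recorded
        # at or after start_time (i.e. by the failed operation).
        while self._undo and self._undo[-1][0] >= start_time:
            _, key, old = self._undo.pop()
            if old is None:
                self._entries.pop(key, None)
            else:
                self._entries[key] = old

cache = RevertableCache()
cache._entries["cn=foo"] = {"sn": "orig"}   # pre-existing cached entry

parent_time = time.monotonic()              # stands in for slapi_current_rel_time_hr()
try:
    cache.put("cn=foo", {"sn": "new"})      # work done by the parent operation
    raise RuntimeError("simulated plugin failure")
except RuntimeError:
    cache.revert(parent_time)               # stands in for revert_cache(inst, &parent_time)

assert cache._entries["cn=foo"] == {"sn": "orig"}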
83a7705b432a55a648b07acb331d3d94afa4b3d7
389ds/389-ds-base
Ticket 49007 - Update DS basic test to better work with systemd. Bug Description: The basic test used to assume certain systemd paths. Fix Description: Because lib389 has picked up support for systemctl, we can now call upon this in our tests. https://fedorahosted.org/389/ticket/49007 Author: wibrown Review by: mreynolds (Thanks!)
commit 83a7705b432a55a648b07acb331d3d94afa4b3d7 Author: William Brown <[email protected]> Date: Wed Oct 12 16:51:17 2016 +1000 Ticket 49007 - Update DS basic test to better work with systemd. Bug Description: The basic test used to assume certain systemd paths. Fix Description: Because lib389 has picked up support for systemctl, we can now call upon this in our tests. https://fedorahosted.org/389/ticket/49007 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py index 8b0ad9b53..1033fd6a3 100644 --- a/dirsrvtests/tests/suites/basic/basic_test.py +++ b/dirsrvtests/tests/suites/basic/basic_test.py @@ -595,43 +595,26 @@ def test_basic_referrals(topology, import_example_ldif): def test_basic_systemctl(topology, import_example_ldif): - """Test systemctl can stop and start the server. Also test that start reports an + """Test systemctl/lib389 can stop and start the server. Also test that start reports an error when the instance does not start. Only for RPM builds """ log.info('Running test_basic_systemctl...') - # We can only use systemctl on RPM installations - if topology.standalone.prefix and topology.standalone.prefix != '/': - return - - data_dir = topology.standalone.getDir(__file__, DATA_DIR) - tmp_dir = '/tmp' - config_dir = topology.standalone.confdir - start_ds = 'sudo systemctl start dirsrv@' + topology.standalone.serverid + '.service' - stop_ds = 'sudo systemctl stop dirsrv@' + topology.standalone.serverid + '.service' - is_running = 'sudo systemctl is-active dirsrv@' + topology.standalone.serverid + '.service' + config_dir = topology.standalone.get_config_dir() # # Stop the server # log.info('Stopping the server...') - rc = os.system(stop_ds) - log.info('Check the status...') - if rc != 0 or os.system(is_running) == 0: - log.fatal('test_basic_systemctl: Failed to stop the server') - assert False + topology.standalone.stop() log.info('Stopped the server.') # # Start the server # log.info('Starting the server...') - rc = os.system(start_ds) - log.info('Check the status...') - if rc != 0 or os.system(is_running) != 0: - log.fatal('test_basic_systemctl: Failed to start the server') - assert False + topology.standalone.start() log.info('Started the server.') # @@ -639,22 +622,21 @@ def test_basic_systemctl(topology, import_example_ldif): # and verify that systemctl detects the failed start # log.info('Stopping the server...') - rc = os.system(stop_ds) - log.info('Check the status...') - if rc != 0 or os.system(is_running) == 0: - log.fatal('test_basic_systemctl: Failed to stop the server') - assert False + topology.standalone.stop() log.info('Stopped the server before breaking the dse.ldif.') - shutil.copy(config_dir + '/dse.ldif', tmp_dir) - shutil.copy(data_dir + 'basic/dse.ldif.broken', config_dir + '/dse.ldif') + shutil.copy(config_dir + '/dse.ldif', config_dir + '/dse.ldif.correct' ) + open(config_dir + '/dse.ldif', 'w').close() + # We need to kill the .bak file too, DS is just too smart! 
+ open(config_dir + '/dse.ldif.bak', 'w').close() log.info('Attempting to start the server with broken dse.ldif...') - rc = os.system(start_ds) + try: + topology.standalone.start() + except: + log.info('Server failed to start as expected') log.info('Check the status...') - if rc == 0 or os.system(is_running) == 0: - log.fatal('test_basic_systemctl: The server incorrectly started') - assert False + assert(not topology.standalone.status()) log.info('Server failed to start as expected') time.sleep(5) @@ -662,16 +644,12 @@ def test_basic_systemctl(topology, import_example_ldif): # Fix the dse.ldif, and make sure the server starts up, # and systemctl correctly identifies the successful start # - shutil.copy(tmp_dir + '/dse.ldif', config_dir) + shutil.copy(config_dir + '/dse.ldif.correct', config_dir + '/dse.ldif' ) log.info('Starting the server with good dse.ldif...') - rc = os.system(start_ds) - time.sleep(5) + topology.standalone.start() log.info('Check the status...') - if rc != 0 or os.system(is_running) != 0: - log.fatal('test_basic_systemctl: Failed to start the server') - assert False + assert(topology.standalone.status()) log.info('Server started after fixing dse.ldif.') - time.sleep(1) log.info('test_basic_systemctl: PASSED') diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in index 80d159628..c0469b349 100644 --- a/ldap/admin/src/defaults.inf.in +++ b/ldap/admin/src/defaults.inf.in @@ -46,6 +46,9 @@ cert_dir = @instconfigdir@/slapd-{instance_name} lock_dir = @localstatedir@/lock/dirsrv/slapd-{instance_name} log_dir = @localstatedir@/log/dirsrv/slapd-{instance_name} +access_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/access +audit_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/audit +error_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/error inst_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name} db_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name}/db backup_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name}/bak
0
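The rewritten test above replaces the hand-rolled systemctl/os.system calls with lib389's own start(), stop() and status() wrappers, and simulates a broken configuration by truncating dse.ldif (and its .bak copy) after saving a good copy aside. A condensed sketch of that pattern, assuming a lib389 topology fixture like the one in the diff; only calls that appear in the patch are used, and the helper name is made up.

import shutil

def check_broken_dse_recovery(topology):
    inst = topology.standalone
    config_dir = inst.get_config_dir()

    # Keep a known-good copy, then truncate dse.ldif and dse.ldif.bak so the
    # server has nothing valid to fall back on.
    shutil.copy(config_dir + '/dse.ldif', config_dir + '/dse.ldif.correct')
    inst.stop()
    open(config_dir + '/dse.ldif', 'w').close()
    open(config_dir + '/dse.ldif.bak', 'w').close()

    try:
        inst.start()
    except Exception:
        pass                      # start is expected to fail here
    assert not inst.status()      # lib389 reports the instance as down

    # Restore the good config and make sure the instance comes back.
    shutil.copy(config_dir + '/dse.ldif.correct', config_dir + '/dse.ldif')
    inst.start()
    assert inst.status()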
07bd20fa32a062915991aed01879a7ac52e336f3
389ds/389-ds-base
Issue 6534 - CI fails with Fedora 41 and DNF5 Bug Description: DNF5 no longer supports verbose option. Fix Description: Remove `-v` flag. Fixes: https://github.com/389ds/389-ds-base/issues/6534 Reviewed by: @progier389 (Thanks!)
commit 07bd20fa32a062915991aed01879a7ac52e336f3 Author: Viktor Ashirov <[email protected]> Date: Tue May 27 07:21:44 2025 +0200 Issue 6534 - CI fails with Fedora 41 and DNF5 Bug Description: DNF5 no longer supports verbose option. Fix Description: Remove `-v` flag. Fixes: https://github.com/389ds/389-ds-base/issues/6534 Reviewed by: @progier389 (Thanks!) diff --git a/.github/workflows/lmdbpytest.yml b/.github/workflows/lmdbpytest.yml index dae65e223..21a55b7bb 100644 --- a/.github/workflows/lmdbpytest.yml +++ b/.github/workflows/lmdbpytest.yml @@ -94,7 +94,7 @@ jobs: do echo "Waiting for container to be ready..." done - sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm" + sudo docker exec $CID sh -c "dnf install -y dist/rpms/*rpm" export PASSWD=$(openssl rand -base64 32) sudo docker exec $CID sh -c "echo \"${PASSWD}\" | passwd --stdin root" sudo docker exec $CID sh -c "systemctl start dbus.service" diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 76a1eb4b0..d8434dfbb 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -94,7 +94,7 @@ jobs: do echo "Waiting for container to be ready..." done - sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm" + sudo docker exec $CID sh -c "dnf install -y dist/rpms/*rpm" export PASSWD=$(openssl rand -base64 32) sudo docker exec $CID sh -c "echo \"${PASSWD}\" | passwd --stdin root" sudo docker exec $CID sh -c "systemctl start dbus.service"
0
60cb52040704686d9541a2e2eb2765d86cb10af2
389ds/389-ds-base
Ticket 49840 - ds-replcheck command returns traceback errors against ldif files having garbage content when run in offline mode Description: Added a basic check to see if the LDIF files are actually LDIF files. Also added checks that the database RUVs are present. https://pagure.io/389-ds-base/issue/49840 Reviewed by: spichugi(Thanks!)
commit 60cb52040704686d9541a2e2eb2765d86cb10af2 Author: Mark Reynolds <[email protected]> Date: Mon Jul 9 15:50:09 2018 -0400 Ticket 49840 - ds-replcheck command returns traceback errors against ldif files having garbage content when run in offline mode Description: Added a basic check to see if the LDIF files are actually LDIF files. Also added checks that the database RUV are present as well. https://pagure.io/389-ds-base/issue/49840 Reviewed by: spichugi(Thanks!) diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck index f94a6cb27..3b96cb69e 100755 --- a/ldap/admin/src/scripts/ds-replcheck +++ b/ldap/admin/src/scripts/ds-replcheck @@ -10,18 +10,19 @@ # import os +import sys import re import time import ldap import ldapurl import argparse import getpass - +from ldif import LDIFRecordList from ldap.ldapobject import SimpleLDAPObject from ldap.cidict import cidict from ldap.controls import SimplePagedResultsControl -VERSION = "1.3" +VERSION = "1.4" RUV_FILTER = '(&(nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff)(objectclass=nstombstone))' LDAP = 'ldap' LDAPS = 'ldaps' @@ -394,14 +395,17 @@ def ldif_search(LDIF, dn): return result -def get_dns(LDIF, opts): +def get_dns(LDIF, filename, opts): ''' Get all the DN's from an LDIF file ''' dns = [] found = False + found_ruv = False + LDIF.seek(0) for line in LDIF: if line.startswith('dn: ') and line[4:].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'): opts['ruv_dn'] = line[4:].lower().strip() + found_ruv = True elif line.startswith('dn: '): found = True dn = line[4:].lower().strip() @@ -415,6 +419,14 @@ def get_dns(LDIF, opts): found = False dns.append(dn) + if not found_ruv: + print('Failed to find the database RUV in the LDIF file: ' + filename + ', the LDIF ' + + 'file must contain replication state information.') + dns = None + else: + # All good, reset cursor + LDIF.seek(0) + return dns @@ -423,6 +435,7 @@ def get_ldif_ruv(LDIF, opts): ''' LDIF.seek(0) result = ldif_search(LDIF, opts['ruv_dn']) + LDIF.seek(0) # Reset cursor return result['entry'].data['nsds50ruv'] @@ -557,6 +570,7 @@ def do_offline_report(opts, output_file=None): rconflicts = [] rtombstones = 0 mtombstones = 0 + idx = 0 # Open LDIF files try: @@ -569,12 +583,36 @@ def do_offline_report(opts, output_file=None): RLDIF = open(opts['rldif'], "r") except Exception as e: print('Failed to open Replica LDIF: ' + str(e)) + MLDIF.close() + return None + + # Verify LDIF Files + try: + print("Validating Master ldif file ({})...".format(opts['mldif'])) + LDIFRecordList(MLDIF).parse() + except ValueError: + print('Master LDIF file in invalid, aborting...') + MLDIF.close() + RLDIF.close() + return None + try: + print("Validating Replica ldif file ({})...".format(opts['rldif'])) + LDIFRecordList(RLDIF).parse() + except ValueError: + print('Replica LDIF file is invalid, aborting...') + MLDIF.close() + RLDIF.close() return None # Get all the dn's, and entry counts print ("Gathering all the DN's...") - master_dns = get_dns(MLDIF, opts) - replica_dns = get_dns(RLDIF, opts) + master_dns = get_dns(MLDIF, opts['mldif'], opts) + replica_dns = get_dns(RLDIF, opts['rldif'], opts) + if master_dns is None or replica_dns is None: + print("Aborting scan...") + MLDIF.close() + RLDIF.close() + sys.exit(1) m_count = len(master_dns) r_count = len(replica_dns) @@ -583,11 +621,6 @@ def do_offline_report(opts, output_file=None): opts['master_ruv'] = get_ldif_ruv(MLDIF, opts) opts['replica_ruv'] = get_ldif_ruv(RLDIF, opts) - # Reset the cursors - idx = 0 - 
MLDIF.seek(idx) - RLDIF.seek(idx) - """ Compare the master entries with the replica's. Take our list of dn's from the master ldif and get that entry( dn) from the master and replica ldif. In this phase we keep keep track of conflict/tombstone counts, and we check for
0
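The patch above guards the offline report by running each input file through python-ldap's LDIFRecordList parser before any DN scanning, and by rewinding the file handle (seek(0)) after each full pass so later phases start from the beginning again. A small standalone sketch of that validation step, using the same ldif module the script imports; the function name and the command-line usage are just for illustration.

import sys
from ldif import LDIFRecordList

def validate_ldif(path):
    """Return an open handle positioned at the start if 'path' parses as
    LDIF, otherwise print an error and return None."""
    try:
        fh = open(path, "r")
    except Exception as e:
        print('Failed to open LDIF: ' + str(e))
        return None
    try:
        LDIFRecordList(fh).parse()   # raises ValueError on malformed input
    except ValueError:
        print(path + ' is not a valid LDIF file, aborting...')
        fh.close()
        return None
    fh.seek(0)                       # parsing consumed the file, rewind it
    return fh

if __name__ == '__main__':
    if validate_ldif(sys.argv[1]) is None:
        sys.exit(1)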
3cda974c123bb5c3ee58cfc2b737ba9aaaae24f8
389ds/389-ds-base
Issue 5415 - Hostname when set to localhost causing failures in other tests Description: When the hostname is set to localhost it is causing failures in other test suites like replication Fixes: https://github.com/389ds/389-ds-base/issues/5415 Reviewed by: @bsimonova, @tbordaz (Thanks!)
commit 3cda974c123bb5c3ee58cfc2b737ba9aaaae24f8 Author: Akshay Adhikari <[email protected]> Date: Thu Aug 11 19:00:41 2022 +0530 Issue 5415 - Hostname when set to localhost causing failures in other tests Description: When the hostname is set to localhost it is causing failures in other test suites like replication Fixes: https://github.com/389ds/389-ds-base/issues/5415 Reviewed by: @bsimonova, @tbordaz (Thanks!) diff --git a/dirsrvtests/tests/suites/acl/keywords_part2_test.py b/dirsrvtests/tests/suites/acl/keywords_part2_test.py index 5b27024d4..ed838a34b 100644 --- a/dirsrvtests/tests/suites/acl/keywords_part2_test.py +++ b/dirsrvtests/tests/suites/acl/keywords_part2_test.py @@ -20,6 +20,7 @@ from lib389._constants import DEFAULT_SUFFIX, PW_DM from lib389.idm.domain import Domain from lib389.idm.organizationalunit import OrganizationalUnit from lib389.idm.user import UserAccount +from lib389.utils import * pytestmark = pytest.mark.tier1 @@ -39,7 +40,7 @@ NIGHTWORKER_KEY = "uid=NIGHTWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) NOWORKER_KEY = "uid=NOWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) -def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user): +def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user, request): """ User can access the data when connecting from certain network only as per the ACI. @@ -95,8 +96,14 @@ def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user): with pytest.raises(ldap.INSUFFICIENT_ACCESS): org.replace("seeAlso", "cn=1") + def fin(): + log.info('Setting the hostname back to orginal') + socket.sethostname(old_hostname) -def test_connection_from_an_unauthorized_network(topo, add_user, aci_of_user): + request.addfinalizer(fin) + + +def test_connection_from_an_unauthorized_network(topo, add_user, aci_of_user, request): """ User cannot access the data when connectin from an unauthorized network as per the ACI. @@ -145,6 +152,12 @@ def test_connection_from_an_unauthorized_network(topo, add_user, aci_of_user): # now user can access data org.replace("seeAlso", "cn=1") + def fin(): + log.info('Setting the hostname back to orginal') + socket.sethostname(old_hostname) + + request.addfinalizer(fin) + def test_ip_keyword_test_noip_cannot(topo, add_user, aci_of_user): """ diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py index 672f7191c..d490c4af2 100644 --- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py +++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py @@ -72,6 +72,7 @@ def create_user(topology_st, request): def fin(): log.info('Deleting user simplepaged_test') user.delete() + socket.sethostname(OLD_HOSTNAME) request.addfinalizer(fin)
0
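The fix above registers a pytest finalizer in each affected test so that, whatever the test does with socket.sethostname(), the original hostname is restored and later suites (such as replication) are not left running against 'localhost'. A fixture-style variant of the same idea is sketched below; it assumes the test process is allowed to change the hostname, as the ACL tests in the diff are, and the fixture/test names are hypothetical.

import socket
import pytest

@pytest.fixture
def restore_hostname(request):
    """Snapshot the hostname and always put it back after the test."""
    old_hostname = socket.gethostname()

    def fin():
        socket.sethostname(old_hostname)

    request.addfinalizer(fin)
    return old_hostname

def test_something_that_changes_hostname(restore_hostname):
    socket.sethostname('localhost')   # test-specific manipulation
    # ... assertions against the directory instance would go here ...
    # the finalizer restores the original hostname even if they fail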
a94ae27a5fa2c7fad2c67e6fd89ea9563b535715
389ds/389-ds-base
Issue 5425 - CLI - add confirmation arg when deleting backend Description: Add "--do-it" CLI argument when deleting a backend and its subsuffixes fixes: https://github.com/389ds/389-ds-base/issues/5425 Reviewed by: tbordaz & progier(Thanks!!)
commit a94ae27a5fa2c7fad2c67e6fd89ea9563b535715 Author: Mark Reynolds <[email protected]> Date: Fri Dec 16 09:47:25 2022 -0500 Issue 5425 - CLI - add confirmation arg when deleting backend Description: Add "--do-it" CLI argument when deleting a backend and its subsuffixes fixes: https://github.com/389ds/389-ds-base/issues/5425 Reviewed by: tbordaz & progier(Thanks!!) diff --git a/src/cockpit/389-console/src/lib/database/suffix.jsx b/src/cockpit/389-console/src/lib/database/suffix.jsx index 11133f1a7..88a0e5564 100644 --- a/src/cockpit/389-console/src/lib/database/suffix.jsx +++ b/src/cockpit/389-console/src/lib/database/suffix.jsx @@ -742,7 +742,7 @@ export class Suffix extends React.Component { }) const cmd = [ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", - "backend", "delete", this.props.suffix + "backend", "delete", this.props.suffix, "--do-it" ]; log_cmd("doDelete", "Delete database", cmd); cockpit diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py index d32e3327c..07e3df246 100644 --- a/src/lib389/lib389/cli_conf/backend.py +++ b/src/lib389/lib389/cli_conf/backend.py @@ -236,14 +236,17 @@ def backend_delete(inst, basedn, log, args, warn=True): dn = _search_backend_dn(inst, args.be_name) if dn is None: raise ValueError("Unable to find a backend with the name: ({})".format(args.be_name)) - if warn and args.json is False: - _warn(dn, msg="Deleting %s %s" % (SINGULAR.__name__, dn)) + if not args.ack: + log.info("""Not removing backend: if you are really sure add: --do-it""") + else: + if warn and args.json is False: + _warn(dn, msg="Deleting %s %s" % (SINGULAR.__name__, dn)) - be = _get_backend(inst, args.be_name) - _recursively_del_backends(be) - be.delete() + be = _get_backend(inst, args.be_name) + _recursively_del_backends(be) + be.delete() - log.info("The database, and any sub-suffixes, were sucessfully deleted") + log.info("The database, and any sub-suffixes, were successfully deleted") def backend_import(inst, basedn, log, args): @@ -1152,6 +1155,9 @@ def create_parser(subparsers): delete_parser = subcommands.add_parser('delete', help='Delete a backend database') delete_parser.set_defaults(func=backend_delete) delete_parser.add_argument('be_name', help='The backend name or suffix') + delete_parser.add_argument('--do-it', dest="ack", + help="Remove backend and its subsuffixes", + action='store_true', default=False) ####################################################### # Get Suffix Tree (for use in web console)
0
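The CLI change above follows a simple confirmation-flag pattern: the destructive subcommand gains a --do-it option stored as ack (default False), the handler refuses to act unless it is set, and the web console just appends the flag to the command it builds. A stripped-down argparse sketch of that guard; the program name and the backend value are placeholders.

import argparse

def backend_delete(args):
    if not args.ack:
        print("Not removing backend: if you are really sure add: --do-it")
        return
    print("Deleting backend '%s' and any sub-suffixes..." % args.be_name)
    # ... the real handler would resolve and delete the backend here ...

parser = argparse.ArgumentParser(prog="dsconf-sketch")
subcommands = parser.add_subparsers()
delete_parser = subcommands.add_parser('delete', help='Delete a backend database')
delete_parser.set_defaults(func=backend_delete)
delete_parser.add_argument('be_name', help='The backend name or suffix')
delete_parser.add_argument('--do-it', dest="ack", action='store_true', default=False,
                           help="Remove backend and its subsuffixes")

args = parser.parse_args(['delete', 'userRoot'])             # refused: no --do-it
args.func(args)
args = parser.parse_args(['delete', 'userRoot', '--do-it'])  # confirmation given
args.func(args)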
26f78c1b3992684982845a4d483368e73bc90743
389ds/389-ds-base
Ticket 49325 - fix rust linking. Bug Description: An issue was found with the rpm: the way the static .a was built caused a missing library error when the rpm was distributed. Fix Description: Because both cargo and automake are opinionated and stubborn, the remaining option is to have cargo generate the .so and write manual rules for install and linking. This has now been tested with non-prefix, prefix, rpm build, and copr. https://pagure.io/389-ds-base/issue/49325 Author: wibrown Review by: mreynolds (Thanks!)
commit 26f78c1b3992684982845a4d483368e73bc90743 Author: William Brown <[email protected]> Date: Tue Nov 21 11:23:05 2017 +0100 Ticket 49325 - fix rust linking. Bug Description: An issue with the rpm was found that the way the static .a was built would cause a missing library error when the rpm was distributed. Fix Description: Because both cargo and automake are opinionated and stubborn, this leaves the option as have cargo generate the .so but write manual rules for install and linking. This has been tested now with non-prefix, prefix, rpm build, copr. https://pagure.io/389-ds-base/issue/49325 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/Makefile.am b/Makefile.am index 8df31e595..3ac5584c8 100644 --- a/Makefile.am +++ b/Makefile.am @@ -314,11 +314,7 @@ bin_PROGRAMS = dbscan \ # based on defines # ---------------------------------------------------------------------------------------- -server_LTLIBRARIES = -if RUST_ENABLE -server_LTLIBRARIES += librsds.la -endif -server_LTLIBRARIES += libsds.la libnunc-stans.la libldaputil.la libslapd.la libns-dshttpd.la +server_LTLIBRARIES = libsds.la libnunc-stans.la libldaputil.la libslapd.la libns-dshttpd.la # this is how to add optional plugins @@ -1101,25 +1097,34 @@ libsds_la_CPPFLAGS = $(AM_CPPFLAGS) $(SDS_CPPFLAGS) libsds_la_LDFLAGS = $(AM_LDFLAGS) $(SDS_LDFLAGS) if RUST_ENABLE -libsds_la_LIBADD = librsds.la - -librsdspatha = $(abs_top_builddir)/rs/@rust_target_dir@/librsds.a -librsdspatho = $(abs_top_builddir)/rs/@rust_target_dir@/librsds.o -# Remember, these emit to cargo_target_dir/<at>rust_target_dir<at>/emit target -$(librsdspatha): Makefile src/libsds/Cargo.toml src/libsds/sds/lib.rs src/libsds/sds/tqueue.rs - CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo rustc $(CARGO_FLAGS) --verbose --manifest-path=$(srcdir)/src/libsds/Cargo.toml -- $(RUSTC_FLAGS) --emit link=$(librsdspatha) +### Why does this exist? +# +# Both cargo and autotools are really opinionated. You can't generate the correct +# outputs from cargo/rust for automake to use. But by the same token, you can't +# convince automake to use the outputs we *do* have. So instead, we manually +# create and install the .so instead. +# +# This acts like .PHONY for some reason ... -$(librsdspatho): Makefile src/libsds/Cargo.toml src/libsds/sds/lib.rs src/libsds/sds/tqueue.rs - CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo rustc $(CARGO_FLAGS) --verbose --manifest-path=$(srcdir)/src/libsds/Cargo.toml -- $(RUSTC_FLAGS) --emit obj=$(librsdspatho) +libsds_la_LDFLAGS += -L$(abs_builddir)/.libs -lrsds +libsds_la_DEPENDENCIES = librsds.so -am_librsds_la_OBJECTS = $(librsdspatho) -librsds_la_LIBADD = $(librsdspatha) -librsds_la_SOURCES = +librsds.so: src/libsds/Cargo.toml src/libsds/sds/lib.rs src/libsds/sds/tqueue.rs + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ + cargo rustc $(CARGO_FLAGS) --verbose --manifest-path=$(srcdir)/src/libsds/Cargo.toml \ + -- $(RUSTC_FLAGS) + mkdir -p $(abs_builddir)/.libs + cp $(abs_top_builddir)/rs/@rust_target_dir@/librsds.so $(abs_builddir)/.libs/librsds.so dist_noinst_DATA += $(srcdir)/src/libsds/Cargo.toml \ $(srcdir)/src/libsds/sds/*.rs +# echo $(serverdir) +install-data-local: + $(MKDIR_P) $(DESTDIR)$(serverdir) + $(INSTALL) -c -m 755 $(abs_builddir)/.libs/librsds.so $(DESTDIR)$(serverdir)/librsds.so + else # Just build the tqueue in C. 
libsds_la_SOURCES += \ diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index c79d4d086..19c4f146c 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -560,9 +560,6 @@ fi %{_libdir}/%{pkgname}/libnunc-stans.so %{_libdir}/%{pkgname}/libsds.so %{_libdir}/%{pkgname}/libldaputil.so -%if %{use_rust} -%{_libdir}/%{pkgname}/librsds.so -%endif %{_libdir}/pkgconfig/* %files libs @@ -575,7 +572,7 @@ fi %{_libdir}/%{pkgname}/libsds.so.* %{_libdir}/%{pkgname}/libldaputil.so.* %if %{use_rust} -%{_libdir}/%{pkgname}/librsds.so.* +%{_libdir}/%{pkgname}/librsds.so %endif %files snmp diff --git a/src/libsds/Cargo.toml b/src/libsds/Cargo.toml index 594326c7b..2c57e6664 100644 --- a/src/libsds/Cargo.toml +++ b/src/libsds/Cargo.toml @@ -8,7 +8,7 @@ authors = ["William Brown <[email protected]>"] [lib] path = "sds/lib.rs" name = "rsds" -crate-type = ["dylib"] +crate-type = ["cdylib"] [profile.release] panic = "abort"
0
a8bc418842a31fedb9a52fdec423a56736c8d371
389ds/389-ds-base
Resolves: 437900 Summary: Add AUXILIARY keyword to domainRelatedObject and simpleSecurityObject definitions.
commit a8bc418842a31fedb9a52fdec423a56736c8d371 Author: Nathan Kinder <[email protected]> Date: Mon Jan 12 23:49:44 2009 +0000 Resolves: 437900 Summary: Add AUXILIARY keyword to domainRelatedObject and simpleSecurityObject definitions. diff --git a/ldap/schema/28pilot.ldif b/ldap/schema/28pilot.ldif index ee90fc9f0..6a5db43c4 100644 --- a/ldap/schema/28pilot.ldif +++ b/ldap/schema/28pilot.ldif @@ -88,7 +88,7 @@ objectClasses: ( 0.9.2342.19200300.100.4.5 NAME 'account' DESC 'Standard LDAP ob objectClasses: ( 0.9.2342.19200300.100.4.6 NAME 'document' DESC 'Standard LDAP objectclass' SUP pilotObject MUST ( documentIdentifier ) MAY ( abstract $ authorCN $ authorSN $ cn $ description $ documentAuthor $ documentLocation $ documentPublisher $ documentStore $ documentTitle $ documentVersion $ keywords $ l $ o $ obsoletedByDocument $ obsoletesDocument $ ou $ seeAlso $ subject $ updatedByDocument $ updatesDocument ) X-ORIGIN 'RFC 1274' ) objectClasses: ( 0.9.2342.19200300.100.4.7 NAME 'room' DESC 'Standard LDAP objectclass' SUP top MUST ( cn ) MAY ( description $ roomNumber $ seeAlso $ telephoneNumber ) X-ORIGIN 'RFC 1274' ) objectClasses: ( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' DESC 'Standard LDAP objectclass' SUP top MUST ( cn ) MAY ( description $ l $ o $ ou $ seeAlso $ telephoneNumber ) X-ORIGIN 'RFC 1274' ) -objectClasses: ( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' DESC 'Standard LDAP objectclass' SUP top MUST ( associatedDomain ) X-ORIGIN 'RFC 1274' ) +objectClasses: ( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST ( associatedDomain ) X-ORIGIN 'RFC 1274' ) objectClasses: ( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' DESC 'Standard LDAP objectclass' SUP country MUST ( co ) X-ORIGIN 'RFC 1274' ) -objectClasses: ( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObject' DESC 'Standard LDAP objectclass' SUP top MUST ( userPassword ) X-ORIGIN 'RFC 1274' ) +objectClasses: ( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObject' DESC 'Standard LDAP objectclass' SUP top AUXILIARY MUST ( userPassword ) X-ORIGIN 'RFC 1274' ) objectClasses: ( 0.9.2342.19200300.100.4.20 NAME 'pilotOrganization' DESC 'Standard LDAP objectclass' SUP top MUST ( ou $ o ) MAY ( buildingName $ businessCategory $ description $ destinationIndicator $ facsimileTelephoneNumber $ internationaliSDNNumber $ l $ physicalDeliveryOfficeName $ postOfficeBox $ postalAddress $ postalCode $ preferredDeliveryMethod $ registeredAddress $ searchGuide $ seeAlso $ st $ street $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ userPassword $ x121Address ) X-ORIGIN 'RFC 1274' )
0
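The practical effect of the AUXILIARY keyword added above is that domainRelatedObject and simpleSecurityObject can be mixed into an entry that already has some other structural objectclass, rather than being treated as structural themselves. A hedged python-ldap sketch of what a client can then do, adding simpleSecurityObject plus userPassword to an existing entry; the server URI, bind credentials and target DN are invented for illustration and the helper is not tied to any real deployment.

import ldap

def add_simple_security_object(uri, bind_dn, bind_pw, target_dn, password):
    """Attach the AUXILIARY simpleSecurityObject class (and a userPassword)
    to an already-existing entry of some other structural class."""
    conn = ldap.initialize(uri)
    conn.simple_bind_s(bind_dn, bind_pw)
    mods = [
        (ldap.MOD_ADD, "objectClass", [b"simpleSecurityObject"]),
        (ldap.MOD_ADD, "userPassword", [password.encode()]),
    ]
    conn.modify_s(target_dn, mods)
    conn.unbind_s()

# Example call (hypothetical instance and DN):
# add_simple_security_object("ldap://localhost:389",
#                            "cn=Directory Manager", "adminpassword",
#                            "uid=svc-backup,ou=Services,dc=example,dc=com",
#                            "secret123")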
a0b50187e70cb3a60a1ae70578dcaa53780d38ac
389ds/389-ds-base
[195258] Changes for the internal build; Comment#16 Picking up new components: adminserver, setuputil, adminutil
commit a0b50187e70cb3a60a1ae70578dcaa53780d38ac Author: Noriko Hosoi <[email protected]> Date: Wed Jun 21 18:55:09 2006 +0000 [195258] Changes for the internal build; Comment#16 Picking up new components: adminserver, setuputil, adminutil diff --git a/component_versions.mk b/component_versions.mk index c9e0f859f..2f60fac0a 100644 --- a/component_versions.mk +++ b/component_versions.mk @@ -109,7 +109,7 @@ endif # admin server ifndef ADM_RELDATE - ADM_RELDATE = 20060512 + ADM_RELDATE = 20060619 endif ifndef ADM_VERSDIR ADM_VERSDIR = adminserver/1.0 @@ -122,7 +122,7 @@ endif # setuputil ifndef SETUPUTIL_RELDATE - SETUPUTIL_RELDATE = 20060405 + SETUPUTIL_RELDATE = 20060615 endif ifndef SETUPUTIL_VER SETUPUTIL_VER = 10 @@ -144,7 +144,7 @@ ifndef ADMINUTIL_VER ADMINUTIL_DOT_VER=1.0 endif ifndef ADMINUTIL_RELDATE - ADMINUTIL_RELDATE=20060511 + ADMINUTIL_RELDATE=20060615 endif ifndef ADMINUTIL_VERSDIR
0
8965a8f7ee0dd6fee1b3266d7a1163b98c1e1fbc
389ds/389-ds-base
Ticket #48067 - Add bugzilla tests for ds_logs Description: Adding regression tests for ds_logs. Setting a custom log level combined with the default log level strips the default log level. Manually editing dse.ldif to set value 64 for the error log level throws an error. https://pagure.io/389-ds-base/issue/48067 Reviewed by: spichugi Signed-off-by: Simon Pichugin <[email protected]>
commit 8965a8f7ee0dd6fee1b3266d7a1163b98c1e1fbc Author: Sankar Ramalingam <[email protected]> Date: Mon Aug 21 21:17:00 2017 +0530 Ticket #48067 - Add bugzilla tests for ds_logs Description: Adding regression tests for ds_logs. Setting custom log level combined with default log level stripping the default log level. Manaully editing dse.ldif to set value 64 for error log level throws error. https://pagure.io/389-ds-base/issue/48067 Reviewed by: spichugi Signed-off-by: Simon Pichugin <[email protected]> diff --git a/dirsrvtests/tests/suites/ds_logs/regression_test.py b/dirsrvtests/tests/suites/ds_logs/regression_test.py new file mode 100644 index 000000000..15cd98fd5 --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/regression_test.py @@ -0,0 +1,73 @@ +# --- BEGIN COPYRIGHT BLOCK --- + # Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +from lib389.dseldif import DSEldif +from lib389._constants import DN_CONFIG, LOG_REPLICA, LOG_DEFAULT, LOG_TRACE, LOG_ACL +from lib389.utils import os, logging +from lib389.topologies import topology_st as topo + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + [email protected] [email protected]("log_level", [(LOG_REPLICA + LOG_DEFAULT), (LOG_ACL + LOG_DEFAULT), (LOG_TRACE + LOG_DEFAULT)]) +def test_default_loglevel_stripped(topo, log_level): + """ The default log level 16384 is stripped from the log level returned to a client + + :id: c300f8f1-aa11-4621-b124-e2be51930a6b + :feature: Logging + :setup: Standalone instance + :steps: 1. Change the error log level to the default and custom value. + 2. Check if the server returns the new value. + :expectedresults: + 1. Changing the error log level should be successful. + 2. Server should return the new log level. + """ + + assert topo.standalone.config.set('nsslapd-errorlog-level', str(log_level)) + assert topo.standalone.config.get_attr_val_int('nsslapd-errorlog-level') == log_level + + [email protected] +def test_dse_config_loglevel_error(topo): + """ Manually setting nsslapd-errorlog-level to 64 in dse.ldif throws error + + :id: 0eeefa17-ec1c-4208-8e7b-44d8fbc38f10 + :feature: Logging + :setup: Standalone instance + :steps: 1. Stop the server, edit dse.ldif file and change nsslapd-errorlog-level value to 64 + 2. Start the server and observe the error logs. + :expectedresults: + 1. Server should be successfully stopped and nsslapd-errorlog-level value should be changed. + 2. Server should be successfully started without any errors being reported in the logs. + """ + + topo.standalone.stop(timeout=10) + dse_ldif = DSEldif(topo.standalone) + try: + dse_ldif.replace(DN_CONFIG, 'nsslapd-errorlog-level', 64) + except: + log.error('Failed to replace cn=config values of nsslapd-errorlog-level') + raise + topo.standalone.start(timeout=10) + assert not topo.standalone.ds_error_log.match( + '.*nsslapd-errorlog-level: ignoring 64 \\(since -d 266354688 was given on the command line\\).*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE)
0
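The parametrized test above sets values like LOG_REPLICA + LOG_DEFAULT and asserts that the same number is read back from nsslapd-errorlog-level. Because the 389 error-log levels are distinct power-of-two bits, adding them is the same as OR-ing them, which is why the sum is a meaningful combined level. A tiny arithmetic sketch, assuming the conventional level values (1 = trace, 128 = ACL, 8192 = replication, 16384 = default); the real constants live in lib389._constants and the numbers here are an assumption.

# Assumed 389 DS error-log level bits (see lib389._constants for the real ones).
LOG_TRACE = 1
LOG_ACL = 128
LOG_REPLICA = 8192
LOG_DEFAULT = 16384

for custom in (LOG_REPLICA, LOG_ACL, LOG_TRACE):
    combined = custom + LOG_DEFAULT            # same as custom | LOG_DEFAULT here
    assert combined == (custom | LOG_DEFAULT)
    # The regression test asserts that nsslapd-errorlog-level reads back as
    # this combined value, i.e. the 16384 default bit is not lost.
    print("%5d + %d = %d" % (custom, LOG_DEFAULT, combined))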
8ecada01035e8b90538d72b42510e742870b0671
389ds/389-ds-base
Issue 50984 - Memory leaks in disk monitoring Description: Memory leaks are reported by the disk monitoring test suite. The direct leak is related to the char **dirs array, which is not freed at all. Free the array when we clean up or go to shutdown. Fix disk_monitoring_test.py::test_below_half_of_the_threshold_not_starting_after_shutdown. It should accept a different exception when the instance is not started. https://pagure.io/389-ds-base/issue/50984 Reviewed by: firstyear (Thanks!)
commit 8ecada01035e8b90538d72b42510e742870b0671 Author: Simon Pichugin <[email protected]> Date: Thu Mar 26 19:33:47 2020 +0100 Issue 50984 - Memory leaks in disk monitoring Description: Memory leaks are reported by the disk monitoring test suite. The direct leak is related to char **dirs array which is not freed at all. Free the array when we clean up or go to shutdown. Fix disk_monitoring_test.py::test_below_half_of_the_threshold_not_starting_after_shutdown. It should accept different exception when the instance is not started. https://pagure.io/389-ds-base/issue/50984 Reviewed by: firstyear (Thanks!) diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py index 200213ece..2434bf2c7 100644 --- a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py @@ -576,8 +576,10 @@ def test_below_half_of_the_threshold_not_starting_after_shutdown(topo, setup, re else: subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE}']) _withouterrorlog(topo, 'topo.standalone.status() == True', 120) - with pytest.raises(subprocess.CalledProcessError): + try: topo.standalone.start() + except (ValueError, subprocess.CalledProcessError): + topo.standalone.log.info("Instance start up has failed as expected") _witherrorlog(topo, f'is too far below the threshold({THRESHOLD_BYTES} bytes). Exiting now', 2) # Verify DS has recovered from shutdown os.remove(file_path) diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 0aa17e0e6..e7a5c2802 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -612,6 +612,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) slapi_be_free(&be); } } + slapi_ch_array_free(dirs); return; } /* @@ -710,6 +711,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) slapi_be_free(&be); } } + slapi_ch_array_free(dirs); g_set_shutdown(SLAPI_SHUTDOWN_DISKFULL); return; } diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 9e34926cc..7d7c9ac8d 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -957,6 +957,7 @@ main(int argc, char **argv) return_value = 1; goto cleanup; } + slapi_ch_array_free(dirs); } /* log the max fd limit as it is typically set in env/systemd */ slapi_log_err(SLAPI_LOG_INFO, "main",
0
31cd7a838aef30d80be6efe519cc2e821811c645
389ds/389-ds-base
Ticket #447 - Possible to add invalid attribute to nsslapd-allowed-to-delete-attrs Bug description: If the given values of nsslapd-allowed-to-delete-attrs are all invalid attributes, e.g., nsslapd-allowed-to-delete-attrs: invalid0 invalid1, they were logged as invalid but accidentally set on nsslapd-allowed-to-delete-attrs. Fix description: This patch checks the validation result and, if there are no valid attributes given to nsslapd-allowed-to-delete-attrs, it issues a message in the error log: nsslapd-allowed-to-delete-attrs: Given attributes are all invalid. No effects. and it returns an error. The modify operation fails with "DSA is unwilling to perform". https://fedorahosted.org/389/ticket/447 Reviewed by [email protected] (Thank you, Rich!)
commit 31cd7a838aef30d80be6efe519cc2e821811c645 Author: Noriko Hosoi <[email protected]> Date: Wed Jan 8 10:30:04 2014 -0800 Ticket #447 - Possible to add invalid attribute to nsslapd-allowed-to-delete-attrs Bug description: If given value of nsslapd-allowed-to-delete-attrs are all invalid attributes, e.g., nsslapd-allowed-to-delete-attrs: invalid0 invalid1 they were logged as invalid, but accidentally set to nsslapd-allowed- to-delete-attrs. Fix description: This patch checks the validation result and if there is no valid attributes given to nsslapd-allowed-to-delete-attrs, it issues a message in the error log: nsslapd-allowed-to-delete-attrs: Given attributes are all invalid. No effects. and it returns an error. The modify operation fails with "DSA is unwilling to perform". https://fedorahosted.org/389/ticket/447 Reviewed by [email protected] (Thank you, Rich!) diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index d0bcbb95e..603c7ceaf 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -6815,15 +6815,23 @@ config_set_allowed_to_delete_attrs( const char *attrname, char *value, /* given value included unknown attribute, * we need to re-create a value. */ /* reuse the duplicated string for the new attr value. */ - for (s = allowed, d = vcopy; s && *s; s++) { - size_t slen = strlen(*s); - memmove(d, *s, slen); - d += slen; - memmove(d, " ", 1); - d++; + if (allowed && (NULL == *allowed)) { + /* all the values to allow to delete are invalid */ + slapi_log_error(SLAPI_LOG_FATAL, "config", + "%s: Given attributes are all invalid. No effects.\n", + CONFIG_ALLOWED_TO_DELETE_ATTRIBUTE); + return LDAP_NO_SUCH_ATTRIBUTE; + } else { + for (s = allowed, d = vcopy; s && *s; s++) { + size_t slen = strlen(*s); + memmove(d, *s, slen); + d += slen; + memmove(d, " ", 1); + d++; + } + *(d-1) = '\0'; + strcpy(value, vcopy); /* original value needs to be refreshed */ } - *(d-1) = '\0'; - strcpy(value, vcopy); /* original value needs to be refreshed */ } else { slapi_ch_free_string(&vcopy); vcopy = slapi_ch_strdup(value);
0
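The C fix above boils down to: filter the space-separated attribute list down to the names the server actually knows, refuse the whole setting (log and return an error) when nothing valid is left, and otherwise rebuild the value from only the valid names. A Python model of that control flow; the membership test against known_attrs is a stand-in for the server's real attribute validation, and all names are illustrative.

def set_allowed_to_delete_attrs(value, known_attrs, log):
    """Return the cleaned value, or None to signal rejection, mirroring the
    shape of config_set_allowed_to_delete_attrs() after the fix."""
    requested = value.split()
    allowed = [a for a in requested if a.lower() in known_attrs]

    if not allowed:
        # All of the values to allow to delete are invalid -> reject, no effect.
        log("nsslapd-allowed-to-delete-attrs: Given attributes are all invalid. No effects.")
        return None

    if len(allowed) != len(requested):
        # Some were invalid: keep only the valid ones, like the vcopy rebuild.
        return " ".join(allowed)
    return value

# Both attributes unknown -> the setting is refused outright.
assert set_allowed_to_delete_attrs("invalid0 invalid1",
                                   {"cn", "sn", "telephonenumber"},
                                   print) is None
# A mixed list keeps only the valid attribute.
assert set_allowed_to_delete_attrs("invalid0 sn",
                                   {"cn", "sn", "telephonenumber"},
                                   print) == "sn"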
155fe7dba0cb6d8621f46ad974d3ce87f47338f1
389ds/389-ds-base
Pull svrcore from sbsintegration
commit 155fe7dba0cb6d8621f46ad974d3ce87f47338f1 Author: Nathan Kinder <[email protected]> Date: Mon Mar 14 20:03:04 2005 +0000 Pull svrcore from sbsintegration diff --git a/components.mk b/components.mk index 807ffb5c2..9751df7a8 100644 --- a/components.mk +++ b/components.mk @@ -375,7 +375,8 @@ SVRCORE_INCDIR = $(SVRCORE_BUILD_DIR)/include SVRCORE_INCLUDE = -I$(SVRCORE_INCDIR) #SVRCORE_LIBNAMES = svrplcy svrcore SVRCORE_LIBNAMES = svrcore -SVRCORE_IMPORT = $(COMPONENTS_DIR)/svrcore/$(SVRCORE_RELDATE)/$(NSOBJDIR_NAME) +#SVRCORE_IMPORT = $(COMPONENTS_DIR)/svrcore/$(SVRCORE_RELDATE)/$(NSOBJDIR_NAME) +SVRCORE_IMPORT = $(COMPONENTS_DIR_DEV)/svrcore/$(SVRCORE_RELDATE)/$(NSOBJDIR_NAME) ifeq ($(ARCH), WINNT) SVRCOREOBJNAME = $(addsuffix .lib, $(SVRCORE_LIBNAMES))
0
ff977cc87dac578704d90e2c1a3b6843735a8390
389ds/389-ds-base
Issue 4747 - Remove unstable/unstatus tests (followup) (#4809) Bug description: the test_syncrepl_basic test is unstable (1 fail out of 10 runs) with an error.PyAsn1Error exception. Fix description: flag this test as flaky. relates: https://github.com/389ds/389-ds-base/issues/4747 Reviewed by: self reviewed (one line commit) Platforms tested: F33
commit ff977cc87dac578704d90e2c1a3b6843735a8390 Author: tbordaz <[email protected]> Date: Wed Jun 16 13:41:27 2021 +0200 Issue 4747 - Remove unstable/unstatus tests (followup) (#4809) Bug description: test_syncrepl_basic test is unstable (1 fail out of 10 run) with a error.PyAsn1Error exception. Fix description: flag this tests as flaky relates: https://github.com/389ds/389-ds-base/issues/4747 Reviewed by: self reviewed (one line commit) Platforms tested: F33 diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py index 0b422accf..d98c6e544 100644 --- a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py +++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py @@ -110,6 +110,9 @@ def init_sync_repl_plugins(topology, request): pass request.addfinalizer(fin) +#unstable or unstatus tests, skipped for now +#it fails, let's say 1 time out of 10, while decoding asn1 response [email protected](max_runs=2, min_passes=1) @pytest.mark.skipif(ldap.__version__ < '3.3.1', reason="python ldap versions less that 3.3.1 have bugs in sync repl that will cause this to fail!") def test_syncrepl_basic(topology):
0
22f2f9a1502e63bb169b7d599b5a3b35ddb31b8a
389ds/389-ds-base
Issue 50426 - nsSSL3Ciphers is limited to 1024 characters Bug Description: There was a hardcoded buffer for processing TLS ciphers. Anything over 1024 characters was truncated and was not applied. Fix Description: Don't use a fixed size buffer and just use the entire string. When printing errors about invalid format then we must use a fixed sized buffer, but we will truncate that log value as to not exceed the ssl logging function's buffer, and still output a useful message. ASAN approved https://pagure.io/389-ds-base/issue/50426 Reviewed by: firstyear, tbordaz, and spichugi (Thanks!!!)
commit 22f2f9a1502e63bb169b7d599b5a3b35ddb31b8a Author: Mark Reynolds <[email protected]> Date: Fri Jun 7 09:21:31 2019 -0400 Issue 50426 - nsSSL3Ciphers is limited to 1024 characters Bug Description: There was a hardcoded buffer for processing TLS ciphers. Anything over 1024 characters was truncated and was not applied. Fix Description: Don't use a fixed size buffer and just use the entire string. When printing errors about invalid format then we must use a fixed sized buffer, but we will truncate that log value as to not exceed the ssl logging function's buffer, and still output a useful message. ASAN approved https://pagure.io/389-ds-base/issue/50426 Reviewed by: firstyear, tbordaz, and spichugi (Thanks!!!) diff --git a/dirsrvtests/tests/suites/tls/cipher_test.py b/dirsrvtests/tests/suites/tls/cipher_test.py new file mode 100644 index 000000000..058931046 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/cipher_test.py @@ -0,0 +1,51 @@ +import pytest +import os +from lib389.config import Encryption +from lib389.topologies import topology_st as topo + + +def test_long_cipher_list(topo): + """Test a long cipher list, and makre sure it is not truncated + + :id: bc400f54-3966-49c8-b640-abbf4fb2377d + :setup: Standalone Instance + :steps: + 1. Set nsSSL3Ciphers to a very long list of ciphers + 2. Ciphers are applied correctly + :expectedresults: + 1. Success + 2. Success + """ + ENABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384::AES-GCM::AEAD::256" + DISABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256::AES-GCM::AEAD::128" + CIPHER_LIST = ( + "-all,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5," + "-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," + "-TLS_RSA_WITH_RC4_128_MD5,-TLS_RSA_WITH_RC4_128_SHA,-TLS_RSA_WITH_3DES_EDE_CBC_SHA," + "-TLS_RSA_WITH_DES_CBC_SHA,-SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,-SSL_RSA_FIPS_WITH_DES_CBC_SHA," + "-TLS_RSA_EXPORT_WITH_RC4_40_MD5,-TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,-TLS_RSA_WITH_NULL_MD5," + "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA," + "-SSL_FORTEZZA_DMS_WITH_RC4_128_SHA,-SSL_FORTEZZA_DMS_WITH_NULL_SHA,-TLS_DHE_DSS_WITH_DES_CBC_SHA," + "-TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,-TLS_DHE_RSA_WITH_DES_CBC_SHA,-TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA," + "+TLS_RSA_WITH_AES_128_CBC_SHA,-TLS_DHE_DSS_WITH_AES_128_CBC_SHA,-TLS_DHE_RSA_WITH_AES_128_CBC_SHA," + "+TLS_RSA_WITH_AES_256_CBC_SHA,-TLS_DHE_DSS_WITH_AES_256_CBC_SHA,-TLS_DHE_RSA_WITH_AES_256_CBC_SHA," + "-TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,-TLS_DHE_DSS_WITH_RC4_128_SHA,-TLS_ECDHE_RSA_WITH_RC4_128_SHA," + "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," + "-TLS_RSA_WITH_RC4_128_MD5,-TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,-TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA," + "-TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,+TLS_AES_128_GCM_SHA256,+TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + ) + + topo.standalone.enable_tls() + enc = Encryption(topo.standalone) + enc.set('nsSSL3Ciphers', CIPHER_LIST) + topo.standalone.restart() + enabled_ciphers = enc.get_attr_vals_utf8('nssslenabledciphers') + assert ENABLED_CIPHER in enabled_ciphers + assert DISABLED_CIPHER not in enabled_ciphers + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c index 2d7bc2bd6..a89b1de68 100644 --- 
a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -95,7 +95,6 @@ static char *configDN = "cn=encryption,cn=config"; #define CIPHER_SET_ALLOWWEAKDHPARAM 0x200 /* allowWeakDhParam is on */ #define CIPHER_SET_DISALLOWWEAKDHPARAM 0x400 /* allowWeakDhParam is off */ - #define CIPHER_SET_ISDEFAULT(flag) \ (((flag)&CIPHER_SET_DEFAULT) ? PR_TRUE : PR_FALSE) #define CIPHER_SET_ISALL(flag) \ @@ -694,10 +693,12 @@ _conf_setciphers(char *setciphers, int flags) active = 0; break; default: - PR_snprintf(err, sizeof(err), "invalid ciphers <%s>: format is " - "+cipher1,-cipher2...", - raw); - return slapi_ch_strdup(err); + if (strlen(raw) > MAGNUS_ERROR_LEN) { + PR_snprintf(err, sizeof(err) - 3, "%s...", raw); + return slapi_ch_smprintf("invalid ciphers <%s>: format is +cipher1,-cipher2...", err); + } else { + return slapi_ch_smprintf("invalid ciphers <%s>: format is +cipher1,-cipher2...", raw); + } } if ((t = strchr(setciphers, ','))) *t++ = '\0'; @@ -1694,7 +1695,6 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) PRUint16 NSSVersionMax = enabledNSSVersions.max; char mymin[VERSION_STR_LENGTH], mymax[VERSION_STR_LENGTH]; char newmax[VERSION_STR_LENGTH]; - char cipher_string[1024]; int allowweakcipher = CIPHER_SET_DEFAULTWEAKCIPHER; int_fast16_t renegotiation = (int_fast16_t)SSL_RENEGOTIATE_REQUIRES_XTN; @@ -1735,21 +1735,17 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) "Ignoring it and set it to default.", val, configDN); } } - slapi_ch_free((void **)&val); + slapi_ch_free_string(&val); /* Set SSL cipher preferences */ - *cipher_string = 0; - if (ciphers && (*ciphers) && PL_strcmp(ciphers, "blank")) - PL_strncpyz(cipher_string, ciphers, sizeof(cipher_string)); - slapi_ch_free((void **)&ciphers); - - if (NULL != (val = _conf_setciphers(cipher_string, allowweakcipher))) { + if (NULL != (val = _conf_setciphers(ciphers, allowweakcipher))) { errorCode = PR_GetError(); slapd_SSL_warn("Failed to set SSL cipher " "preference information: %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", val, errorCode, slapd_pr_strerror(errorCode)); - slapi_ch_free((void **)&val); + slapi_ch_free_string(&val); } + slapi_ch_free_string(&ciphers); freeConfigEntry(&e); /* Import pr fd into SSL */ @@ -1820,12 +1816,12 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) activation = slapi_entry_attr_get_charptr(e, "nssslactivation"); if ((!activation) || (!PL_strcasecmp(activation, "off"))) { /* this family was turned off, goto next */ - slapi_ch_free((void **)&activation); + slapi_ch_free_string(&activation); freeConfigEntry(&e); continue; } - slapi_ch_free((void **)&activation); + slapi_ch_free_string(&activation); token = slapi_entry_attr_get_charptr(e, "nsssltoken"); personality = slapi_entry_attr_get_charptr(e, "nssslpersonalityssl"); @@ -1842,8 +1838,8 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) "family information. Missing nsssltoken or" "nssslpersonalityssl in %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", *family, errorCode, slapd_pr_strerror(errorCode)); - slapi_ch_free((void **)&token); - slapi_ch_free((void **)&personality); + slapi_ch_free_string(&token); + slapi_ch_free_string(&personality); freeConfigEntry(&e); continue; } @@ -1870,7 +1866,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS) "private key for cert %s of family %s (" SLAPI_COMPONENT_NAME_NSPR " error %d - %s)", cert_name, *family, errorCode, slapd_pr_strerror(errorCode)); - slapi_ch_free((void **)&personality); + slapi_ch_free_string(&personality); CERT_DestroyCertificate(cert); cert = NULL; freeConfigEntry(&e);
0
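Independently of dropping the 1024-byte buffer, the parsing contract in the patch above stays the same: nsSSL3Ciphers is a comma-separated list whose tokens are cipher names (or keywords such as all) prefixed with '+' to enable or '-' to disable, and anything else earns the "format is +cipher1,-cipher2..." error. A simplified Python sketch of that token walk; it does no NSS lookups, ignores the keyword handling of the real parser, and the cipher names in the demo are fake.

def parse_cipher_pref(setciphers):
    """Split an nsSSL3Ciphers-style string into (enabled, disabled) name sets.
    Simplified: every token must start with '+' or '-', as in the server's
    error message; no validation against the actual NSS cipher table."""
    enabled, disabled = set(), set()
    for raw in setciphers.split(","):
        token = raw.strip()
        if not token:
            continue
        if token[0] == "+":
            enabled.add(token[1:])
        elif token[0] == "-":
            disabled.add(token[1:])
        else:
            raise ValueError("invalid ciphers <%s>: format is +cipher1,-cipher2..." % token)
    return enabled, disabled

# Works the same for a list far longer than the old 1024-byte buffer.
long_list = ",".join("-TLS_FAKE_CIPHER_%03d" % i for i in range(200))
enabled, disabled = parse_cipher_pref(long_list + ",+TLS_AES_128_GCM_SHA256")
assert "TLS_AES_128_GCM_SHA256" in enabled and len(disabled) == 200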
ea689555775f4c4219d5411a6b447ff57f074446
389ds/389-ds-base
Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053 https://bugzilla.redhat.com/show_bug.cgi?id=619122 Resolves: bug 619122 Bug description: fix coverify Defect Type: Resource leaks issues CID 12000. description: The plugin_setup() has been modified to release value properly.
commit ea689555775f4c4219d5411a6b447ff57f074446 Author: Endi S. Dewata <[email protected]> Date: Thu Jul 29 14:11:06 2010 -0500 Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053 https://bugzilla.redhat.com/show_bug.cgi?id=619122 Resolves: bug 619122 Bug description: fix coverify Defect Type: Resource leaks issues CID 12000. description: The plugin_setup() has been modified to release value properly. diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c index 658fcf477..e31c846e9 100644 --- a/ldap/servers/slapd/plugin.c +++ b/ldap/servers/slapd/plugin.c @@ -2139,12 +2139,14 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group, "an integer between %d and %d\n", ATTR_PLUGIN_PRECEDENCE, PLUGIN_MIN_PRECEDENCE, PLUGIN_MAX_PRECEDENCE); status = -1; + slapi_ch_free((void**)&value); goto PLUGIN_CLEANUP; } else { plugin->plg_precedence = precedence; } + slapi_ch_free((void**)&value); } if (!(value = slapi_entry_attr_get_charptr(plugin_entry,
0
b81c8ba38c29e15e13b0dd0bf6f5d3c773d31b20
389ds/389-ds-base
Ticket 48538 - Failed to delete old semaphore Bug Description: I misunderstood the sem_unlink call, and logged the wrong filepath. Fix Description: Fix the file path of the semaphore. https://pagure.io/389-ds-base/issue/48538 Author: wibrown Review by: mreynolds (Thanks!)
commit b81c8ba38c29e15e13b0dd0bf6f5d3c773d31b20 Author: William Brown <[email protected]> Date: Fri May 12 10:09:32 2017 +1000 Ticket 48538 - Failed to delete old semaphore Bug Description: I misunderstood the sem_unlink call, and logged the wrong filepath. Fix Description: Fix the file path of the semaphore. https://pagure.io/389-ds-base/issue/48538 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c index 852d1572a..efe574ca0 100644 --- a/ldap/servers/slapd/snmp_collator.c +++ b/ldap/servers/slapd/snmp_collator.c @@ -458,23 +458,23 @@ snmp_collator_create_semaphore(void) * around. Recreate it since we don't know what state it is in. */ if (sem_unlink(stats_sem_name) != 0) { slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore", - "Failed to delete old semaphore for stats file (%s). " - "Error %d (%s).\n", stats_sem_name, errno, slapd_system_strerror(errno) ); + "Failed to delete old semaphore for stats file (/dev/shm/sem.%s). " + "Error %d (%s).\n", stats_sem_name + 1, errno, slapd_system_strerror(errno) ); exit(1); } if ((stats_sem = sem_open(stats_sem_name, O_CREAT | O_EXCL, SLAPD_DEFAULT_FILE_MODE, 1)) == SEM_FAILED) { /* No dice */ slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore", - "Failed to create semaphore for stats file (%s). Error %d (%s).\n", - stats_sem_name, errno, slapd_system_strerror(errno) ); + "Failed to create semaphore for stats file (/dev/shm/sem.%s). Error %d (%s).\n", + stats_sem_name + 1, errno, slapd_system_strerror(errno) ); exit(1); } } else { /* Some other problem occurred creating the semaphore. */ slapi_log_err(SLAPI_LOG_EMERG, "snmp_collator_create_semaphore", - "Failed to create semaphore for stats file (%s). Error %d.(%s)\n", - stats_sem_name, errno, slapd_system_strerror(errno) ); + "Failed to create semaphore for stats file (/dev/shm/sem.%s). Error %d.(%s)\n", + stats_sem_name + 1, errno, slapd_system_strerror(errno) ); exit(1); } }
0
921e51777d72ee7508fd95bbc6d3aa4ca4bdc44b
389ds/389-ds-base
610281 - fix coverity Defect Type: Control flow issues https://bugzilla.redhat.com/show_bug.cgi?id=610281 11843 DEADCODE Triaged Unassigned Bug Minor Fix Required mm_init() ds/ldap/servers/slapd/tools/mmldif.c Comment: A variable tailorfile is not used. Remove it.
commit 921e51777d72ee7508fd95bbc6d3aa4ca4bdc44b Author: Noriko Hosoi <[email protected]> Date: Thu Jul 8 12:21:46 2010 -0700 610281 - fix coverity Defect Type: Control flow issues https://bugzilla.redhat.com/show_bug.cgi?id=610281 11843 DEADCODE Triaged Unassigned Bug Minor Fix Required mm_init() ds/ldap/servers/slapd/tools/mmldif.c Comment: A variable tailorfile is not used. Remove it. diff --git a/ldap/servers/slapd/tools/mmldif.c b/ldap/servers/slapd/tools/mmldif.c index 409ce3eda..291702a88 100644 --- a/ldap/servers/slapd/tools/mmldif.c +++ b/ldap/servers/slapd/tools/mmldif.c @@ -639,7 +639,6 @@ int mm_init(int argc, char * argv[]) int c; char *ofn = NULL; char *prog = argv[0]; - char *tailorfile = NULL; time(&tl); seed = (char)tl; @@ -712,7 +711,6 @@ int mm_init(int argc, char * argv[]) hashmask = 0xfff; hashtable = (entry_t **)calloc(0x1000, sizeof(entry_t*)); - if (tailorfile) free(tailorfile); return 0; }
0
6ea32f9f5224cdec2658b6fed003b38388f881a7
389ds/389-ds-base
Issue 4513 - Fix replication CI test failures (#4557) Description: Add missing tests from the previous commit. Relates: #4513 Reviewed by: @mreynolds, @Firstyear (Thanks!)
commit 6ea32f9f5224cdec2658b6fed003b38388f881a7 Author: Simon Pichugin <[email protected]> Date: Fri Jan 22 16:17:30 2021 +0100 Issue 4513 - Fix replication CI test failures (#4557) Desciption: Add missing tests from previous commit. Relates: #4513 Reviewed by: @mreynolds, @Firstyear (Thanks!) diff --git a/dirsrvtests/tests/suites/replication/regression_i2_test.py b/dirsrvtests/tests/suites/replication/regression_i2_test.py new file mode 100644 index 000000000..b3369f961 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/regression_i2_test.py @@ -0,0 +1,90 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import pytest +from lib389.utils import * +from lib389._constants import * +from lib389.replica import Replicas, ReplicationManager +from lib389.dseldif import * +from lib389.topologies import topology_i2 as topo_i2 + + +pytestmark = pytest.mark.tier1 + +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +MAXAGE_STR = '30' +TRIMINTERVAL_STR = '5' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_special_symbol_replica_agreement(topo_i2): + """ Check if agreement starts with "cn=->..." then + after upgrade does it get removed. + + :id: 68aa0072-4dd4-4e33-b107-cb383a439125 + :setup: two standalone instance + :steps: + 1. Create and Enable Replication on standalone2 and role as consumer + 2. Create and Enable Replication on standalone1 and role as master + 3. Create a Replication agreement starts with "cn=->..." + 4. Perform an upgrade operation over the master + 5. Check if the agreement is still present or not. + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + 5. 
It should be successful + """ + + master = topo_i2.ins["standalone1"] + consumer = topo_i2.ins["standalone2"] + consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=ReplicaRole.CONSUMER, replicaId=CONSUMER_REPLICAID) + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.create_first_master(master) + + properties = {RA_NAME: '-\\3meTo_{}:{}'.format(consumer.host, str(consumer.port)), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + + master.agreement.create(suffix=SUFFIX, + host=consumer.host, + port=consumer.port, + properties=properties) + + master.agreement.init(SUFFIX, consumer.host, consumer.port) + + replica_server = Replicas(master).get(DEFAULT_SUFFIX) + + master.upgrade('online') + + agmt = replica_server.get_agreements().list()[0] + + assert agmt.get_attr_val_utf8('cn') == '-\\3meTo_{}:{}'.format(consumer.host, str(consumer.port)) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py new file mode 100644 index 000000000..25354e9d0 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py @@ -0,0 +1,883 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import re +import time +import logging +import ldif +import ldap +import pytest +import subprocess +from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts +from lib389.pwpolicy import PwPolicyManager +from lib389.utils import * +from lib389._constants import * +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccount +from lib389.idm.group import Groups, Group +from lib389.idm.domain import Domain +from lib389.idm.directorymanager import DirectoryManager +from lib389.replica import Replicas, ReplicationManager, ReplicaRole +from lib389.agreement import Agreements +from lib389 import pid_from_file +from lib389.dseldif import * +from lib389.topologies import topology_m2 as topo_m2, TopologyMain, create_topology, _remove_ssca_db + + +pytestmark = pytest.mark.tier1 + +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +MAXAGE_STR = '30' +TRIMINTERVAL_STR = '5' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def find_start_location(file, no): + log_pattern = re.compile("slapd_daemon - slapd started.") + count = 0 + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if (found): + count = count + 1 + if (count == no): + return file.tell() + if (line == ''): + break + return -1 + + +def pattern_errorlog(file, log_pattern, start_location=0): + + count = 0 + log.debug("_pattern_errorlog: start from the beginning") + file.seek(start_location) 
+ + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if (found): + count = count + 1 + if (line == ''): + break + + log.debug("_pattern_errorlog: complete (count=%d)" % count) + return count + + +def _move_ruv(ldif_file): + """ Move RUV entry in an ldif file to the top""" + + with open(ldif_file) as f: + parser = ldif.LDIFRecordList(f) + parser.parse() + + ldif_list = parser.all_records + for dn in ldif_list: + if dn[0].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'): + ruv_index = ldif_list.index(dn) + ldif_list.insert(0, ldif_list.pop(ruv_index)) + break + + with open(ldif_file, 'w') as f: + ldif_writer = ldif.LDIFWriter(f) + for dn, entry in ldif_list: + ldif_writer.unparse(dn, entry) + + +def _remove_replication_data(ldif_file): + """ Remove the replication data from ldif file: + db2lif without -r includes some of the replica data like + - nsUniqueId + - keepalive entries + This function filters the ldif fil to remove these data + """ + + with open(ldif_file) as f: + parser = ldif.LDIFRecordList(f) + parser.parse() + + ldif_list = parser.all_records + # Iterate on a copy of the ldif entry list + for dn, entry in ldif_list[:]: + if dn.startswith('cn=repl keep alive'): + ldif_list.remove((dn, entry)) + else: + entry.pop('nsUniqueId') + with open(ldif_file, 'w') as f: + ldif_writer = ldif.LDIFWriter(f) + for dn, entry in ldif_list: + ldif_writer.unparse(dn, entry) + + [email protected](scope="function") +def topo_with_sigkill(request): + """Create Replication Deployment with two masters""" + + topology = create_topology({ReplicaRole.MASTER: 2}) + + def _kill_ns_slapd(inst): + pid = str(pid_from_file(inst.ds_paths.pid_file)) + cmd = ['kill', '-9', pid] + subprocess.Popen(cmd, stdout=subprocess.PIPE) + + def fin(): + # Kill the hanging process at the end of test to prevent failures in the following tests + if DEBUGGING: + [_kill_ns_slapd(inst) for inst in topology] + else: + [_kill_ns_slapd(inst) for inst in topology] + assert _remove_ssca_db(topology) + [inst.stop() for inst in topology if inst.exists()] + [inst.delete() for inst in topology if inst.exists()] + request.addfinalizer(fin) + + return topology + + [email protected]() +def create_entry(topo_m2, request): + """Add test entry using UserAccounts""" + + log.info('Adding a test entry user') + users = UserAccounts(topo_m2.ms["master1"], DEFAULT_SUFFIX) + tuser = users.ensure_state(properties=TEST_USER_PROPERTIES) + return tuser + + +def add_ou_entry(server, idx, parent): + ous = OrganizationalUnits(server, parent) + name = 'OU%d' % idx + ous.create(properties={'ou': '%s' % name}) + + +def add_user_entry(server, idx, parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent) + user_properties = { + 'uid': 'tuser%d' % idx, + 'givenname': 'test', + 'cn': 'Test User%d' % idx, + 'sn': 'user%d' % idx, + 'userpassword': PW_DM, + 'uidNumber': '1000%d' % idx, + 'gidNumber': '2000%d' % idx, + 'homeDirectory': '/home/{}'.format('tuser%d' % idx) + } + users.create(properties=user_properties) + + +def del_user_entry(server, idx, parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent) + test_user = users.get('tuser%d' % idx) + test_user.delete() + + +def rename_entry(server, idx, ou_name, new_parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=ou_name) + name = 'tuser%d' % idx + rdn = 'uid=%s' % name + test_user = 
users.get(name) + test_user.rename(new_rdn=rdn, newsuperior=new_parent) + + +def add_ldapsubentry(server, parent): + pwp = PwPolicyManager(server) + policy_props = {'passwordStorageScheme': 'ssha', + 'passwordCheckSyntax': 'on', + 'passwordInHistory': '6', + 'passwordChange': 'on', + 'passwordMinAge': '0', + 'passwordExp': 'off', + 'passwordMustChange': 'off',} + log.info('Create password policy for subtree {}'.format(parent)) + pwp.create_subtree_policy(parent, policy_props) + + +def test_double_delete(topo_m2, create_entry): + """Check that double delete of the entry doesn't crash server + + :id: 3496c82d-636a-48c9-973c-2455b12164cc + :setup: Two masters replication setup, a test entry + :steps: + 1. Delete the entry on the first master + 2. Delete the entry on the second master + 3. Check that server is alive + :expectedresults: + 1. Entry should be successfully deleted from first master + 2. Entry should be successfully deleted from second aster + 3. Server should me alive + """ + + m1 = topo_m2.ms["master1"] + m2 = topo_m2.ms["master2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.disable_to_master(m1, [m2]) + repl.disable_to_master(m2, [m1]) + + log.info('Deleting entry {} from master1'.format(create_entry.dn)) + topo_m2.ms["master1"].delete_s(create_entry.dn) + + log.info('Deleting entry {} from master2'.format(create_entry.dn)) + topo_m2.ms["master2"].delete_s(create_entry.dn) + + repl.enable_to_master(m2, [m1]) + repl.enable_to_master(m1, [m2]) + + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + [email protected] +def test_repl_modrdn(topo_m2): + """Test that replicated MODRDN does not break replication + + :id: a3e17698-9eb4-41e0-b537-8724b9915fa6 + :setup: Two masters replication setup + :steps: + 1. Add 3 test OrganizationalUnits A, B and C + 2. Add 1 test user under OU=A + 3. Add same test user under OU=B + 4. Stop Replication + 5. Apply modrdn to M1 - move test user from OU A -> C + 6. Apply modrdn on M2 - move test user from OU B -> C + 7. Start Replication + 8. Check that there should be only one test entry under ou=C on both masters + 9. Check that the replication is working fine both ways M1 <-> M2 + :expectedresults: + 1. This should pass + 2. This should pass + 3. This should pass + 4. This should pass + 5. This should pass + 6. This should pass + 7. This should pass + 8. This should pass + 9. 
This should pass + """ + + master1 = topo_m2.ms["master1"] + master2 = topo_m2.ms["master2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs") + OUs = OrganizationalUnits(master1, DEFAULT_SUFFIX) + OU_A = OUs.create(properties={ + 'ou': 'A', + 'description': 'A', + }) + OU_B = OUs.create(properties={ + 'ou': 'B', + 'description': 'B', + }) + OU_C = OUs.create(properties={ + 'ou': 'C', + 'description': 'C', + }) + + users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn)) + tuser_A = users.create(properties=TEST_USER_PROPERTIES) + + users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn)) + tuser_B = users.create(properties=TEST_USER_PROPERTIES) + + repl.test_replication(master1, master2) + repl.test_replication(master2, master1) + + log.info("Stop Replication") + topo_m2.pause_all_replicas() + + log.info("Apply modrdn to M1 - move test user from OU A -> C") + master1.rename_s(tuser_A.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1) + + log.info("Apply modrdn on M2 - move test user from OU B -> C") + master2.rename_s(tuser_B.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1) + + log.info("Start Replication") + topo_m2.resume_all_replicas() + + log.info("Wait for sometime for repl to resume") + repl.test_replication(master1, master2) + repl.test_replication(master2, master1) + + log.info("Check that there should be only one test entry under ou=C on both masters") + users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn)) + assert len(users.list()) == 1 + + users = UserAccounts(master2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn)) + assert len(users.list()) == 1 + + log.info("Check that the replication is working fine both ways, M1 <-> M2") + repl.test_replication(master1, master2) + repl.test_replication(master2, master1) + + +def test_password_repl_error(topo_m2, create_entry): + """Check that error about userpassword replication is properly logged + + :id: 714130ff-e4f0-4633-9def-c1f4b24abfef + :setup: Four masters replication setup, a test entry + :steps: + 1. Change userpassword on the first master + 2. Restart the servers to flush the logs + 3. Check the error log for an replication error + :expectedresults: + 1. Password should be successfully changed + 2. Server should be successfully restarted + 3. 
There should be no replication errors in the error log + """ + + m1 = topo_m2.ms["master1"] + m2 = topo_m2.ms["master2"] + TEST_ENTRY_NEW_PASS = 'new_pass' + + log.info('Clean the error log') + m2.deleteErrorLogs() + + log.info('Set replication loglevel') + m2.config.loglevel((ErrorLog.REPLICA,)) + + log.info('Modifying entry {} - change userpassword on master 1'.format(create_entry.dn)) + + create_entry.set('userpassword', TEST_ENTRY_NEW_PASS) + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(m1, m2) + + log.info('Restart the servers to flush the logs') + for num in range(1, 3): + topo_m2.ms["master{}".format(num)].restart() + + try: + log.info('Check that password works on master 2') + create_entry_m2 = UserAccount(m2, create_entry.dn) + create_entry_m2.bind(TEST_ENTRY_NEW_PASS) + + log.info('Check the error log for the error with {}'.format(create_entry.dn)) + assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(create_entry.dn)) + finally: + log.info('Set the default loglevel') + m2.config.loglevel((ErrorLog.DEFAULT,)) + + +def test_invalid_agmt(topo_m2): + """Test adding that an invalid agreement is properly rejected and does not crash the server + + :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b + :setup: Four masters replication setup + :steps: + 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + 2. Verify the server is still running + :expectedresults: + 1. Invalid repl agreement should be rejected + 2. Server should be still running + """ + + m1 = topo_m2.ms["master1"] + m2 = topo_m2.ms["master2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + replicas = Replicas(m1) + replica = replicas.get(DEFAULT_SUFFIX) + agmts = replica.get_agreements() + + # Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + agmts.create(properties={ + 'cn': 'whatever', + 'nsDS5ReplicaRoot': DEFAULT_SUFFIX, + 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config', + 'nsDS5ReplicaBindMethod': 'simple', + 'nsDS5ReplicaTransportInfo': 'LDAP', + 'nsds5replicaTimeout': '5', + 'description': "test agreement", + 'nsDS5ReplicaHost': m2.host, + 'nsDS5ReplicaPort': str(m2.port), + 'nsDS5ReplicaCredentials': 'whatever', + 'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE' + }) + + # Verify the server is still running + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + +def test_fetch_bindDnGroup(topo_m2): + """Check the bindDNGroup is fetched on first replication session + + :id: 5f1b1f59-6744-4260-b091-c82d22130025 + :setup: 2 Master Instances + :steps: + 1. Create a replication bound user and group, but the user *not* member of the group + 2. Check that replication is working + 3. Some preparation is required because of lib389 magic that already define a replication via group + - define the group as groupDN for replication and 60sec as fetch interval + - pause RA in both direction + - Define the user as bindDn of the RAs + 4. restart servers. + It sets the fetch time to 0, so next session will refetch the group + 5. Before resuming RA, add user to groupDN (on both side as replication is not working at that time) + 6. trigger an update and check replication is working and + there is no failure logged on supplier side 'does not have permission to supply replication updates to the replica' + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + M1 = topo_m2.ms['master1'] + M2 = topo_m2.ms['master2'] + + # Enable replication log level. Not really necessary + M1.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')]) + M2.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')]) + + # Create a group and a user + PEOPLE = "ou=People,%s" % SUFFIX + PASSWD = 'password' + REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn' + + uid = REPL_MGR_BOUND_DN.encode() + users = UserAccounts(M1, PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': uid, 'cn': uid, 'sn': '_%s' % uid, 'userpassword': PASSWD.encode(), 'description': b'value creation'}) + create_user = users.create(properties=user_props) + + groups_M1 = Groups(M1, DEFAULT_SUFFIX) + group_properties = { + 'cn': 'group1', + 'description': 'testgroup'} + group_M1 = groups_M1.create(properties=group_properties) + group_M2 = Group(M2, group_M1.dn) + assert(not group_M1.is_member(create_user.dn)) + + # Check that M1 and M2 are in sync + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(M1, M2, timeout=20) + + # Define the group as the replication manager and fetch interval as 60sec + replicas = Replicas(M1) + replica = replicas.list()[0] + replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'), + (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)]) + + replicas = Replicas(M2) + replica = replicas.list()[0] + replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'), + (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)]) + + # Then pause the replication agreement to prevent them trying to acquire + # while the user is not member of the group + topo_m2.pause_all_replicas() + + # Define the user as the bindDN of the RAs + for inst in (M1, M2): + agmts = Agreements(inst) + agmt = agmts.list()[0] + agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode()) + agmt.replace('nsds5ReplicaCredentials', PASSWD.encode()) + + # Key step + # The restart will fetch the group/members define in the replica + # + # The user NOT member of the group replication will not work until bindDNcheckInterval + # + # With the fix, the first fetch is not taken into account (fetch time=0) + # so on the first session, the group will be fetched + M1.restart() + M2.restart() + + # Replication being broken here we need to directly do the same update. 
+ # Sorry not found another solution except total update + group_M1.add_member(create_user.dn) + group_M2.add_member(create_user.dn) + + topo_m2.resume_all_replicas() + + # trigger updates to be sure to have a replication session, giving some time + M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')]) + M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')]) + time.sleep(10) + + # Check replication is working + ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)') + for ent in ents: + assert (ent.hasAttr('description')) + found = 0 + for val in ent.getValues('description'): + if (val == b'value_1_1'): + found = found + 1 + elif (val == b'value_2_2'): + found = found + 1 + assert (found == 2) + + ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)') + for ent in ents: + assert (ent.hasAttr('description')) + found = 0 + for val in ent.getValues('description'): + if (val == b'value_1_1'): + found = found + 1 + elif (val == b'value_2_2'): + found = found + 1 + assert (found == 2) + + # Check in the logs that the member was detected in the group although + # at startup it was not member of the group + regex = re.compile("does not have permission to supply replication updates to the replica.") + errorlog_M1 = open(M1.errlog, "r") + errorlog_M2 = open(M1.errlog, "r") + + # Find the last restart position + restart_location_M1 = find_start_location(errorlog_M1, 2) + assert (restart_location_M1 != -1) + restart_location_M2 = find_start_location(errorlog_M2, 2) + assert (restart_location_M2 != -1) + + # Then check there is no failure to authenticate + count = pattern_errorlog(errorlog_M1, regex, start_location=restart_location_M1) + assert(count <= 1) + count = pattern_errorlog(errorlog_M2, regex, start_location=restart_location_M2) + assert(count <= 1) + + +def test_plugin_bind_dn_tracking_and_replication(topo_m2): + """Testing nsslapd-plugin-binddn-tracking does not cause issues around + access control and reconfiguring replication/repl agmt. + + :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c9 + :setup: 2 master topology + :steps: + 1. Turn on plugin binddn tracking + 2. Add some users + 3. Make an update as a user + 4. Make an update to the replica config + 5. Make an update to the repliocation agreement + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + m1 = topo_m2.ms["master1"] + + # Turn on bind dn tracking + m1.config.set('nsslapd-plugin-binddn-tracking', 'on') + + # Add two users + users = UserAccounts(m1, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1011) + user1.set('userpassword', PASSWORD) + user2 = users.create_test_user(uid=1012) + + # Add an aci + acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \ + ';allow (all) (userdn = "ldap:///{}");)'.format(user1.dn) + Domain(m1, DEFAULT_SUFFIX).add('aci', acival) + + # Bind as user and make an update + user1.rebind(PASSWORD) + user2.set('cn', 'new value') + dm = DirectoryManager(m1) + dm.rebind() + + # modify replica + replica = Replicas(m1).get(DEFAULT_SUFFIX) + replica.set(REPL_PROTOCOL_TIMEOUT, "30") + + # modify repl agmt + agmt = replica.get_agreements().list()[0] + agmt.set(REPL_PROTOCOL_TIMEOUT, "20") + + [email protected] [email protected] +def test_moving_entry_make_online_init_fail(topo_m2): + """ + Moving an entry could make the online init fail + + :id: e3895be7-884a-4e9f-80e3-24e9a5167c9e + :setup: Two masters replication setup + :steps: + 1. Generate DIT_0 + 2. 
Generate password policy for DIT_0 + 3. Create users for DIT_0 + 4. Turn idx % 2 == 0 users into tombstones + 5. Generate DIT_1 + 6. Move 'ou=OU0,ou=OU0,dc=example,dc=com' to DIT_1 + 7. Move 'ou=OU0,dc=example,dc=com' to DIT_1 + 8. Move idx % 2 == 1 users to 'ou=OU0,ou=OU0,ou=OU1,dc=example,dc=com' + 9. Init replicas + 10. Number of entries should match on both masters + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + """ + + M1 = topo_m2.ms["master1"] + M2 = topo_m2.ms["master2"] + + log.info("Generating DIT_0") + idx = 0 + add_ou_entry(M1, idx, DEFAULT_SUFFIX) + log.info("Created entry: ou=OU0, dc=example, dc=com") + + ou0 = 'ou=OU%d' % idx + first_parent = '%s,%s' % (ou0, DEFAULT_SUFFIX) + add_ou_entry(M1, idx, first_parent) + log.info("Created entry: ou=OU0, ou=OU0, dc=example, dc=com") + + add_ldapsubentry(M1, first_parent) + + ou_name = 'ou=OU%d,ou=OU%d' % (idx, idx) + second_parent = 'ou=OU%d,%s' % (idx, first_parent) + for idx in range(0, 9): + add_user_entry(M1, idx, ou_name) + if idx % 2 == 0: + log.info("Turning tuser%d into a tombstone entry" % idx) + del_user_entry(M1, idx, ou_name) + + log.info('%s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, first_parent, second_parent)) + + log.info("Generating DIT_1") + idx = 1 + add_ou_entry(M1, idx, DEFAULT_SUFFIX) + log.info("Created entry: ou=OU1,dc=example,dc=com") + + third_parent = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX) + add_ou_entry(M1, idx, third_parent) + log.info("Created entry: ou=OU1, ou=OU1, dc=example, dc=com") + + add_ldapsubentry(M1, third_parent) + + log.info("Moving %s to DIT_1" % second_parent) + OrganizationalUnits(M1, second_parent).get('OU0').rename(ou0, newsuperior=third_parent) + + log.info("Moving %s to DIT_1" % first_parent) + fourth_parent = '%s,%s' % (ou0, third_parent) + OrganizationalUnits(M1, first_parent).get('OU0').rename(ou0, newsuperior=fourth_parent) + + fifth_parent = '%s,%s' % (ou0, fourth_parent) + + ou_name = 'ou=OU0,ou=OU1' + log.info("Moving USERS to %s" % fifth_parent) + for idx in range(0, 9): + if idx % 2 == 1: + rename_entry(M1, idx, ou_name, fifth_parent) + + log.info('%s => %s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, third_parent, fourth_parent, fifth_parent)) + + log.info("Run Initialization.") + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(M1, M2, timeout=5) + + m1entries = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))') + m2entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))') + + log.info("m1entry count - %d", len(m1entries)) + log.info("m2entry count - %d", len(m2entries)) + + assert len(m1entries) == len(m2entries) + + +def get_keepalive_entries(instance, replica): + # Returns the keep alive entries that exists with the suffix of the server instance + try: + entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL, + "(&(objectclass=ldapsubentry)(cn=repl keep alive*))", + ['cn', 'nsUniqueId', 'modifierTimestamp']) + except ldap.LDAPError as e: + log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e))) + assert False + # No error, so lets log the keepalive entries + if log.isEnabledFor(logging.DEBUG): + for ret in entries: + log.debug("Found keepalive entry:\n"+str(ret)); + return entries + + +def verify_keepalive_entries(topo, expected): + # Check 
that keep alive entries exists (or not exists) for every masters on every masters + # Note: The testing method is quite basic: counting that there is one keepalive entry per master. + # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but + # not for the general case as keep alive associated with no more existing master may exists + # (for example after: db2ldif / demote a master / ldif2db / init other masters) + # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries + # should be done. + for masterId in topo.ms: + master = topo.ms[masterId] + for replica in Replicas(master).list(): + if (replica.get_role() != ReplicaRole.MASTER): + continue + replica_info = f'master: {masterId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}' + log.debug(f'Checking keepAliveEntries on {replica_info}') + keepaliveEntries = get_keepalive_entries(master, replica); + expectedCount = len(topo.ms) if expected else 0 + foundCount = len(keepaliveEntries) + if (foundCount == expectedCount): + log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.') + else: + log.error(f'{foundCount} Keepalive entries are found ' + f'while {expectedCount} were expected on {replica_info}.') + assert False + + +def test_online_init_should_create_keepalive_entries(topo_m2): + """Check that keep alive entries are created when initializinf a master from another one + + :id: d5940e71-d18a-4b71-aaf7-b9185361fffe + :setup: Two masters replication setup + :steps: + 1. Generate ldif without replication data + 2 Init both masters from that ldif + 3 Check that keep alive entries does not exists + 4 Perform on line init of master2 from master1 + 5 Check that keep alive entries exists + :expectedresults: + 1. No error while generating ldif + 2. No error while importing the ldif file + 3. No keepalive entrie should exists on any masters + 4. No error while initializing master2 + 5. 
All keepalive entries should exist on every masters + + """ + + repl = ReplicationManager(DEFAULT_SUFFIX) + m1 = topo_m2.ms["master1"] + m2 = topo_m2.ms["master2"] + # Step 1: Generate ldif without replication data + m1.stop() + m2.stop() + ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() + m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=False, + outputfile=ldif_file, encrypt=False) + # Remove replication metadata that are still in the ldif + _remove_replication_data(ldif_file) + + # Step 2: Init both masters from that ldif + m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m1.start() + m2.start() + + """ Replica state is now as if CLI setup has been done using: + dsconf master1 replication enable --suffix "${SUFFIX}" --role master + dsconf master2 replication enable --suffix "${SUFFIX}" --role master + dsconf master1 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" + dsconf master2 replication create-manager --name "${REPLICATION_MANAGER_NAME}" --passwd "${REPLICATION_MANAGER_PASSWORD}" + dsconf master1 repl-agmt create --suffix "${SUFFIX}" + dsconf master2 repl-agmt create --suffix "${SUFFIX}" + """ + + # Step 3: No keepalive entrie should exists on any masters + verify_keepalive_entries(topo_m2, False) + + # Step 4: Perform on line init of master2 from master1 + agmt = Agreements(m1).list()[0] + agmt.begin_reinit() + (done, error) = agmt.wait_reinit() + assert done is True + assert error is False + + # Step 5: All keepalive entries should exists on every masters + # Verify the keep alive entry once replication is in sync + # (that is the step that fails when bug is not fixed) + repl.wait_for_ruv(m2,m1) + verify_keepalive_entries(topo_m2, True); + + [email protected] [email protected] +def test_online_reinit_may_hang(topo_with_sigkill): + """Online reinitialization may hang when the first + entry of the DB is RUV entry instead of the suffix + + :id: cded6afa-66c0-4c65-9651-993ba3f7a49c + :setup: 2 Master Instances + :steps: + 1. Export the database + 2. Move RUV entry to the top in the ldif file + 3. Import the ldif file + 4. Online replica initializaton + :expectedresults: + 1. Ldif file should be created successfully + 2. RUV entry should be on top in the ldif file + 3. Import should be successful + 4. Server should not hang and consume 100% CPU + """ + M1 = topo_with_sigkill.ms["master1"] + M2 = topo_with_sigkill.ms["master2"] + M1.stop() + ldif_file = '%s/master1.ldif' % M1.get_ldif_dir() + M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=True, + outputfile=ldif_file, encrypt=False) + _move_ruv(ldif_file) + M1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + M1.start() + # After this server may hang + agmt = Agreements(M1).list()[0] + agmt.begin_reinit() + (done, error) = agmt.wait_reinit() + assert done is True + assert error is False + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topo_with_sigkill) + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/regression_m2c2_test.py b/dirsrvtests/tests/suites/replication/regression_m2c2_test.py new file mode 100644 index 000000000..bd457c55b --- /dev/null +++ b/dirsrvtests/tests/suites/replication/regression_m2c2_test.py @@ -0,0 +1,330 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import pytest +from lib389.utils import * +from lib389._constants import * +from lib389.replica import Replicas, ReplicationManager +from lib389.agreement import Agreements +from lib389.dseldif import * +from lib389.topologies import topology_m2c2 as topo_m2c2 + + +pytestmark = pytest.mark.tier1 + +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +MAXAGE_STR = '30' +TRIMINTERVAL_STR = '5' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def get_agreement(agmts, consumer): + # Get agreement towards consumer among the agremment list + for agmt in agmts.list(): + if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and + agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host): + return agmt + return None + + +def test_ruv_url_not_added_if_different_uuid(topo_m2c2): + """Check that RUV url is not updated if RUV generation uuid are different + + :id: 7cc30a4e-0ffd-4758-8f00-e500279af344 + :setup: Two masters + two consumers replication setup + :steps: + 1. Generate ldif without replication data + 2. Init both masters from that ldif + (to clear the ruvs and generates different generation uuid) + 3. Perform on line init from master1 to consumer1 + and from master2 to consumer2 + 4. Perform update on both masters + 5. Check that c1 RUV does not contains URL towards m2 + 6. Check that c2 RUV does contains URL towards m2 + 7. Perform on line init from master1 to master2 + 8. Perform update on master2 + 9. Check that c1 RUV does contains URL towards m2 + :expectedresults: + 1. No error while generating ldif + 2. No error while importing the ldif file + 3. No error and Initialization done. + 4. No error + 5. master2 replicaid should not be in the consumer1 RUV + 6. master2 replicaid should be in the consumer2 RUV + 7. No error and Initialization done. + 8. No error + 9. 
master2 replicaid should be in the consumer1 RUV + + """ + + # Variables initialization + repl = ReplicationManager(DEFAULT_SUFFIX) + + m1 = topo_m2c2.ms["master1"] + m2 = topo_m2c2.ms["master2"] + c1 = topo_m2c2.cs["consumer1"] + c2 = topo_m2c2.cs["consumer2"] + + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) + replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) + + replicid_m2 = replica_m2.get_rid() + + agmts_m1 = Agreements(m1, replica_m1.dn) + agmts_m2 = Agreements(m2, replica_m2.dn) + + m1_m2 = get_agreement(agmts_m1, m2) + m1_c1 = get_agreement(agmts_m1, c1) + m1_c2 = get_agreement(agmts_m1, c2) + m2_m1 = get_agreement(agmts_m2, m1) + m2_c1 = get_agreement(agmts_m2, c1) + m2_c2 = get_agreement(agmts_m2, c2) + + # Step 1: Generate ldif without replication data + m1.stop() + m2.stop() + ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() + m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=False, + outputfile=ldif_file, encrypt=False) + # Remove replication metadata that are still in the ldif + # _remove_replication_data(ldif_file) + + # Step 2: Init both masters from that ldif + m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m1.start() + m2.start() + + # Step 3: Perform on line init from master1 to consumer1 + # and from master2 to consumer2 + m1_c1.begin_reinit() + m2_c2.begin_reinit() + (done, error) = m1_c1.wait_reinit() + assert done is True + assert error is False + (done, error) = m2_c2.wait_reinit() + assert done is True + assert error is False + + # Step 4: Perform update on both masters + repl.test_replication(m1, c1) + repl.test_replication(m2, c2) + + # Step 5: Check that c1 RUV does not contains URL towards m2 + ruv = replica_c1.get_ruv() + log.debug(f"c1 RUV: {ruv}") + url = ruv._rid_url.get(replica_m2.get_rid()) + if url is None: + log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV") + else: + log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") + log.error(f"URL for RID {replica_m2.get_rid()} found in RUV") + # Note: this assertion fails if issue 2054 is not fixed. + assert False + + # Step 6: Check that c2 RUV does contains URL towards m2 + ruv = replica_c2.get_ruv() + log.debug(f"c1 RUV: {ruv} {ruv._rids} ") + url = ruv._rid_url.get(replica_m2.get_rid()) + if url is None: + log.error(f"No URL for RID {replica_m2.get_rid()} in RUV") + assert False + else: + log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") + + # Step 7: Perform on line init from master1 to master2 + m1_m2.begin_reinit() + (done, error) = m1_m2.wait_reinit() + assert done is True + assert error is False + + # Step 8: Perform update on master2 + repl.test_replication(m2, c1) + + # Step 9: Check that c1 RUV does contains URL towards m2 + ruv = replica_c1.get_ruv() + log.debug(f"c1 RUV: {ruv} {ruv._rids} ") + url = ruv._rid_url.get(replica_m2.get_rid()) + if url is None: + log.error(f"No URL for RID {replica_m2.get_rid()} in RUV") + assert False + else: + log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") + + +def test_csngen_state_not_updated_if_different_uuid(topo_m2c2): + """Check that csngen remote offset is not updated if RUV generation uuid are different + + :id: 77694b8e-22ae-11eb-89b2-482ae39447e5 + :setup: Two masters + two consumers replication setup + :steps: + 1. Disable m1<->m2 agreement to avoid propagate timeSkew + 2. 
Generate ldif without replication data + 3. Increase time skew on master2 + 4. Init both masters from that ldif + (to clear the ruvs and generates different generation uuid) + 5. Perform on line init from master1 to consumer1 and master2 to consumer2 + 6. Perform update on both masters + 7: Check that c1 has no time skew + 8: Check that c2 has time skew + 9. Init master2 from master1 + 10. Perform update on master2 + 11. Check that c1 has time skew + :expectedresults: + 1. No error + 2. No error while generating ldif + 3. No error + 4. No error while importing the ldif file + 5. No error and Initialization done. + 6. No error + 7. c1 time skew should be lesser than threshold + 8. c2 time skew should be higher than threshold + 9. No error and Initialization done. + 10. No error + 11. c1 time skew should be higher than threshold + + """ + + # Variables initialization + repl = ReplicationManager(DEFAULT_SUFFIX) + + m1 = topo_m2c2.ms["master1"] + m2 = topo_m2c2.ms["master2"] + c1 = topo_m2c2.cs["consumer1"] + c2 = topo_m2c2.cs["consumer2"] + + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) + replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) + + replicid_m2 = replica_m2.get_rid() + + agmts_m1 = Agreements(m1, replica_m1.dn) + agmts_m2 = Agreements(m2, replica_m2.dn) + + m1_m2 = get_agreement(agmts_m1, m2) + m1_c1 = get_agreement(agmts_m1, c1) + m1_c2 = get_agreement(agmts_m1, c2) + m2_m1 = get_agreement(agmts_m2, m1) + m2_c1 = get_agreement(agmts_m2, c1) + m2_c2 = get_agreement(agmts_m2, c2) + + # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew + m1_m2.pause() + m2_m1.pause() + + # Step 2: Generate ldif without replication data + m1.stop() + m2.stop() + ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() + m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=False, + outputfile=ldif_file, encrypt=False) + # Remove replication metadata that are still in the ldif + # _remove_replication_data(ldif_file) + + # Step 3: Increase time skew on master2 + timeSkew = 6*3600 + # We can modify master2 time skew + # But the time skew on the consumer may be smaller + # depending on when the cnsgen generation time is updated + # and when first csn get replicated. 
+ # Since we use timeSkew has threshold value to detect + # whether there are time skew or not, + # lets add a significative margin (longer than the test duration) + # to avoid any risk of erroneous failure + timeSkewMargin = 300 + DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin) + + # Step 4: Init both masters from that ldif + m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m1.start() + m2.start() + + # Step 5: Perform on line init from master1 to consumer1 + # and from master2 to consumer2 + m1_c1.begin_reinit() + m2_c2.begin_reinit() + (done, error) = m1_c1.wait_reinit() + assert done is True + assert error is False + (done, error) = m2_c2.wait_reinit() + assert done is True + assert error is False + + # Step 6: Perform update on both masters + repl.test_replication(m1, c1) + repl.test_replication(m2, c2) + + # Step 7: Check that c1 has no time skew + # Stop server to insure that dse.ldif is uptodate + c1.stop() + c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] + c1_timeSkew = int(c1_nsState['time_skew']) + log.debug(f"c1 time skew: {c1_timeSkew}") + if (c1_timeSkew >= timeSkew): + log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}") + assert False + c1.start() + + # Step 8: Check that c2 has time skew + # Stop server to insure that dse.ldif is uptodate + c2.stop() + c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0] + c2_timeSkew = int(c2_nsState['time_skew']) + log.debug(f"c2 time skew: {c2_timeSkew}") + if (c2_timeSkew < timeSkew): + log.error(f"c2 csngen state has not been synchronized with m2: time skew {c2_timeSkew}") + assert False + c2.start() + + # Step 9: Perform on line init from master1 to master2 + m1_c1.pause() + m1_m2.resume() + m1_m2.begin_reinit() + (done, error) = m1_m2.wait_reinit() + assert done is True + assert error is False + + # Step 10: Perform update on master2 + repl.test_replication(m2, c1) + + # Step 11: Check that c1 has time skew + # Stop server to insure that dse.ldif is uptodate + c1.stop() + c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] + c1_timeSkew = int(c1_nsState['time_skew']) + log.debug(f"c1 time skew: {c1_timeSkew}") + if (c1_timeSkew < timeSkew): + log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}") + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/regression_m3_test.py b/dirsrvtests/tests/suites/replication/regression_m3_test.py new file mode 100644 index 000000000..cb8d82765 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/regression_m3_test.py @@ -0,0 +1,172 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import time +import logging +import ldap +import pytest +from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts +from lib389.utils import * +from lib389._constants import * +from lib389.replica import Changelog5 +from lib389.dseldif import * +from lib389.topologies import topology_m3 as topo_m3 + + +pytestmark = pytest.mark.tier1 + +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +MAXAGE_STR = '30' +TRIMINTERVAL_STR = '5' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_cleanallruv_repl(topo_m3): + """Test that cleanallruv could not break replication if anchor csn in ruv originated + in deleted replica + + :id: 46faba9a-897e-45b8-98dc-aec7fa8cec9a + :setup: 3 Masters + :steps: + 1. Configure error log level to 8192 in all masters + 2. Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2 + 3. Add test users to 3 masters + 4. Launch ClearRuv but withForce + 5. Check the users after CleanRUV, because of changelog trimming, it will effect the CLs + :expectedresults: + 1. Error logs should be configured successfully + 2. Modify should be successful + 3. Test users should be added successfully + 4. ClearRuv should be launched successfully + 5. Users should be present according to the changelog trimming effect + """ + + M1 = topo_m3.ms["master1"] + M2 = topo_m3.ms["master2"] + M3 = topo_m3.ms["master3"] + + log.info("Change the error log levels for all masters") + for s in (M1, M2, M3): + s.config.replace('nsslapd-errorlog-level', "8192") + + log.info("Get the replication agreements for all 3 masters") + m1_m2 = M1.agreement.list(suffix=SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + m1_m3 = M1.agreement.list(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + m3_m1 = M3.agreement.list(suffix=SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + + log.info("Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2") + if ds_supports_new_changelog(): + CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) + + # set_value(M1, MAXAGE_ATTR, MAXAGE_STR) + try: + M1.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, MAXAGE_ATTR, ensure_bytes(MAXAGE_STR))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + MAXAGE_ATTR, + ': ' + MAXAGE_STR + ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + + # set_value(M2, TRIMINTERVAL, TRIMINTERVAL_STR) + try: + M2.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, TRIMINTERVAL, ensure_bytes(TRIMINTERVAL_STR))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + TRIMINTERVAL, + ': ' + TRIMINTERVAL_STR + ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + else: + log.info("Get the changelog enteries for M1 and M2") + changelog_m1 = Changelog5(M1) + changelog_m1.set_max_age(MAXAGE_STR) + changelog_m1.set_trim_interval(TRIMINTERVAL_STR) + + log.info("Add test users to 3 masters") + users_m1 = UserAccounts(M1, DEFAULT_SUFFIX) + users_m2 = UserAccounts(M2, DEFAULT_SUFFIX) + users_m3 = UserAccounts(M3, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + + user_props.update({'uid': 
"testuser10"}) + user10 = users_m1.create(properties=user_props) + + user_props.update({'uid': "testuser20"}) + user20 = users_m2.create(properties=user_props) + + user_props.update({'uid': "testuser30"}) + user30 = users_m3.create(properties=user_props) + + # ::important:: the testuser31 is the oldest csn in M2, + # because it will be cleared by changelog trimming + user_props.update({'uid': "testuser31"}) + user31 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser11"}) + user11 = users_m1.create(properties=user_props) + + user_props.update({'uid': "testuser21"}) + user21 = users_m2.create(properties=user_props) + # this is to trigger changelog trim and interval values + time.sleep(40) + + # Here M1, M2, M3 should have 11,21,31 and 10,20,30 are CL cleared + M2.stop() + M1.agreement.pause(m1_m2[0].dn) + user_props.update({'uid': "testuser32"}) + user32 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser33"}) + user33 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser12"}) + user12 = users_m1.create(properties=user_props) + + M3.agreement.pause(m3_m1[0].dn) + M3.agreement.resume(m3_m1[0].dn) + time.sleep(40) + + # Here because of changelog trimming testusers 31 and 32 are CL cleared + # ClearRuv is launched but with Force + M3.stop() + M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', + force=True, args={TASK_WAIT: False}) + + # here M1 should clear 31 + M2.start() + M1.agreement.pause(m1_m2[0].dn) + M1.agreement.resume(m1_m2[0].dn) + time.sleep(10) + + # Check the users after CleanRUV + expected_m1_users = [user31.dn, user11.dn, user21.dn, user32.dn, user33.dn, user12.dn] + expected_m1_users = [x.lower() for x in expected_m1_users] + expected_m2_users = [user31.dn, user11.dn, user21.dn, user12.dn] + expected_m2_users = [x.lower() for x in expected_m2_users] + + current_m1_users = [user.dn for user in users_m1.list()] + current_m1_users = [x.lower() for x in current_m1_users] + current_m2_users = [user.dn for user in users_m2.list()] + current_m2_users = [x.lower() for x in current_m2_users] + + assert set(expected_m1_users).issubset(current_m1_users) + assert set(expected_m2_users).issubset(current_m2_users) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE)
0
46fb7cee43a396ed9f58b2d6a7495c6dba4948c4
389ds/389-ds-base
Issue 51016 - Fix memory leaks in changelog5_init and perfctrs_init Bug Description: Memory leaks are detected by ASAN in changelog5_init and perfctrs_init functions. Fix Description: For perfctrs_init, free existing memory before initializing new memory which will be assigned to the existing structs. For changelog5_init, run cl5Cleanup instead of cl5Close for BE preop. https://pagure.io/389-ds-base/issue/51016 Reviewed by: lkrispen (Thanks!)
commit 46fb7cee43a396ed9f58b2d6a7495c6dba4948c4 Author: Simon Pichugin <[email protected]> Date: Thu Apr 9 09:34:31 2020 +0200 Issue 51016 - Fix memory leaks in changelog5_init and perfctrs_init Bug Description: Memory Leaks are detected by ASAN in changelog5_init and perfctrs_init functions. Fix Description: For perfctrs_init, free existing memory before initializing new memory which will be assigned to the existing stucts. For changelog5_init, run cl5Cleanup instead of cl5Close for BE preop. https://pagure.io/389-ds-base/issue/51016 Reviewed by: lkrispen (Thanks!) diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c index 5badd7ca7..5a748e35a 100644 --- a/ldap/servers/plugins/replication/repl5_init.c +++ b/ldap/servers/plugins/replication/repl5_init.c @@ -338,7 +338,7 @@ multimaster_bepreop_init(Slapi_PBlock *pb) if (slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01) != 0 || slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&multimasterbepreopdesc) != 0 || - slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_CLOSE_FN, (void *)cl5Close) != 0 || + slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_CLOSE_FN, (void *)cl5Cleanup) != 0 || slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_BACKUP_FN, (void *)cl5WriteRUV) != 0) { slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name, "multimaster_bepreop_init - Failed\n"); rc = -1; diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c index 712deefd6..51a28923f 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c @@ -1293,6 +1293,14 @@ bdb_start(struct ldbminfo *li, int dbmode) return return_value; } + /* We need to free the memory to avoid a leak + * Also, we have to evaluate if the performance counter + * should be preserved or not for database restore. + * Look - https://pagure.io/389-ds-base/issue/51020 + */ + if (conf->perf_private) { + perfctrs_terminate(&conf->perf_private, pEnv->bdb_DB_ENV); + } /* Now open the performance counters stuff */ perfctrs_init(li, &(conf->perf_private)); if (getenv(TXN_TESTING)) {
0
142900b2757378bfbff34e3f390fcb1a292eea91
389ds/389-ds-base
Bug 551198 - LDAPI: incorrect logging to access log https://bugzilla.redhat.com/show_bug.cgi?id=551198 Resolves: bug 551198 Bug Description: LDAPI: incorrect logging to access log Reviewed by: nkinder (Thanks!) Branch: HEAD Fix Description: The connection logging code was not ldapi/unix socket aware. Now we check for the socket type, and check to see if there is a proper path name in the path field. The "server" side of the socket seems not to get the path name set correctly - not sure why, but it doesn't really matter, since the client side path name does seem to be set correctly. The access log will contain the server side path and the client side path, so something like "from local to /var/run/slapd-foo.socket" Platforms tested: RHEL5 x86_64, Fedora 11 x86_64 Flag Day: no Doc impact: no
commit 142900b2757378bfbff34e3f390fcb1a292eea91 Author: Rich Megginson <[email protected]> Date: Mon Mar 1 15:03:30 2010 -0700 Bug 551198 - LDAPI: incorrect logging to access log https://bugzilla.redhat.com/show_bug.cgi?id=551198 Resolves: bug 551198 Bug Description: LDAPI: incorrect logging to access log Reviewed by: nkinder (Thanks!) Branch: HEAD Fix Description: The connection logging code was not ldapi/unix socket aware. Now we check for the socket type, and check to see if there is a proper path name in the path field. The "server" side of the socket seems not to get the path name set correctly - not sure why, but it doesn't really matter, since the client side path name does seem to be set correctly. The access log will contain the server side path and the client side path, so something like "from local to /var/run/slapd-foo.socket" Platforms tested: RHEL5 x86_64, Fedora 11 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 5c8decbe5..8686d16e9 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -242,7 +242,22 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is * get peer address (IP address of this client) */ slapi_ch_free( (void**)&conn->cin_addr ); /* just to be conservative */ - if ( ((from->ipv6.ip.pr_s6_addr32[0] != 0) || /* from contains non zeros */ + if ( from->raw.family == PR_AF_LOCAL ) { /* ldapi */ + conn->cin_addr = (PRNetAddr *) slapi_ch_malloc( sizeof( PRNetAddr ) ); + PL_strncpyz(buf_ip, from->local.path, sizeof(from->local.path)); + memcpy( conn->cin_addr, from, sizeof( PRNetAddr ) ); + if (!buf_ip[0]) { + PR_GetPeerName( conn->c_prfd, from ); + PL_strncpyz(buf_ip, from->local.path, sizeof(from->local.path)); + memcpy( conn->cin_addr, from, sizeof( PRNetAddr ) ); + } + if (!buf_ip[0]) { + /* cannot derive local address */ + /* need something for logging */ + PL_strncpyz(buf_ip, "local", sizeof(buf_ip)); + } + str_ip = buf_ip; + } else if ( ((from->ipv6.ip.pr_s6_addr32[0] != 0) || /* from contains non zeros */ (from->ipv6.ip.pr_s6_addr32[1] != 0) || (from->ipv6.ip.pr_s6_addr32[2] != 0) || (from->ipv6.ip.pr_s6_addr32[3] != 0)) || @@ -261,7 +276,6 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is } buf_ip[ sizeof( buf_ip ) - 1 ] = '\0'; str_ip = buf_ip; - } else { /* try syscall since "from" was not given and PR_GetPeerName failed */ /* a corner case */ @@ -307,7 +321,13 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is conn->cin_destaddr = (PRNetAddr *) slapi_ch_malloc( sizeof( PRNetAddr ) ); memset( conn->cin_destaddr, 0, sizeof( PRNetAddr )); if (PR_GetSockName( conn->c_prfd, conn->cin_destaddr ) == 0) { - if ( PR_IsNetAddrType( conn->cin_destaddr, PR_IpAddrV4Mapped ) ) { + if ( conn->cin_destaddr->raw.family == PR_AF_LOCAL ) { /* ldapi */ + PL_strncpyz(buf_destip, conn->cin_destaddr->local.path, + sizeof(conn->cin_destaddr->local.path)); + if (!buf_destip[0]) { + PL_strncpyz(buf_destip, "unknown local file", sizeof(buf_destip)); + } + } else if ( PR_IsNetAddrType( conn->cin_destaddr, PR_IpAddrV4Mapped ) ) { PRNetAddr v4destaddr; memset( &v4destaddr, 0, sizeof( v4destaddr ) ); v4destaddr.inet.family = PR_AF_INET;
0
aec1b449419e7e65cce542bc8d0a2c74db252d5a
389ds/389-ds-base
Issue 4673 - Update Rust crates Description: Update the bare minimum Rust dependencies so that a build will complete Relates: https://github.com/389ds/389-ds-base/issues/4673 Reviewed by: mreynolds
commit aec1b449419e7e65cce542bc8d0a2c74db252d5a Author: Mark Reynolds <[email protected]> Date: Tue Mar 16 09:53:53 2021 -0400 Issue 4673 - Update Rust crates Description: Update the bare minimum rust dependencies so that a build will complete Relates: https://github.com/389ds/389-ds-base/issues/4673 Reviewed by: mreynolds diff --git a/src/Cargo.lock b/src/Cargo.lock index a8f1a7bbd..ffeb2895c 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2,9 +2,9 @@ # It is not intended for manual editing. [[package]] name = "ahash" -version = "0.6.3" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "796540673305a66d127804eef19ad696f1f204b8c1025aaca4958c17eab32877" +checksum = "7f200cbb1e856866d9eade941cf3aa0c5d7dd36f74311c4273b494f4ef036957" dependencies = [ "getrandom 0.2.2", "once_cell", @@ -60,9 +60,9 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "byteorder" -version = "1.4.2" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "cbindgen" @@ -83,9 +83,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.66" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" dependencies = [ "jobserver", ] @@ -113,9 +113,9 @@ dependencies = [ [[package]] name = "concread" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eef33d95d263ebf9423049e76de365a2788e42c2ea48fd5a9fdb98596ae09cb" +checksum = "89989b4bab859bb212ee8fa97de461b717636a40f680977c4e32ccfee7d2f1e5" dependencies = [ "ahash", "crossbeam", @@ -127,12 +127,6 @@ dependencies = [ "smallvec", ] -[[package]] -name = "const_fn" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" - [[package]] name = "crossbeam" version = "0.8.0" @@ -170,12 +164,11 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" dependencies = [ "cfg-if", - "const_fn", "crossbeam-utils", "lazy_static", "memoffset", @@ -194,9 +187,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" +checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" dependencies = [ "autocfg", "cfg-if", @@ -315,9 +308,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.86" +version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" +checksum = "538c092e5586f4cdd7dd8078c4a79220e3e168880218124dcbce860f0ea938c6" [[package]] name = "librnsslapd" @@ -381,9 +374,9 @@ dependencies = [ [[package]] name = 
"num-bigint" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e9a41747ae4633fce5adffb4d2e81ffc5e89593cb19917f8fb2cc5ff76507bf" +checksum = "7d0a3d5e207573f948a9e5376662aa743a2ea13f7c50a554d7af443a73fbfeba" dependencies = [ "autocfg", "num-integer", @@ -443,29 +436,29 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.5.2" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" [[package]] name = "openssl" -version = "0.10.32" +version = "0.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038d43985d1ddca7a9900630d8cd031b56e4794eecc2e9ea39dd17aa04399a70" +checksum = "a61075b62a23fef5a29815de7536d940aa35ce96d18ce0cc5076272db678a577" dependencies = [ "bitflags", "cfg-if", "foreign-types", - "lazy_static", "libc", + "once_cell", "openssl-sys", ] [[package]] name = "openssl-sys" -version = "0.9.60" +version = "0.9.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "921fc71883267538946025deffb622905ecad223c28efbfdef9bb59a0175f3e6" +checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" dependencies = [ "autocfg", "cc", @@ -487,14 +480,14 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" dependencies = [ "cfg-if", "instant", "libc", - "redox_syscall 0.1.57", + "redox_syscall", "smallvec", "winapi", ] @@ -560,9 +553,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] @@ -591,9 +584,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ "getrandom 0.2.2", ] @@ -609,15 +602,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - -[[package]] -name = "redox_syscall" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" dependencies = [ "bitflags", ] @@ -645,18 +632,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.123" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" +checksum = "bd761ff957cb2a45fbb9ab3da6512de9de55872866160b23c25f1a841e99d29f" dependencies = [ 
"serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.123" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" +checksum = "1800f7693e94e186f5e25a28291ae1570da908aff7d97a095dec1e56ff99069b" dependencies = [ "proc-macro2", "quote", @@ -665,9 +652,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.62" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" dependencies = [ "itoa", "ryu", @@ -704,9 +691,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "syn" -version = "1.0.60" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" +checksum = "3fd9d1e9976102a03c542daa2eff1b43f9d72306342f3f8b3ed5fb8908195d6f" dependencies = [ "proc-macro2", "quote", @@ -722,7 +709,7 @@ dependencies = [ "cfg-if", "libc", "rand", - "redox_syscall 0.2.4", + "redox_syscall", "remove_dir_all", "winapi", ] @@ -780,9 +767,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "wasi"
0
7a7bc7872c779ea5171cf02743cc89ca578496d2
389ds/389-ds-base
Issue 50706 - Missing lib389 dependency - packaging Description: In 15789e8 I introduced a new dependency for lib389, thinking it was part of Python's standard library, but it is not. We need to mention it explicitly in the spec file, requirements.txt, and setup.py. Fixes: https://pagure.io/389-ds-base/issue/50706 Reviewed by: mhonek (Thanks!)
commit 7a7bc7872c779ea5171cf02743cc89ca578496d2 Author: Viktor Ashirov <[email protected]> Date: Tue Nov 12 12:14:01 2019 +0100 Issue 50706 - Missing lib389 dependency - packaging Description: In 15789e8 I introduced a new dependency for lib389, thinking it's part of python's standard library, but it's not. We need to explicitly mention it in the spec file, requirements.txt and setup.py. Fixes: https://pagure.io/389-ds-base/issue/50706 Reviewed by: mhonek (Thanks!) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index c2a0b618d..e60b0f3c5 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -130,6 +130,7 @@ BuildRequires: python%{python3_pkgversion}-argcomplete BuildRequires: python%{python3_pkgversion}-argparse-manpage BuildRequires: python%{python3_pkgversion}-policycoreutils BuildRequires: python%{python3_pkgversion}-libselinux +BuildRequires: python%{python3_pkgversion}-packaging # For cockpit BuildRequires: rsync diff --git a/src/lib389/requirements.txt b/src/lib389/requirements.txt index 2b2582d64..5cce1d04b 100644 --- a/src/lib389/requirements.txt +++ b/src/lib389/requirements.txt @@ -6,3 +6,4 @@ six argcomplete argparse-manpage python-ldap +packaging diff --git a/src/lib389/setup.py b/src/lib389/setup.py index 61de441e4..f2e404333 100644 --- a/src/lib389/setup.py +++ b/src/lib389/setup.py @@ -82,6 +82,7 @@ setup( 'argcomplete', 'argparse-manpage', 'python-ldap', + 'packaging', ], cmdclass={
0
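A minimal sketch of the kind of version comparison that makes the packaging module a hard runtime dependency of lib389; the helper name and the version strings below are illustrative assumptions, not the actual code introduced in 15789e8.

# Illustrative only: shows why 'packaging' must be listed as an explicit dependency.
# The real lib389 helper referenced in commit 15789e8 is not reproduced here.
from packaging.version import Version


def is_at_least(current, required):
    """Return True if the installed server version meets the requirement."""
    return Version(current) >= Version(required)


if __name__ == '__main__':
    # Example: compare two 389-ds-base style version strings.
    print(is_at_least('1.4.2.4', '1.4.1'))   # True
    print(is_at_least('1.3.10', '1.4.0'))    # False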
3bb3f65605384549c4e203983e4283e3e3bc2318
389ds/389-ds-base
Issue 22 - Specify a basedn parameter for IDM modules Description: We need a way to create our IDM users and groups in a custom suffix. Currently the container is hard-coded to 'ou=people' for users, 'ou=groups' for groups, etc. It is also important not to break current functionality, because the IDM CLI depends on it. The parameter should remain optional, with a default of 'ou=people' for users, 'ou=groups' for groups, etc. Also, fix the test module names so they can be discovered by pytest. https://pagure.io/lib389/issue/22 Reviewed by: wibrown (Thanks!)
commit 3bb3f65605384549c4e203983e4283e3e3bc2318 Author: Simon Pichugin <[email protected]> Date: Tue Apr 11 22:47:14 2017 +0200 Issue 22 - Specify a basedn parameter for IDM modules Description: We need a way to create our IDM users and groups in a custom suffix. Now it is hard coded to 'ou=people' for users, 'ou=groups' for groups, etc. Also, it is important to do not break current functionality. CLI IDM depends on it. The parameter should remain optional, with a default set to 'ou=people' for users, 'ou=groups' for groups, etc. Also, fix test module names, so they can be successfully discovered by pytest. https://pagure.io/lib389/issue/22 Reviewed by: wibrown (Thanks!) diff --git a/src/lib389/lib389/idm/group.py b/src/lib389/lib389/idm/group.py index e4c102b8a..eabebfb7c 100644 --- a/src/lib389/lib389/idm/group.py +++ b/src/lib389/lib389/idm/group.py @@ -36,14 +36,14 @@ class Group(DSLdapObject): self.remove('member', dn) class Groups(DSLdapObjects): - def __init__(self, instance, basedn, batch=False): + def __init__(self, instance, basedn, batch=False, rdn='ou=Groups'): super(Groups, self).__init__(instance, batch) self._objectclasses = [ 'groupOfNames', ] self._filterattrs = [RDN] self._childobject = Group - self._basedn = 'ou=Groups,%s' % basedn + self._basedn = '{},{}'.format(rdn, basedn) class UniqueGroup(DSLdapObject): # WARNING!!! @@ -71,14 +71,14 @@ class UniqueGroup(DSLdapObject): class UniqueGroups(DSLdapObjects): # WARNING!!! # Use group, not unique group!!! - def __init__(self, instance, basedn, batch=False): + def __init__(self, instance, basedn, batch=False, rdn='ou=Groups'): super(UniqueGroups, self).__init__(instance, batch) self._objectclasses = [ 'groupOfUniqueNames', ] self._filterattrs = [RDN] self._childobject = UniqueGroup - self._basedn = 'ou=Groups,%s' % basedn + self._basedn = '{},{}'.format(rdn, basedn) diff --git a/src/lib389/lib389/idm/posixgroup.py b/src/lib389/lib389/idm/posixgroup.py index aebfb5f20..c3ad1c220 100644 --- a/src/lib389/lib389/idm/posixgroup.py +++ b/src/lib389/lib389/idm/posixgroup.py @@ -36,7 +36,7 @@ class PosixGroup(DSLdapObject): class PosixGroups(DSLdapObjects): - def __init__(self, instance, basedn, batch=False): + def __init__(self, instance, basedn, batch=False, rdn='ou=Groups'): super(PosixGroups, self).__init__(instance, batch) self._objectclasses = [ 'groupOfNames', @@ -44,6 +44,6 @@ class PosixGroups(DSLdapObjects): ] self._filterattrs = [RDN] self._childobject = PosixGroup - self._basedn = 'ou=Groups,%s' % basedn + self._basedn = '{},{}'.format(rdn, basedn) diff --git a/src/lib389/lib389/idm/services.py b/src/lib389/lib389/idm/services.py index b59e34588..129dad235 100644 --- a/src/lib389/lib389/idm/services.py +++ b/src/lib389/lib389/idm/services.py @@ -25,12 +25,12 @@ class ServiceAccount(DSLdapObject): self._protected = False class ServiceAccounts(DSLdapObjects): - def __init__(self, instance, basedn, batch=False): + def __init__(self, instance, basedn, batch=False, rdn='ou=Services'): super(ServiceAccounts, self).__init__(instance, batch) self._objectclasses = [ 'netscapeServer', ] self._filterattrs = [RDN] self._childobject = ServiceAccount - self._basedn = 'ou=Services,%s' % basedn + self._basedn = '{},{}'.format(rdn, basedn) diff --git a/src/lib389/lib389/idm/user.py b/src/lib389/lib389/idm/user.py index 5b7e4be9a..d422ed993 100644 --- a/src/lib389/lib389/idm/user.py +++ b/src/lib389/lib389/idm/user.py @@ -48,7 +48,7 @@ class UserAccount(DSLdapObject): # Can't I actually just set, and it will hash? 
class UserAccounts(DSLdapObjects): - def __init__(self, instance, basedn, batch=False): + def __init__(self, instance, basedn, batch=False, rdn='ou=People'): super(UserAccounts, self).__init__(instance, batch) self._objectclasses = [ 'account', @@ -59,5 +59,5 @@ class UserAccounts(DSLdapObjects): ] self._filterattrs = [RDN] self._childobject = UserAccount - self._basedn = 'ou=People,%s' % basedn + self._basedn = '{},{}'.format(rdn, basedn) diff --git a/src/lib389/lib389/tests/cli/adm_instance.py b/src/lib389/lib389/tests/cli/adm_instance_test.py similarity index 100% rename from src/lib389/lib389/tests/cli/adm_instance.py rename to src/lib389/lib389/tests/cli/adm_instance_test.py diff --git a/src/lib389/lib389/tests/cli/conf_backend.py b/src/lib389/lib389/tests/cli/conf_backend_test.py similarity index 100% rename from src/lib389/lib389/tests/cli/conf_backend.py rename to src/lib389/lib389/tests/cli/conf_backend_test.py diff --git a/src/lib389/lib389/tests/cli/conf_plugin.py b/src/lib389/lib389/tests/cli/conf_plugin_test.py similarity index 100% rename from src/lib389/lib389/tests/cli/conf_plugin.py rename to src/lib389/lib389/tests/cli/conf_plugin_test.py diff --git a/src/lib389/lib389/tests/idm/services.py b/src/lib389/lib389/tests/idm/services_test.py similarity index 100% rename from src/lib389/lib389/tests/idm/services.py rename to src/lib389/lib389/tests/idm/services_test.py diff --git a/src/lib389/lib389/tests/idm/users_and_groups.py b/src/lib389/lib389/tests/idm/user_and_group_test.py similarity index 100% rename from src/lib389/lib389/tests/idm/users_and_groups.py rename to src/lib389/lib389/tests/idm/user_and_group_test.py
0
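A short usage sketch of the optional rdn parameter added in the diff above; it assumes an already-connected lib389 DirSrv instance named inst, an example suffix, and that the target container entry exists, all of which are illustrative.

# Sketch only: 'inst' is assumed to be a connected lib389 DirSrv instance,
# and 'ou=Staff,dc=example,dc=com' is assumed to already exist in the tree.
from lib389.idm.user import UserAccounts

SUFFIX = 'dc=example,dc=com'  # illustrative suffix

# Default behaviour is unchanged: entries go under ou=People,<suffix>.
default_users = UserAccounts(inst, SUFFIX)

# With the new optional rdn argument, a custom container can be used instead.
staff_users = UserAccounts(inst, SUFFIX, rdn='ou=Staff')

staff_users.create(properties={
    'uid': 'testuser',
    'cn': 'testuser',
    'sn': 'user',
    'uidNumber': '1000',
    'gidNumber': '2000',
    'homeDirectory': '/home/testuser',
})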
0a71c3db0f5a6a09f0dd3d736cd173aefe582613
389ds/389-ds-base
Issue 51070 - Port Import TET module to python3, part 2 Bug Description: Port the Import TET module to python3 (part 2). Fixes: https://pagure.io/389-ds-base/issue/51070 Author: aborah Reviewed by: Viktor Ashirov
commit 0a71c3db0f5a6a09f0dd3d736cd173aefe582613 Author: Anuj Borah <[email protected]> Date: Fri May 22 17:02:05 2020 +0530 Issue: 51070 - Port Import TET module to python3 part2 Bug Description: Port Import TET module to python3 part2 Fixes: https://pagure.io/389-ds-base/issue/51070 Author: aborah Reviewed by: Viktor Ashirov diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py index 1e4872dad..a9dcc7e14 100644 --- a/dirsrvtests/tests/suites/import/import_test.py +++ b/dirsrvtests/tests/suites/import/import_test.py @@ -13,7 +13,7 @@ Will test Import (Offline/Online) import os import pytest import time -import re +import shutil import glob import os from lib389.topologies import topology_st as topo @@ -22,6 +22,7 @@ from lib389.dbgen import dbgen_users from lib389.tasks import ImportTask from lib389.index import Indexes from lib389.monitor import Monitor +from lib389.backend import Backends from lib389.config import LDBMConfig from lib389.utils import ds_is_newer from lib389.idm.user import UserAccount, UserAccounts @@ -64,7 +65,9 @@ def _import_clean(request, topo): ldif_dir = topo.standalone.get_ldif_dir() import_ldif = ldif_dir + '/basic_import.ldif' - os.remove(import_ldif) + if os.path.exists(import_ldif): + os.remove(import_ldif) + request.addfinalizer(finofaci) @@ -116,6 +119,28 @@ def _import_online(topo, no_no): _search_for_user(topo, no_no) +def _create_bogus_ldif(topo): + """ + Will create bogus ldifs + """ + ldif_dir = topo.standalone.get_ldif_dir() + line1 = r'dn: cn=Eladio \"A\"\, Santabarbara\, (A\, B\, C),ou=Accounting, dc=example,dc=com' + line2 = """objectClass: top + objectClass: person + objectClass: organizationalPerson + objectClass: inetOrgPerson + cn: Eladio "A", Santabarbara, (A, B, C) + cn: Eladio Santabarbara + sn: Santabarbara + givenName: Eladio + ou: Accounting""" + with open(f'{ldif_dir}/bogus.dif', 'w') as out: + out.write(f'{line1}{line2}') + out.close() + import_ldif1 = ldif_dir + '/bogus.ldif' + return import_ldif1 + + def test_import_with_index(topo, _import_clean): """ Add an index, then import via cn=tasks @@ -168,6 +193,170 @@ def test_crash_on_ldif2db(topo, _import_clean): _import_offline(topo, 5) [email protected] +def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_clean): + """Should reject import of entries that's missing parent suffix + + :id: 27195cea-9c0e-11ea-800b-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Import the offending LDIF data - offline + 2. Violates schema, ending line + :expected results: + 1. Operation successful + 2. Operation Fail + """ + import_ldif1 = _create_bogus_ldif(topo) + # Import the offending LDIF data - offline + topo.standalone.stop() + topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) + # which violates schema, ending line + topo.standalone.searchErrorsLog('import_producer - import userRoot: Skipping entry ' + '"dc=example,dc=com" which violates schema') + topo.standalone.start() + + +def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean): + """Report during startup if nsslapd-cachememsize is too small + + :id: 1aa8cbda-9c0e-11ea-9297-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Set nsslapd-cache-autosize to 0 + 2. Change cachememsize + 3. Check that cachememsize is sufficiently small + 4. Import some users to make id2entry.db big + 5. Warning message should be there in error logs + :expected results: + 1. Operation successful + 2. Operation successful + 3. 
Operation successful + 4. Operation successful + 5. Operation successful + """ + config = LDBMConfig(topo.standalone) + backend = Backends(topo.standalone).list()[0] + # Set nsslapd-cache-autosize to 0 + config.replace('nsslapd-cache-autosize', '0') + # Change cachememsize + backend.replace('nsslapd-cachememsize', '1') + # Check that cachememsize is sufficiently small + assert int(backend.get_attr_val_utf8('nsslapd-cachememsize')) < 1500000 + # Import some users to make id2entry.db big + _import_offline(topo, 20) + # warning message should look like + assert topo.standalone.searchErrorsLog('INFO - ldbm_instance_config_cachememsize_set - ' + 'force a minimal value 512000') + + [email protected](scope="function") +def _toggle_private_import_mem(request, topo): + config = LDBMConfig(topo.standalone) + config.replace_many( + ('nsslapd-db-private-import-mem', 'on'), + ('nsslapd-import-cache-autosize', '0')) + + def finofaci(): + # nsslapd-import-cache-autosize: off and + # nsslapd-db-private-import-mem: off + config.replace_many( + ('nsslapd-db-private-import-mem', 'off')) + request.addfinalizer(finofaci) + + +def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): + """With nsslapd-db-private-import-mem: on is faster import. + + :id: 3044331c-9c0e-11ea-ac9f-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0 + 2. Measure offline import time duration total_time1 + 3. Now nsslapd-db-private-import-mem:off + 4. Measure offline import time duration total_time2 + 5. total_time1 < total_time2 + 6. Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1 + 7. Measure offline import time duration total_time1 + 8. Now nsslapd-db-private-import-mem:off + 9. Measure offline import time duration total_time2 + 10. total_time1 < total_time2 + :expected results: + 1. Operation successful + 2. Operation successful + 3. Operation successful + 4. Operation successful + 5. Operation successful + 6. Operation successful + 7. Operation successful + 8. Operation successful + 9. Operation successful + 10. 
Operation successful + """ + # Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0 + config = LDBMConfig(topo.standalone) + # Measure offline import time duration total_time1 + total_time1 = _import_offline(topo, 20) + # Now nsslapd-db-private-import-mem:off + config.replace('nsslapd-db-private-import-mem', 'off') + accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time2 + total_time2 = _import_offline(topo, 20) + # total_time1 < total_time2 + assert total_time1 < total_time2 + # Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1 + config.replace_many( + ('nsslapd-db-private-import-mem', 'on'), + ('nsslapd-import-cache-autosize', '-1')) + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time1 + total_time1 = _import_offline(topo, 20) + # Now nsslapd-db-private-import-mem:off + config.replace('nsslapd-db-private-import-mem', 'off') + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time2 + total_time2 = _import_offline(topo, 20) + # total_time1 < total_time2 + assert total_time1 < total_time2 + + [email protected] +def test_entry_with_escaped_characters_fails_to_import_and_index(topo, _import_clean): + """If missing entry_id is found, skip it and continue reading the primary db to be re indexed. + + :id: 358c938c-9c0e-11ea-adbc-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Import the example data from ldif. + 2. Remove some of the other entries that were successfully imported. + 3. Now re-index the database. + 4. Should not return error. + :expected results: + 1. Operation successful + 2. Operation successful + 3. Operation successful + 4. Operation successful + """ + # Import the example data from ldif + _import_offline(topo, 10) + count = 0 + # Remove some of the other entries that were successfully imported + for user1 in [user for user in Accounts(topo.standalone, DEFAULT_SUFFIX).list() if user.dn.startswith('uid')]: + if count <= 2: + UserAccount(topo.standalone, user1.dn).delete() + count += 1 + # Now re-index the database + topo.standalone.stop() + topo.standalone.db2index() + topo.standalone.start() + # Should not return error. + assert not topo.standalone.searchErrorsLog('error') + assert not topo.standalone.searchErrorsLog('foreman fifo error') + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode
0
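A hedged sketch of the offline-import flow the ported tests above exercise, built only from calls visible in the diff (get_ldif_dir, dbgen_users, ldif2db); the entry count and file name are illustrative, and 'standalone' is assumed to be a lib389 DirSrv instance such as topo.standalone.

# Sketch of the offline-import pattern used by the ported import tests.
# Assumes 'standalone' is a lib389 DirSrv instance (e.g. topo.standalone).
import os
from lib389._constants import DEFAULT_SUFFIX
from lib389.dbgen import dbgen_users

ldif_dir = standalone.get_ldif_dir()
import_ldif = os.path.join(ldif_dir, 'basic_import.ldif')

# Generate a small LDIF with 20 users under the default suffix.
dbgen_users(standalone, 20, import_ldif, DEFAULT_SUFFIX)

# Offline import: the server must be stopped before running ldif2db.
standalone.stop()
standalone.ldif2db('userRoot', None, None, None, import_ldif)
standalone.start()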
06e96874bbd844d48d775293ca8a891ed0108d28
389ds/389-ds-base
Issue 5534 - Add copyright text to the repository files Description: Our repository files need copyright text, and some of them are missing it. This commit adds the copyright block to the tests and to lib389. It also adds an automatic generator to the create_test.py script. Fixes: https://github.com/389ds/389-ds-base/issues/5534 Reviewed by: @mreynolds389, @progier389 (Thanks!)
commit 06e96874bbd844d48d775293ca8a891ed0108d28 Author: Simon Pichugin <[email protected]> Date: Thu Nov 17 08:31:50 2022 -0800 Issue 5534 - Add copyright text to the repository files Description: We need to have copyright texts around our files and some of it is missing. This commit adds the copyright to tests and lib389. Also, add an automatic generator in the create_test.py script. Fixes: https://github.com/389ds/389-ds-base/issues/5534 Reviewed by: @mreynolds389, @progier389 (Thanks!) diff --git a/dirsrvtests/check_for_duplicate_ids.py b/dirsrvtests/check_for_duplicate_ids.py index 3b7b24327..55ae97bc7 100644 --- a/dirsrvtests/check_for_duplicate_ids.py +++ b/dirsrvtests/check_for_duplicate_ids.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import subprocess import sys diff --git a/dirsrvtests/conftest.py b/dirsrvtests/conftest.py index d173de39a..cd1a969d5 100644 --- a/dirsrvtests/conftest.py +++ b/dirsrvtests/conftest.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import subprocess import logging import pytest diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py index ad87c3b0b..5331e7bac 100755 --- a/dirsrvtests/create_test.py +++ b/dirsrvtests/create_test.py @@ -12,6 +12,7 @@ import argparse, argcomplete import argcomplete +import datetime import optparse import os import re @@ -34,7 +35,7 @@ def display_usage(): '-s|--suite <suite name> ' + '[ i|--instances <number of standalone instances> ' + '[ -m|--suppliers <number of suppliers> -h|--hubs <number of hubs> ' + - '-c|--consumers <number of consumers> ] -o|--outputfile ]\n') + '-c|--consumers <number of consumers> ] -o|--outputfile -C|--copyright <name of the entity>]\n') print('If only "-t" is provided then a single standalone instance is ' + 'created. Or you can create a test suite script using ' + '"-s|--suite" instead of using "-t|--ticket". 
The "-i" option ' + @@ -158,6 +159,7 @@ if len(sys.argv) > 0: parser.add_argument('-o', '--filename', default=None, help="Custom test script file name") parser.add_argument('-u', '--uuid', action='store_true', help="Display a test case uuid to used for new test functions in script") + parser.add_argument('-C', '--copyright', default="Red Hat, Inc.", help="Add a copyright section in the beginning of the file") argcomplete.autocomplete(parser) args = parser.parse_args() @@ -242,6 +244,20 @@ if len(sys.argv) > 0: print("Can\'t open file:", filename) exit(1) + # Write the copyright section + if args.copyright: + today = datetime.date.today() + current_year = today.year + + TEST.write('# --- BEGIN COPYRIGHT BLOCK ---\n') + TEST.write('# Copyright (C) {} {}\n'.format(current_year, args.copyright)) + TEST.write('# All rights reserved.\n') + TEST.write('#\n') + TEST.write('# License: GPL (version 3 or any later version).\n') + TEST.write('# See LICENSE for details.\n') + TEST.write('# --- END COPYRIGHT BLOCK ---\n') + TEST.write('#\n') + # Write the imports if my_topology[0]: topology_import = 'from lib389.topologies import {} as topo\n'.format(my_topology[1]) diff --git a/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py index 70b73a257..35e377f33 100644 --- a/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py +++ b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import sys import time diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py index 94896bc38..e6e959cf4 100644 --- a/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py +++ b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import sys import time diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py index 1a7521a73..c9fd71ed2 100644 --- a/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py +++ b/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import sys import time diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py index af1990ab5..98f4f2376 100644 --- a/dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py +++ b/dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py index 13d96f324..8d25384bf 100644 --- a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py +++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_test.py index b34747e09..e1976bd78 100644 --- a/dirsrvtests/tests/suites/automember_plugin/automember_test.py +++ b/dirsrvtests/tests/suites/automember_plugin/automember_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 alisha17 <[email protected]> +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/backups/backup_test.py b/dirsrvtests/tests/suites/backups/backup_test.py index 4319ae70e..11e60c760 100644 --- a/dirsrvtests/tests/suites/backups/backup_test.py +++ b/dirsrvtests/tests/suites/backups/backup_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/clu/dsctl_tls_test.py b/dirsrvtests/tests/suites/clu/dsctl_tls_test.py index 4910448c5..d292b7f26 100644 --- a/dirsrvtests/tests/suites/clu/dsctl_tls_test.py +++ b/dirsrvtests/tests/suites/clu/dsctl_tls_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import ssl diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py index 1b27700ec..d238463a6 100644 --- a/dirsrvtests/tests/suites/clu/dsrc_test.py +++ b/dirsrvtests/tests/suites/clu/dsrc_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/clu/schema_test.py b/dirsrvtests/tests/suites/clu/schema_test.py index 6ba541548..d210a2c8f 100644 --- a/dirsrvtests/tests/suites/clu/schema_test.py +++ b/dirsrvtests/tests/suites/clu/schema_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py index 4bbffb748..2ddae0fa9 100644 --- a/dirsrvtests/tests/suites/config/compact_test.py +++ b/dirsrvtests/tests/suites/config/compact_test.py @@ -5,7 +5,11 @@ # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- +<<<<<<< HEAD +======= +# +>>>>>>> 4a5f6a8c2 (Issue 5534 - Add copyright text to the repository files) import logging import pytest import os diff --git a/dirsrvtests/tests/suites/get_effective_rights/acceptance_test.py b/dirsrvtests/tests/suites/get_effective_rights/acceptance_test.py index 139f9966c..bec9110ed 100644 --- a/dirsrvtests/tests/suites/get_effective_rights/acceptance_test.py +++ b/dirsrvtests/tests/suites/get_effective_rights/acceptance_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest import logging from ldap.controls import GetEffectiveRightsControl diff --git a/dirsrvtests/tests/suites/ldapi/ldapi_test.py b/dirsrvtests/tests/suites/ldapi/ldapi_test.py index 0dfa57aeb..a101f1de5 100644 --- a/dirsrvtests/tests/suites/ldapi/ldapi_test.py +++ b/dirsrvtests/tests/suites/ldapi/ldapi_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/lib389/config_compare_test.py b/dirsrvtests/tests/suites/lib389/config_compare_test.py index 84f55acfa..9ed4da6c8 100644 --- a/dirsrvtests/tests/suites/lib389/config_compare_test.py +++ b/dirsrvtests/tests/suites/lib389/config_compare_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown <[email protected]> +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import pytest diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py index ccde0f6b0..f2e270e54 100644 --- a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py +++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import pytest from lib389._constants import DEFAULT_SUFFIX diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py index bf8175ae0..54e104259 100644 --- a/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py +++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown <[email protected]> +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import os import pytest from lib389._constants import DEFAULT_SUFFIX diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py index 4703bb4fc..d1db545b6 100644 --- a/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py +++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import pytest from lib389._constants import DEFAULT_SUFFIX diff --git a/dirsrvtests/tests/suites/logging/logging_config_test.py b/dirsrvtests/tests/suites/logging/logging_config_test.py index 94bb884a2..c9b630c69 100644 --- a/dirsrvtests/tests/suites/logging/logging_config_test.py +++ b/dirsrvtests/tests/suites/logging/logging_config_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py b/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py index 18928724c..d71d2a81d 100644 --- a/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py +++ b/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py b/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py index 9566e144c..7681cdc58 100644 --- a/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py +++ b/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import ldap import logging import pytest diff --git a/dirsrvtests/tests/suites/monitor/monitor_test.py b/dirsrvtests/tests/suites/monitor/monitor_test.py index 8684bfac3..0f9d551db 100644 --- a/dirsrvtests/tests/suites/monitor/monitor_test.py +++ b/dirsrvtests/tests/suites/monitor/monitor_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/password/pw_expired_access_test.py b/dirsrvtests/tests/suites/password/pw_expired_access_test.py index fb0afb190..c83b6a472 100644 --- a/dirsrvtests/tests/suites/password/pw_expired_access_test.py +++ b/dirsrvtests/tests/suites/password/pw_expired_access_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import ldap import logging import pytest diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py index ab1974c02..0015a1fb5 100644 --- a/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py +++ b/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_logging_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_logging_test.py index 6754fc0f3..cd19cc9e4 100644 --- a/dirsrvtests/tests/suites/password/pwdPolicy_logging_test.py +++ b/dirsrvtests/tests/suites/password/pwdPolicy_logging_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import ldap import logging import pytest diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py index bf495063d..ae4eb300f 100644 --- a/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py +++ b/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/plugins/deref_aci_test.py b/dirsrvtests/tests/suites/plugins/deref_aci_test.py index ee64ff19f..acb89ea81 100644 --- a/dirsrvtests/tests/suites/plugins/deref_aci_test.py +++ b/dirsrvtests/tests/suites/plugins/deref_aci_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import logging import pytest diff --git a/dirsrvtests/tests/suites/replication/changelog_encryption_test.py b/dirsrvtests/tests/suites/replication/changelog_encryption_test.py index 892150d50..dc16fcee7 100644 --- a/dirsrvtests/tests/suites/replication/changelog_encryption_test.py +++ b/dirsrvtests/tests/suites/replication/changelog_encryption_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py index 0c4f2e3e2..ea8a68854 100644 --- a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py +++ b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/replication/multiple_changelogs_test.py b/dirsrvtests/tests/suites/replication/multiple_changelogs_test.py index 93ba8b7ab..ea24c7acd 100644 --- a/dirsrvtests/tests/suites/replication/multiple_changelogs_test.py +++ b/dirsrvtests/tests/suites/replication/multiple_changelogs_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import ldap import logging import pytest diff --git a/dirsrvtests/tests/suites/replication/promote_demote_test.py b/dirsrvtests/tests/suites/replication/promote_demote_test.py index 040fff3ba..c72e93287 100644 --- a/dirsrvtests/tests/suites/replication/promote_demote_test.py +++ b/dirsrvtests/tests/suites/replication/promote_demote_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/replication/repl_agmt_bootstrap_test.py b/dirsrvtests/tests/suites/replication/repl_agmt_bootstrap_test.py index fbd126960..0697cdc8d 100644 --- a/dirsrvtests/tests/suites/replication/repl_agmt_bootstrap_test.py +++ b/dirsrvtests/tests/suites/replication/repl_agmt_bootstrap_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/replication/replica_config_test.py b/dirsrvtests/tests/suites/replication/replica_config_test.py index e3c1d7ff4..9e8146f4c 100644 --- a/dirsrvtests/tests/suites/replication/replica_config_test.py +++ b/dirsrvtests/tests/suites/replication/replica_config_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import copy diff --git a/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py index cdba156b6..2d8cd9ac9 100644 --- a/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py +++ b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py index b1dfe962c..1e9ee27c7 100644 --- a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py +++ b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/rewriters/adfilter_test.py b/dirsrvtests/tests/suites/rewriters/adfilter_test.py index 5bef46ef3..369bdfe08 100644 --- a/dirsrvtests/tests/suites/rewriters/adfilter_test.py +++ b/dirsrvtests/tests/suites/rewriters/adfilter_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest import glob import base64 diff --git a/dirsrvtests/tests/suites/rewriters/basic_test.py b/dirsrvtests/tests/suites/rewriters/basic_test.py index db86c88f9..9e0b5e473 100644 --- a/dirsrvtests/tests/suites/rewriters/basic_test.py +++ b/dirsrvtests/tests/suites/rewriters/basic_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest import glob from lib389.tasks import * diff --git a/dirsrvtests/tests/suites/state/mmt_state_test.py b/dirsrvtests/tests/suites/state/mmt_state_test.py index 49fb3354d..7f10b4bdf 100644 --- a/dirsrvtests/tests/suites/state/mmt_state_test.py +++ b/dirsrvtests/tests/suites/state/mmt_state_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import logging import ldap diff --git a/dirsrvtests/tests/suites/tls/cipher_test.py b/dirsrvtests/tests/suites/tls/cipher_test.py index 872396179..523e2bd20 100644 --- a/dirsrvtests/tests/suites/tls/cipher_test.py +++ b/dirsrvtests/tests/suites/tls/cipher_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest import os from lib389.config import Encryption diff --git a/dirsrvtests/tests/suites/tls/ecdsa_test.py b/dirsrvtests/tests/suites/tls/ecdsa_test.py index 7b7b3a7b1..df8897748 100644 --- a/dirsrvtests/tests/suites/tls/ecdsa_test.py +++ b/dirsrvtests/tests/suites/tls/ecdsa_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/tls/ssl_version_test.py b/dirsrvtests/tests/suites/tls/ssl_version_test.py index 28fcff593..f0dde0803 100644 --- a/dirsrvtests/tests/suites/tls/ssl_version_test.py +++ b/dirsrvtests/tests/suites/tls/ssl_version_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/suites/upgrade/upgrade_repl_plugin_test.py b/dirsrvtests/tests/suites/upgrade/upgrade_repl_plugin_test.py index 31c20b700..ac140b0ce 100644 --- a/dirsrvtests/tests/suites/upgrade/upgrade_repl_plugin_test.py +++ b/dirsrvtests/tests/suites/upgrade/upgrade_repl_plugin_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import ldap import logging import pytest diff --git a/dirsrvtests/tests/tickets/ticket47931_test.py b/dirsrvtests/tests/tickets/ticket47931_test.py index 238799263..03d5c7106 100644 --- a/dirsrvtests/tests/tickets/ticket47931_test.py +++ b/dirsrvtests/tests/tickets/ticket47931_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import threading import time import pytest diff --git a/dirsrvtests/tests/tickets/ticket47976_test.py b/dirsrvtests/tests/tickets/ticket47976_test.py index 47ea709fc..efdaeb1fe 100644 --- a/dirsrvtests/tests/tickets/ticket47976_test.py +++ b/dirsrvtests/tests/tickets/ticket47976_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48212_test.py b/dirsrvtests/tests/tickets/ticket48212_test.py index ee58a82fe..3746859dc 100644 --- a/dirsrvtests/tests/tickets/ticket48212_test.py +++ b/dirsrvtests/tests/tickets/ticket48212_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48214_test.py b/dirsrvtests/tests/tickets/ticket48214_test.py index a2e71f53d..1d15239ef 100644 --- a/dirsrvtests/tests/tickets/ticket48214_test.py +++ b/dirsrvtests/tests/tickets/ticket48214_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest diff --git a/dirsrvtests/tests/tickets/ticket48233_test.py b/dirsrvtests/tests/tickets/ticket48233_test.py index 3eee70fe7..6c6c504c1 100644 --- a/dirsrvtests/tests/tickets/ticket48233_test.py +++ b/dirsrvtests/tests/tickets/ticket48233_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.utils import * from lib389.topologies import topology_st diff --git a/dirsrvtests/tests/tickets/ticket48266_test.py b/dirsrvtests/tests/tickets/ticket48266_test.py index 740f80c64..5c033c0f3 100644 --- a/dirsrvtests/tests/tickets/ticket48266_test.py +++ b/dirsrvtests/tests/tickets/ticket48266_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48270_test.py b/dirsrvtests/tests/tickets/ticket48270_test.py index 3dcbfbff3..3d1e7fffa 100644 --- a/dirsrvtests/tests/tickets/ticket48270_test.py +++ b/dirsrvtests/tests/tickets/ticket48270_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48272_test.py b/dirsrvtests/tests/tickets/ticket48272_test.py index 5d79d2817..35aba7e14 100644 --- a/dirsrvtests/tests/tickets/ticket48272_test.py +++ b/dirsrvtests/tests/tickets/ticket48272_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48312_test.py b/dirsrvtests/tests/tickets/ticket48312_test.py index d7da6a5ed..0403a025c 100644 --- a/dirsrvtests/tests/tickets/ticket48312_test.py +++ b/dirsrvtests/tests/tickets/ticket48312_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48325_test.py b/dirsrvtests/tests/tickets/ticket48325_test.py index 672d2b211..a1d89cd58 100644 --- a/dirsrvtests/tests/tickets/ticket48325_test.py +++ b/dirsrvtests/tests/tickets/ticket48325_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.utils import * from lib389.tasks import * diff --git a/dirsrvtests/tests/tickets/ticket48342_test.py b/dirsrvtests/tests/tickets/ticket48342_test.py index fedc30157..3fbab2613 100644 --- a/dirsrvtests/tests/tickets/ticket48342_test.py +++ b/dirsrvtests/tests/tickets/ticket48342_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48354_test.py b/dirsrvtests/tests/tickets/ticket48354_test.py index 73cf307e0..34f3c4198 100644 --- a/dirsrvtests/tests/tickets/ticket48354_test.py +++ b/dirsrvtests/tests/tickets/ticket48354_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.utils import * from lib389.topologies import topology_st diff --git a/dirsrvtests/tests/tickets/ticket48370_test.py b/dirsrvtests/tests/tickets/ticket48370_test.py index 836888f79..3b5d89e54 100644 --- a/dirsrvtests/tests/tickets/ticket48370_test.py +++ b/dirsrvtests/tests/tickets/ticket48370_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48383_test.py b/dirsrvtests/tests/tickets/ticket48383_test.py index 7d52a8f94..3ae53eb44 100644 --- a/dirsrvtests/tests/tickets/ticket48383_test.py +++ b/dirsrvtests/tests/tickets/ticket48383_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import random import string diff --git a/dirsrvtests/tests/tickets/ticket48497_test.py b/dirsrvtests/tests/tickets/ticket48497_test.py index df10b6b12..0e27232ee 100644 --- a/dirsrvtests/tests/tickets/ticket48497_test.py +++ b/dirsrvtests/tests/tickets/ticket48497_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48637_test.py b/dirsrvtests/tests/tickets/ticket48637_test.py index d33c861d2..fbb731b2b 100644 --- a/dirsrvtests/tests/tickets/ticket48637_test.py +++ b/dirsrvtests/tests/tickets/ticket48637_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48665_test.py b/dirsrvtests/tests/tickets/ticket48665_test.py index 4216a3b1e..1781cd1e1 100644 --- a/dirsrvtests/tests/tickets/ticket48665_test.py +++ b/dirsrvtests/tests/tickets/ticket48665_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.utils import * from lib389.topologies import topology_st diff --git a/dirsrvtests/tests/tickets/ticket48745_test.py b/dirsrvtests/tests/tickets/ticket48745_test.py index f0bcaa16f..ce07b34f5 100644 --- a/dirsrvtests/tests/tickets/ticket48745_test.py +++ b/dirsrvtests/tests/tickets/ticket48745_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48746_test.py b/dirsrvtests/tests/tickets/ticket48746_test.py index f574c49cb..8b2b72ac0 100644 --- a/dirsrvtests/tests/tickets/ticket48746_test.py +++ b/dirsrvtests/tests/tickets/ticket48746_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48798_test.py b/dirsrvtests/tests/tickets/ticket48798_test.py index a5132925c..2c036787e 100644 --- a/dirsrvtests/tests/tickets/ticket48798_test.py +++ b/dirsrvtests/tests/tickets/ticket48798_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# from subprocess import check_output import pytest diff --git a/dirsrvtests/tests/tickets/ticket48799_test.py b/dirsrvtests/tests/tickets/ticket48799_test.py index e7efc3b92..a3962206c 100644 --- a/dirsrvtests/tests/tickets/ticket48799_test.py +++ b/dirsrvtests/tests/tickets/ticket48799_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48808_test.py b/dirsrvtests/tests/tickets/ticket48808_test.py index 7ac5a76dd..646a9a7f8 100644 --- a/dirsrvtests/tests/tickets/ticket48808_test.py +++ b/dirsrvtests/tests/tickets/ticket48808_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# from random import sample import pytest diff --git a/dirsrvtests/tests/tickets/ticket48844_test.py b/dirsrvtests/tests/tickets/ticket48844_test.py index 59e4cbd29..04da4ef7f 100644 --- a/dirsrvtests/tests/tickets/ticket48844_test.py +++ b/dirsrvtests/tests/tickets/ticket48844_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48893_test.py b/dirsrvtests/tests/tickets/ticket48893_test.py index 7b811d9ca..f31e26a49 100644 --- a/dirsrvtests/tests/tickets/ticket48893_test.py +++ b/dirsrvtests/tests/tickets/ticket48893_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.utils import * from lib389.topologies import topology_st diff --git a/dirsrvtests/tests/tickets/ticket48916_test.py b/dirsrvtests/tests/tickets/ticket48916_test.py index 07ac8cc82..b36305cf0 100644 --- a/dirsrvtests/tests/tickets/ticket48916_test.py +++ b/dirsrvtests/tests/tickets/ticket48916_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket48956_test.py b/dirsrvtests/tests/tickets/ticket48956_test.py index a2a1b3ad3..53e97d624 100644 --- a/dirsrvtests/tests/tickets/ticket48956_test.py +++ b/dirsrvtests/tests/tickets/ticket48956_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest import subprocess from lib389.tasks import * diff --git a/dirsrvtests/tests/tickets/ticket48973_test.py b/dirsrvtests/tests/tickets/ticket48973_test.py index 5adca3dd2..2fd70045f 100644 --- a/dirsrvtests/tests/tickets/ticket48973_test.py +++ b/dirsrvtests/tests/tickets/ticket48973_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import sys import time diff --git a/dirsrvtests/tests/tickets/ticket49008_test.py b/dirsrvtests/tests/tickets/ticket49008_test.py index 1306b723b..2f3e09500 100644 --- a/dirsrvtests/tests/tickets/ticket49008_test.py +++ b/dirsrvtests/tests/tickets/ticket49008_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket49020_test.py b/dirsrvtests/tests/tickets/ticket49020_test.py index b9652ba9a..f83f0ecb8 100644 --- a/dirsrvtests/tests/tickets/ticket49020_test.py +++ b/dirsrvtests/tests/tickets/ticket49020_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49039_test.py b/dirsrvtests/tests/tickets/ticket49039_test.py index 8938b142f..0313f69a3 100644 --- a/dirsrvtests/tests/tickets/ticket49039_test.py +++ b/dirsrvtests/tests/tickets/ticket49039_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49073_test.py b/dirsrvtests/tests/tickets/ticket49073_test.py index 5ed2255ef..5f75797ff 100644 --- a/dirsrvtests/tests/tickets/ticket49073_test.py +++ b/dirsrvtests/tests/tickets/ticket49073_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.tasks import * from lib389.utils import * diff --git a/dirsrvtests/tests/tickets/ticket49076_test.py b/dirsrvtests/tests/tickets/ticket49076_test.py index 74b6312de..ac5ebd5e4 100644 --- a/dirsrvtests/tests/tickets/ticket49076_test.py +++ b/dirsrvtests/tests/tickets/ticket49076_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49095_test.py b/dirsrvtests/tests/tickets/ticket49095_test.py index 3c49fcae4..a8e359890 100644 --- a/dirsrvtests/tests/tickets/ticket49095_test.py +++ b/dirsrvtests/tests/tickets/ticket49095_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49122_test.py b/dirsrvtests/tests/tickets/ticket49122_test.py index 651cd50c9..34f63d1dc 100644 --- a/dirsrvtests/tests/tickets/ticket49122_test.py +++ b/dirsrvtests/tests/tickets/ticket49122_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49184_test.py b/dirsrvtests/tests/tickets/ticket49184_test.py index 4ec78b3a0..d9c5bb39b 100644 --- a/dirsrvtests/tests/tickets/ticket49184_test.py +++ b/dirsrvtests/tests/tickets/ticket49184_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49192_test.py b/dirsrvtests/tests/tickets/ticket49192_test.py index 21be4eb26..0330a8be7 100644 --- a/dirsrvtests/tests/tickets/ticket49192_test.py +++ b/dirsrvtests/tests/tickets/ticket49192_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49227_test.py b/dirsrvtests/tests/tickets/ticket49227_test.py index c828c2d91..a58c62730 100644 --- a/dirsrvtests/tests/tickets/ticket49227_test.py +++ b/dirsrvtests/tests/tickets/ticket49227_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import time import ldap diff --git a/dirsrvtests/tests/tickets/ticket49249_test.py b/dirsrvtests/tests/tickets/ticket49249_test.py index 83d2259a8..6d2d2ecae 100644 --- a/dirsrvtests/tests/tickets/ticket49249_test.py +++ b/dirsrvtests/tests/tickets/ticket49249_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49386_test.py b/dirsrvtests/tests/tickets/ticket49386_test.py index 31c9257a6..c6a59ea6a 100644 --- a/dirsrvtests/tests/tickets/ticket49386_test.py +++ b/dirsrvtests/tests/tickets/ticket49386_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/tickets/ticket49412_test.py b/dirsrvtests/tests/tickets/ticket49412_test.py index 9882820b7..895e02c9f 100644 --- a/dirsrvtests/tests/tickets/ticket49412_test.py +++ b/dirsrvtests/tests/tickets/ticket49412_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/tickets/ticket49441_test.py b/dirsrvtests/tests/tickets/ticket49441_test.py index 7beda62bc..39cf881c7 100644 --- a/dirsrvtests/tests/tickets/ticket49441_test.py +++ b/dirsrvtests/tests/tickets/ticket49441_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/tickets/ticket49460_test.py b/dirsrvtests/tests/tickets/ticket49460_test.py index 0132adaa2..224969f75 100644 --- a/dirsrvtests/tests/tickets/ticket49460_test.py +++ b/dirsrvtests/tests/tickets/ticket49460_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49463_test.py b/dirsrvtests/tests/tickets/ticket49463_test.py index e1b35abd1..d924ec434 100644 --- a/dirsrvtests/tests/tickets/ticket49463_test.py +++ b/dirsrvtests/tests/tickets/ticket49463_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. 
+# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import time import ldap import logging diff --git a/dirsrvtests/tests/tickets/ticket49471_test.py b/dirsrvtests/tests/tickets/ticket49471_test.py index c75d7a788..058a74160 100644 --- a/dirsrvtests/tests/tickets/ticket49471_test.py +++ b/dirsrvtests/tests/tickets/ticket49471_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/tickets/ticket49540_test.py b/dirsrvtests/tests/tickets/ticket49540_test.py index 74bb7db3b..5711eee25 100644 --- a/dirsrvtests/tests/tickets/ticket49540_test.py +++ b/dirsrvtests/tests/tickets/ticket49540_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/tickets/ticket49658_test.py b/dirsrvtests/tests/tickets/ticket49658_test.py index f9c4a9cfc..c3a1db43f 100644 --- a/dirsrvtests/tests/tickets/ticket49658_test.py +++ b/dirsrvtests/tests/tickets/ticket49658_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import pytest import os diff --git a/dirsrvtests/tests/tickets/ticket49788_test.py b/dirsrvtests/tests/tickets/ticket49788_test.py index 663795726..b755d22c4 100644 --- a/dirsrvtests/tests/tickets/ticket49788_test.py +++ b/dirsrvtests/tests/tickets/ticket49788_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Dj Padzensky <[email protected]> +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging import time diff --git a/dirsrvtests/tests/tickets/ticket50078_test.py b/dirsrvtests/tests/tickets/ticket50078_test.py index a03e5720c..96362f9b9 100644 --- a/dirsrvtests/tests/tickets/ticket50078_test.py +++ b/dirsrvtests/tests/tickets/ticket50078_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest from lib389.utils import * from lib389.topologies import topology_m1h1c1 diff --git a/src/lib389/lib389/_mapped_object_lint.py b/src/lib389/lib389/_mapped_object_lint.py index b0eb2d002..49efdfdfb 100644 --- a/src/lib389/lib389/_mapped_object_lint.py +++ b/src/lib389/lib389/_mapped_object_lint.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# from abc import ABC, abstractmethod from functools import partial from inspect import signature diff --git a/src/lib389/lib389/cli_ctl/tls.py b/src/lib389/lib389/cli_ctl/tls.py index c5a896aac..e4b208e65 100644 --- a/src/lib389/lib389/cli_ctl/tls.py +++ b/src/lib389/lib389/cli_ctl/tls.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os from lib389.nss_ssl import NssSsl, CERT_NAME, CA_NAME from lib389.cli_base import _warn diff --git a/src/lib389/lib389/tests/aci_parse_test.py b/src/lib389/lib389/tests/aci_parse_test.py index 55a8ed6c3..f12a2724f 100644 --- a/src/lib389/lib389/tests/aci_parse_test.py +++ b/src/lib389/lib389/tests/aci_parse_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# from lib389._entry import EntryAci from lib389.utils import * import pytest diff --git a/src/lib389/lib389/tests/cli/conf_plugins/automember_test.py b/src/lib389/lib389/tests/cli/conf_plugins/automember_test.py index 249709cab..90b16ec3d 100644 --- a/src/lib389/lib389/tests/cli/conf_plugins/automember_test.py +++ b/src/lib389/lib389/tests/cli/conf_plugins/automember_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 alisha17 <[email protected]> +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import pytest import ldap diff --git a/src/lib389/lib389/tests/cli/conf_pwpolicy_test.py b/src/lib389/lib389/tests/cli/conf_pwpolicy_test.py index f7285c7f0..3e7a3813b 100644 --- a/src/lib389/lib389/tests/cli/conf_pwpolicy_test.py +++ b/src/lib389/lib389/tests/cli/conf_pwpolicy_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import io import sys import pytest diff --git a/src/lib389/lib389/tests/config.py b/src/lib389/lib389/tests/config.py index 4887b614f..dc98d4ce7 100644 --- a/src/lib389/lib389/tests/config.py +++ b/src/lib389/lib389/tests/config.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import logging logging.basicConfig(level=logging.DEBUG) diff --git a/src/lib389/lib389/tests/idm/services_test.py b/src/lib389/lib389/tests/idm/services_test.py index f9bbe873f..193a1c7ef 100644 --- a/src/lib389/lib389/tests/idm/services_test.py +++ b/src/lib389/lib389/tests/idm/services_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# import os import sys import time diff --git a/src/lib389/lib389/tests/idm/user_and_group_test.py b/src/lib389/lib389/tests/idm/user_and_group_test.py index 09ff74e97..8adeacd54 100644 --- a/src/lib389/lib389/tests/idm/user_and_group_test.py +++ b/src/lib389/lib389/tests/idm/user_and_group_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# import os import sys import time diff --git a/src/lib389/lib389/tests/mapped_object_lint_test.py b/src/lib389/lib389/tests/mapped_object_lint_test.py index a4ca0ea3c..28b2ac887 100644 --- a/src/lib389/lib389/tests/mapped_object_lint_test.py +++ b/src/lib389/lib389/tests/mapped_object_lint_test.py @@ -1,3 +1,11 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# from typing import List import pytest diff --git a/src/lib389/setup.py b/src/lib389/setup.py index 9bc7e1b9d..aa05756e4 100644 --- a/src/lib389/setup.py +++ b/src/lib389/setup.py @@ -52,7 +52,7 @@ setup( long_description=long_description, url='http://www.port389.org/docs/389ds/FAQ/upstream-test-framework.html', - author='Red Hat Inc., and William Brown', + author='Red Hat, Inc., and William Brown', author_email='[email protected]', classifiers=[
0
b4940b547e06f545e19e14cba8c0fd8311f7f8ae
389ds/389-ds-base
Resolves: bug 230498 Bug Description: allow ds_newinst with ldapi and no serverport Reviewed by: nkinder, nhosoi (Thanks!) Fix Description: Two new fields have been added to the ds_newinst .inf files: ldapifilepath - the full path and file name of the server ldapi file start_server - if present and has a value of 0, this tells ds_newinst not to start the server - default is 1 The ds_newinst code has been changed to allow an empty or "0" value servport if an ldapifilepath is given (and ENABLE_LDAPI is defined). Either a valid server port or an ldapifilepath must be provided, or both. In addition, I changed ds_newinst.pl to accept a .inf file given on stdin. Platforms tested: RHEL4, FC6 Flag Day: no Doc impact: We will have to document ldapi support on the wiki.
commit b4940b547e06f545e19e14cba8c0fd8311f7f8ae Author: Rich Megginson <[email protected]> Date: Thu Mar 1 03:34:23 2007 +0000 Resolves: bug 230498 Bug Description: allow ds_newinst with ldapi and no serverport Reviewed by: nkinder, nhosoi (Thanks!) Fix Description: Two new fields have been added to the ds_newinst .inf files: ldapifilepath - the full path and file name of the server ldapi file start_server - if present and has a value of 0, this tells ds_newinst not to start the server - default is 1 The ds_newinst code has been changed to allow an empty or "0" value servport if an ldapifilepath is given (and ENABLE_LDAPI is defined). Either a valid server port or an ldapifilepath must be provided, or both. In addition, I changed ds_newinst.pl to accept a .inf file given on stdin. Platforms tested: RHEL4, FC6 Flag Day: no Doc impact: We will have to document ldapi support on the wiki. diff --git a/ldap/admin/src/create_instance.c b/ldap/admin/src/create_instance.c index d0c34bc07..c6ce6dcf9 100644 --- a/ldap/admin/src/create_instance.c +++ b/ldap/admin/src/create_instance.c @@ -253,7 +253,6 @@ void set_defaults(char *sroot, char *hn, server_config_s *conf) conf->servname = hn; conf->bindaddr = ""; - conf->servport = "80"; conf->cfg_sspt = NULL; conf->suitespot3x_uid = NULL; conf->cfg_sspt_uid = NULL; @@ -331,6 +330,7 @@ void set_defaults(char *sroot, char *hn, server_config_s *conf) /* ----------------- Sanity check a server configuration ------------------ */ char *create_instance_checkport(char *, char *); +char *create_instance_checkports(server_config_s *cf); char *create_instance_checkuser(char *); int create_instance_numbers(char *); int create_instance_exists(char *fn, int type); @@ -441,7 +441,7 @@ static char *sanity_check(server_config_s *cf, char *param_name) */ if (!needToStartServer(cf)) { - if( (t = create_instance_checkport(cf->bindaddr, cf->servport)) ) + if( (t = create_instance_checkports(cf))) { PL_strncpyz(param_name, "servport", BIG_LINE); return t; @@ -1418,6 +1418,20 @@ int tryuser(char *user) /* --------------------------- create_instance_check* ---------------------------- */ +char *create_instance_checkports(server_config_s *cf) +{ + /* allow port 0 if ldapifilepath is specified */ +#if defined(ENABLE_LDAPI) + if (!cf->ldapifilepath || strcmp(cf->servport, "0")) { +#endif + return create_instance_checkport(cf->bindaddr, cf->servport); +#if defined(ENABLE_LDAPI) + } +#endif + + return NULL; +} + char *create_instance_checkport(char *addr, char *sport) { @@ -2687,7 +2701,11 @@ char *ds_gen_confs(char *sroot, server_config_s *cf, char *cs_path) fprintf(f, "nsslapd-ssl-check-hostname: on\n"); fprintf(f, "nsslapd-port: %s\n", cf->servport); #if defined(ENABLE_LDAPI) - fprintf(f, "nsslapd-ldapifilepath: %s/%s-%s.socket\n", cf->run_dir, PRODUCT_NAME, cf->servid); + if (cf->ldapifilepath) { + fprintf(f, "nsslapd-ldapifilepath: %s\n", cf->ldapifilepath); + } else { + fprintf(f, "nsslapd-ldapifilepath: %s/%s-%s.socket\n", cf->run_dir, PRODUCT_NAME, cf->servid); + } fprintf(f, "nsslapd-ldapilisten: on\n"); #if defined(ENABLE_AUTOBIND) fprintf(f, "nsslapd-ldapiautobind: on\n"); @@ -4003,9 +4021,10 @@ static char *install_ds(char *sroot, server_config_s *cf, char *param_name) it or if we are configuring the server to serve as the repository for SuiteSpot (Mission Control) information Only attempt to start the server if the port is not in use + In order to start the server, there must either be an ldapifilepath + specified or a valid port. 
If the port is not "0" it must be valid. */ - if(needToStartServer(cf) && - !(t = create_instance_checkport(cf->bindaddr, cf->servport))) + if(needToStartServer(cf) && !(t = create_instance_checkports(cf))) { PR_snprintf(big_line, sizeof(big_line),"SERVER_NAMES=slapd-%s",cf->servid); putenv(big_line); @@ -4366,12 +4385,33 @@ int parse_form(server_config_s *cf) } cf->bindaddr = ds_a_get_cgi_var("bindaddr", NULL, NULL); - if (!(cf->servport = ds_a_get_cgi_var("servport", "Server Port", - "Please specify the TCP port number for this server."))) - { +#if defined(ENABLE_LDAPI) + temp = ds_a_get_cgi_var("ldapifilepath", NULL, NULL); + if (NULL != temp) { + cf->ldapifilepath = PL_strdup(temp); + } +#endif + + temp = ds_a_get_cgi_var("servport", NULL, NULL); + if (!temp +#if defined(ENABLE_LDAPI) + && !cf->ldapifilepath +#endif + ) { +#if defined(ENABLE_LDAPI) + ds_show_message("error: either servport or ldapifilepath must be specified."); +#else + ds_show_message("error: servport must be specified."); +#endif return 1; } + if (NULL != temp) { + cf->servport = PL_strdup(temp); + } else { + cf->servport = PL_strdup("0"); + } + cf->cfg_sspt = ds_a_get_cgi_var("cfg_sspt", NULL, NULL); cf->cfg_sspt_uid = ds_a_get_cgi_var("cfg_sspt_uid", NULL, NULL); if (cf->cfg_sspt_uid && *(cf->cfg_sspt_uid) && diff --git a/ldap/admin/src/create_instance.h b/ldap/admin/src/create_instance.h index 27f9bdaf7..d3babf2af 100644 --- a/ldap/admin/src/create_instance.h +++ b/ldap/admin/src/create_instance.h @@ -182,6 +182,9 @@ typedef struct { char *cert_dir; char *sasl_path; char *prefix; +#if defined(ENABLE_LDAPI) + char *ldapifilepath; +#endif } server_config_s; diff --git a/ldap/admin/src/ds_newinst.pl.in b/ldap/admin/src/ds_newinst.pl.in index 11124bd70..b037a50e8 100644 --- a/ldap/admin/src/ds_newinst.pl.in +++ b/ldap/admin/src/ds_newinst.pl.in @@ -44,7 +44,8 @@ use File::Basename; sub usage { my $msg = shift; print "Error: $msg\n"; - print "Usage: $0 filename.inf\n"; + print "Usage: $0 [-|filename.inf]\n"; + print "Use - to read from stdin\n"; exit 1 } @@ -136,7 +137,7 @@ sub addAndCheck { } my $filename = $ARGV[0]; -usage("$filename not found") if (! -f $filename); +usage("$filename not found") if ($filename ne "-" && ! -f $filename); my $curSection; # each key in the table is a section name @@ -145,8 +146,14 @@ my $curSection; # and the value is the config param value my %table = (); -open(IN, $filename); -while (<IN>) { +my $fh; +if ($filename eq "-") { + $fh = \*STDIN; +} else { + open(IN, $filename); + $fh = \*IN; +} +while (<$fh>) { # e.g. [General] if (/^\[(.*?)\]/) { $curSection = $1; @@ -158,7 +165,9 @@ while (<IN>) { $table{$curSection}->{$1} = $2; } } -close IN; +if ($filename ne "-") { + close IN; +} #printhash (\%table); @@ -171,12 +180,29 @@ my $package_name = "@package_name@"; addAndCheck(\%cgiargs, "sroot", \%table, "General", "ServerRoot"); addAndCheck(\%cgiargs, "servname", \%table, "General", "FullMachineName"); addAndCheck(\%cgiargs, "servuser", \%table, "General", "SuiteSpotUserID"); -addAndCheck(\%cgiargs, "servport", \%table, "slapd", "ServerPort"); addAndCheck(\%cgiargs, "rootdn", \%table, "slapd", "RootDN"); addAndCheck(\%cgiargs, "rootpw", \%table, "slapd", "RootDNPwd"); addAndCheck(\%cgiargs, "servid", \%table, "slapd", "ServerIdentifier"); addAndCheck(\%cgiargs, "suffix", \%table, "slapd", "Suffix"); +# either servport or ldapifilepath must be specified - the server must +# listen to something . . . 
+my $canlisten = 0; +if (defined($table{"slapd"}->{"ServerPort"}) && + $table{"slapd"}->{"ServerPort"} > 0) { + $canlisten = 1; + $cgiargs{"servport"} = $table{"slapd"}->{"ServerPort"}; +} else { + $cgiargs{"servport"} = "0"; # 0 means do not listen +} +if (defined($table{"slapd"}->{"ldapifilepath"})) { + $canlisten = 1; + $cgiargs{"ldapifilepath"} = $table{"slapd"}->{"ldapifilepath"}; +} +if (! $canlisten) { + usage("Either ServerPort or ldapifilepath must be specified in the slapd section of $filename"); +} + # the following items are optional $cgiargs{"lock_dir"} = $table{"slapd"}->{"lock_dir"}; @@ -253,7 +279,11 @@ $cgiargs{install_ldif_file} = $table{slapd}->{InstallLdifFile}; # if for some reason you do not want the server started after instance creation # the following line can be commented out - NOTE that if you are creating the # Configuration DS, it will be started anyway -$cgiargs{start_server} = 1; +if (defined($table{"slapd"}->{"start_server"})) { + $cgiargs{start_server} = $table{"slapd"}->{"start_server"}; +} else { # default is on + $cgiargs{start_server} = 1; +} my $sroot = $cgiargs{sroot};
0
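A hypothetical, self-contained C sketch of the rule the commit above introduces: an instance must be able to listen somewhere, so either a real server port or an LDAPI socket path (or both) is required. The function name check_listen_config() is invented; the real check is create_instance_checkports() in the diff above.

```c
/* Illustrative only: the instance may be created with port "0" as long as an
 * LDAPI socket path is supplied.  check_listen_config() is an invented name;
 * the real check is create_instance_checkports() in create_instance.c. */
#include <stdio.h>
#include <string.h>

/* Return 0 if the configuration can listen somewhere, -1 otherwise. */
static int
check_listen_config(const char *servport, const char *ldapifilepath)
{
    int port_ok = (servport != NULL && servport[0] != '\0' && strcmp(servport, "0") != 0);
    int ldapi_ok = (ldapifilepath != NULL && ldapifilepath[0] != '\0');

    if (!port_ok && !ldapi_ok) {
        fprintf(stderr, "error: either servport or ldapifilepath must be specified.\n");
        return -1;
    }
    return 0;
}

int
main(void)
{
    /* port "0" is acceptable only when an LDAPI socket path is given */
    printf("%d\n", check_listen_config("0", "/var/run/slapd-example.socket"));  /* 0  */
    printf("%d\n", check_listen_config("389", NULL));                           /* 0  */
    printf("%d\n", check_listen_config("0", NULL));                             /* -1 */
    return 0;
}
```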
99ce798de45f4e70085a447b59b4ea29c20de424
389ds/389-ds-base
Resolves: #216983 Summary: Make random password generation work with policies Changes: 1) Generate a password that meets the current password syntax rules. 2) Report errors when Min8Bit is set or MinCategories > 4
commit 99ce798de45f4e70085a447b59b4ea29c20de424 Author: Noriko Hosoi <[email protected]> Date: Tue Nov 28 18:02:07 2006 +0000 Resolves: #216983 Summary: Make random password generation work with policies Changes: 1) Generate a password that meets the current password syntax rules. 2) Report errors when Min8Bit is set or MinCategories > 4 diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c index 33fffda8e..55d81c8ae 100644 --- a/ldap/servers/slapd/passwd_extop.c +++ b/ldap/servers/slapd/passwd_extop.c @@ -205,33 +205,223 @@ static int passwd_modify_userpassword(Slapi_Entry *targetEntry, const char *newP return ret; } -/* Generate a new random password */ -static int passwd_modify_generate_passwd(char **genpasswd) +/* Generate a new, basic random password */ +static int passwd_modify_generate_basic_passwd( int passlen, char **genpasswd ) { - unsigned char data[ LDAP_EXTOP_PASSMOD_RANDOM_BYTES ]; - char enc[ 1 + LDIF_BASE64_LEN( LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN ) ]; + unsigned char *data = NULL; + char *enc = NULL; + int datalen = LDAP_EXTOP_PASSMOD_RANDOM_BYTES; + int enclen = LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN + 1; - if (genpasswd == NULL) { + if ( genpasswd == NULL ) { return LDAP_OPERATIONS_ERROR; } + if ( passlen > 0 ) { + datalen = passlen * 3 / 4 + 1; + enclen = datalen * 4; /* allocate the large enough space */ + } + + data = (unsigned char *)slapi_ch_calloc( datalen, 1 ); + enc = (char *)slapi_ch_calloc( enclen, 1 ); + /* get random bytes from NSS */ - PK11_GenerateRandom( data, LDAP_EXTOP_PASSMOD_RANDOM_BYTES ); + PK11_GenerateRandom( data, datalen ); /* b64 encode the random bytes to get a password made up - * of printable characters. ldif_base64_encode() will + * of printable characters. ldif_base64_encode() will * zero-terminate the string */ - (void)ldif_base64_encode( data, enc, LDAP_EXTOP_PASSMOD_RANDOM_BYTES, -1 ); + (void)ldif_base64_encode( data, enc, passlen, -1 ); + + /* This will get freed by the caller */ + *genpasswd = slapi_ch_malloc( 1 + passlen ); + + /* trim the password to the proper length */ + PL_strncpyz( *genpasswd, enc, passlen + 1 ); + + slapi_ch_free( (void **)&data ); + slapi_ch_free_string( &enc ); + + return LDAP_SUCCESS; +} + +/* Generate a new, password-policy-based random password */ +static int passwd_modify_generate_policy_passwd( passwdPolicy *pwpolicy, + char **genpasswd, char **errMesg ) +{ + unsigned char *data = NULL; + int passlen = 0; + int tmplen = 0; + enum { + idx_minuppers = 0, + idx_minlowers, + idx_mindigits, + idx_minspecials, + idx_end + }; + int my_policy[idx_end]; + struct { + int chr_start; + int chr_range; + } chr_table[] = { /* NOTE: the above enum order */ + { 65, 26 }, /* [ A - Z ] */ + { 97, 26 }, /* [ a - z ] */ + { 48, 10 }, /* [ 0 - 9 ] */ + { 58, 7 } /* [ : - @ ] */ + }; +#define gen_policy_pw_getchar(n, idx) \ +( chr_table[(idx)].chr_start + (n) % chr_table[(idx)].chr_range ) + int i; + + if ( genpasswd == NULL ) { + return LDAP_OPERATIONS_ERROR; + } + + my_policy[idx_mindigits] = pwpolicy->pw_mindigits; + my_policy[idx_minuppers] = pwpolicy->pw_minuppers; + my_policy[idx_minlowers] = pwpolicy->pw_minlowers; + my_policy[idx_minspecials] = pwpolicy->pw_minspecials; + + /* if only minalphas is set, divide it into minuppers and minlowers. 
*/ + if ( pwpolicy->pw_minalphas > 0 && + ( my_policy[idx_minuppers] == 0 && my_policy[idx_minlowers] == 0 )) { + unsigned int x = (unsigned int)time(NULL); + my_policy[idx_minuppers] = slapi_rand_r(&x) % pwpolicy->pw_minalphas; + my_policy[idx_minlowers] = pwpolicy->pw_minalphas - my_policy[idx_minuppers]; + } + + if ( pwpolicy->pw_mincategories ) { + int categories = 0; + for ( i = 0; i < idx_end; i++ ) { + if ( my_policy[i] > 0 ) { + categories++; + } + } + if ( pwpolicy->pw_mincategories > categories ) { + categories = pwpolicy->pw_mincategories; + for ( i = 0; i < idx_end; i++ ) { + if ( my_policy[i] == 0 ) { + /* force to add a policy to match the pw_mincategories */ + my_policy[i] = 1; + } + if ( --categories == 0 ) { + break; + } + } + if ( categories > 0 ) { + /* password generator does not support passwordMin8Bit */ + LDAPDebug( LDAP_DEBUG_ANY, + "Unable to generate a password that meets the current " + "password syntax rules. A minimum categories setting " + "of %d is not supported with random password generation.\n", + pwpolicy->pw_mincategories, 0, 0 ); + *errMesg = "Unable to generate new random password. Please contact the Administrator."; + return LDAP_CONSTRAINT_VIOLATION; + } + } + } + + /* get the password length */ + tmplen = 0; + for ( i = 0; i < idx_end; i++ ) { + tmplen += my_policy[i]; + } + passlen = tmplen; + if ( passlen < pwpolicy->pw_minlength ) { + passlen = pwpolicy->pw_minlength; + } + if ( passlen < LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN ) { + passlen = LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN; + } + + data = (unsigned char *)slapi_ch_calloc( passlen, 1 ); + + /* get random bytes from NSS */ + PK11_GenerateRandom( data, passlen ); + + /* if password length is longer the sum of my_policy's, + let them share the burden */ + if ( passlen > tmplen ) { + unsigned int x = (unsigned int)time(NULL); + int delta = passlen - tmplen; + for ( i = 0; i < delta; i++ ) { + my_policy[(x = slapi_rand_r(&x)) % idx_end]++; + } + } /* This will get freed by the caller */ - *genpasswd = slapi_ch_malloc( 1 + LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN); + *genpasswd = slapi_ch_malloc( 1 + passlen ); + + for ( i = 0; i < passlen; i++ ) { + int idx = data[i] % idx_end; + int isfirst = 1; + /* choose a category based on the random value */ + while ( my_policy[idx] <= 0 ) { + if ( ++idx == idx_end ) { + idx = 0; /* if no rule is found, default is uppercase */ + if ( !isfirst ) { + break; + } + isfirst = 0; + } + } + my_policy[idx]--; + (*genpasswd)[i] = gen_policy_pw_getchar(data[i], idx); + } + (*genpasswd)[passlen] = '\0'; - /* trim the password to the proper length. */ - PL_strncpyz( *genpasswd, enc, 1 + LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN ); + slapi_ch_free( (void **)&data ); return LDAP_SUCCESS; } +/* Generate a new random password */ +static int passwd_modify_generate_passwd( passwdPolicy *pwpolicy, + char **genpasswd, char **errMesg ) +{ + int minalphalen = 0; + int passlen = LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN; + int rval = LDAP_SUCCESS; + + if ( genpasswd == NULL ) { + return LDAP_OPERATIONS_ERROR; + } + if ( pwpolicy->pw_min8bit > 0 ) { + LDAPDebug( LDAP_DEBUG_ANY, "Unable to generate a password that meets " + "the current password syntax rules. 8-bit syntax " + "restrictions are not supported with random password " + "generation.\n", 0, 0, 0 ); + *errMesg = "Unable to generate new random password. 
Please contact the Administrator."; + return LDAP_CONSTRAINT_VIOLATION; + } + + if ( pwpolicy->pw_minalphas || pwpolicy->pw_minuppers || + pwpolicy->pw_minlowers || pwpolicy->pw_mindigits || + pwpolicy->pw_minspecials || pwpolicy->pw_maxrepeats || + pwpolicy->pw_mincategories > 2 ) { + rval = passwd_modify_generate_policy_passwd( pwpolicy, genpasswd, + errMesg ); + } else { + /* find out the minimum length to fulfill the passwd policy + requirements */ + minalphalen = pwpolicy->pw_minuppers + pwpolicy->pw_minlowers; + if ( minalphalen < pwpolicy->pw_minalphas ) { + minalphalen = pwpolicy->pw_minalphas; + } + passlen = minalphalen + pwpolicy->pw_mindigits + + pwpolicy->pw_minspecials + pwpolicy->pw_min8bit; + if ( passlen < pwpolicy->pw_minlength ) { + passlen = pwpolicy->pw_minlength; + } + if ( passlen < LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN ) { + passlen = LDAP_EXTOP_PASSMOD_GEN_PASSWD_LEN; + } + rval = passwd_modify_generate_basic_passwd( passlen, genpasswd ); + } + + return rval; +} + /* Password Modify Extended operation plugin function */ int @@ -415,12 +605,22 @@ parse_req_done: * a random one and return it to the user in a response. */ if (newPasswd == NULL || *newPasswd == '\0') { + passwdPolicy *pwpolicy; + int rval; /* Do a free of newPasswd here to be safe, otherwise we may leak 1 byte */ slapi_ch_free_string( &newPasswd ); + + pwpolicy = new_passwdPolicy( pb, dn ); + /* Generate a new password */ - if (passwd_modify_generate_passwd( &newPasswd ) != LDAP_SUCCESS) { - errMesg = "Error generating new password.\n"; + rval = passwd_modify_generate_passwd( pwpolicy, &newPasswd, &errMesg ); + + delete_passwdPolicy(&pwpolicy); + + if (rval != LDAP_SUCCESS) { + if (!errMesg) + errMesg = "Error generating new password.\n"; rc = LDAP_OPERATIONS_ERROR; goto free_and_return; }
0
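The generator added above builds a password that satisfies the password policy's per-category minimums. The following standalone C sketch shows the same idea in miniature, assuming invented category tables and plain rand() instead of NSS's PK11_GenerateRandom(): satisfy each minimum first, pad to the length floor, then shuffle. It illustrates the approach only, not the server's implementation.

```c
/* Illustration only: build a random password that satisfies simple
 * per-category minimums, in the same spirit as the policy-aware generator
 * added above.  It uses rand() and invented category tables for brevity. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

static const char *categories[] = {
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ",   /* uppers   */
    "abcdefghijklmnopqrstuvwxyz",   /* lowers   */
    "0123456789",                   /* digits   */
    ":;<=>?@"                       /* specials */
};

/* mins[i]: minimum characters from categories[i]; minlen: overall length floor */
static char *
generate_policy_passwd(const int mins[4], int minlen)
{
    int total = 0, pos = 0, i, j;

    for (i = 0; i < 4; i++) {
        total += mins[i];
    }
    if (total < minlen) {
        total = minlen;
    }

    char *pw = calloc(total + 1, 1);
    if (pw == NULL) {
        return NULL;
    }

    /* satisfy every per-category minimum first */
    for (i = 0; i < 4; i++) {
        for (j = 0; j < mins[i]; j++) {
            pw[pos++] = categories[i][rand() % (int)strlen(categories[i])];
        }
    }
    /* pad with characters from random categories up to the target length */
    while (pos < total) {
        i = rand() % 4;
        pw[pos++] = categories[i][rand() % (int)strlen(categories[i])];
    }
    /* shuffle so the category blocks do not appear in a fixed order */
    for (i = total - 1; i > 0; i--) {
        j = rand() % (i + 1);
        char tmp = pw[i];
        pw[i] = pw[j];
        pw[j] = tmp;
    }
    return pw;
}

int
main(void)
{
    int mins[4] = {2, 2, 2, 1};   /* minuppers, minlowers, mindigits, minspecials */

    srand((unsigned)time(NULL));
    char *pw = generate_policy_passwd(mins, 12);
    printf("%s\n", pw);
    free(pw);
    return 0;
}
```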
f4f255d2d355ba3252d20d4c09a5f89f02556021
389ds/389-ds-base
Bump version to 389-ds-base-2.1.0
commit f4f255d2d355ba3252d20d4c09a5f89f02556021 Author: Mark Reynolds <[email protected]> Date: Mon Aug 9 17:51:33 2021 -0400 Bump version to 389-ds-base-2.1.0 diff --git a/VERSION.sh b/VERSION.sh index 89a967f21..017d4818d 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -9,8 +9,8 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=2 -VERSION_MINOR=0 -VERSION_MAINT=7 +VERSION_MINOR=1 +VERSION_MAINT=0 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=$(date -u +%Y%m%d)
0
8eaa3c605b74e78fc45172f0e0544e65017090c4
389ds/389-ds-base
console .2 is still compatible with 389 .3 for now
commit 8eaa3c605b74e78fc45172f0e0544e65017090c4 Author: Rich Megginson <[email protected]> Date: Mon May 21 09:54:42 2012 -0600 console .2 is still compatible with 389 .3 for now diff --git a/VERSION.sh b/VERSION.sh index 5e93c7adb..e7c8e85b5 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -50,4 +50,5 @@ PACKAGE_BUGREPORT="${PACKAGE_BUGREPORT}enter_bug.cgi?product=$brand" PACKAGE_STRING="$PACKAGE_TARNAME $PACKAGE_VERSION" # the version of the ds console package that this directory server # is compatible with -CONSOLE_VERSION=$VERSION_MAJOR.$VERSION_MINOR.6 +# console .2 is still compatible with 389 .3 for now +CONSOLE_VERSION=$VERSION_MAJOR.2.6
0
d522c921115d38c6f7e459812601f80b1dc88cb5
389ds/389-ds-base
Issue 3069 - Support ECDSA private keys for TLS (#5365)
commit d522c921115d38c6f7e459812601f80b1dc88cb5 Author: progier389 <[email protected]> Date: Wed Jul 13 17:02:02 2022 +0200 Issue 3069 - Support ECDSA private keys for TLS (#5365) diff --git a/dirsrvtests/tests/suites/tls/ecdsa_test.py b/dirsrvtests/tests/suites/tls/ecdsa_test.py new file mode 100644 index 000000000..7b7b3a7b1 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/ecdsa_test.py @@ -0,0 +1,206 @@ +import logging +import pytest +import os +import subprocess +from lib389.utils import ds_is_older +from lib389._constants import DN_DM, PW_DM +from lib389.topologies import topology_st as topo +from tempfile import TemporaryDirectory + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +script_content=""" +#!/bin/bash +set -e # Exit if a command fails +set -x # Log the commands + +cd {dir} +inst={instname} +url={url} +rootdn="{rootdn}" +rootpw="{rootpw}" + +################################ +###### GENERATE CA CERT ######## +################################ + +echo " +[ req ] +distinguished_name = req_distinguished_name +policy = policy_match +x509_extensions = v3_ca + +# For the CA policy +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ req_distinguished_name ] +countryName = Country Name (2 letter code) +countryName_default = FR +countryName_min = 2 +countryName_max = 2 + +stateOrProvinceName = State or Province Name (full name) +stateOrProvinceName_default = test + +localityName = Locality Name (eg, city) + +0.organizationName = Organization Name (eg, company) +0.organizationName_default = test-ECDSA-CA + +organizationalUnitName = Organizational Unit Name (eg, section) +#organizationalUnitName_default = + +commonName = Common Name (e.g. server FQDN or YOUR name) +commonName_max = 64 + + +[ v3_ca ] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +basicConstraints = critical,CA:true +#nsComment = "OpenSSL Generated Certificate" +keyUsage=critical, keyCertSign +" >ca.conf + + +openssl ecparam -genkey -name prime256v1 -out ca.key +openssl req -x509 -new -sha256 -key ca.key -nodes -days 3650 -config ca.conf -subj "/CN=`hostname`/O=test-ECDSA-CA/C=FR" -out ca.pem -keyout ca.key +openssl x509 -outform der -in ca.pem -out ca.crt + +openssl x509 -text -in ca.pem + +#################################### +###### GENERATE SERVER CERT ######## +#################################### + +echo " +[ req ] +distinguished_name = req_distinguished_name +policy = policy_match +x509_extensions = v3_cert + +# For the cert policy +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ req_distinguished_name ] +countryName = Country Name (2 letter code) +countryName_default = FR +countryName_min = 2 +countryName_max = 2 + +stateOrProvinceName = State or Province Name (full name) + +localityName = Locality Name (eg, city) + +0.organizationName = Organization Name (eg, company) +0.organizationName_default = test-ECDSA + +organizationalUnitName = Organizational Unit Name (eg, section) +#organizationalUnitName_default = + +commonName = Common Name (e.g. 
server FQDN or YOUR name) +commonName_max = 64 + + +[ v3_cert ] +basicConstraints = critical,CA:false +subjectAltName=DNS:`hostname` +keyUsage=digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment +#nsComment = "OpenSSL Generated Certificate" +extendedKeyUsage=clientAuth, serverAuth +nsCertType=client, server +" >cert.conf + +openssl ecparam -genkey -name prime256v1 -out cert.key +openssl req -new -sha256 -key cert.key -nodes -config cert.conf -subj "/CN=`hostname`/O=test-ECDSA/C=FR" -out cert.csr +openssl x509 -req -sha256 -days 3650 -extensions v3_cert -extfile cert.conf -in cert.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out cert.pem +openssl pkcs12 -export -inkey cert.key -in cert.pem -name ecdsacert -out cert.p12 -passout pass:secret12 + +openssl x509 -text -in cert.pem + + +############################# +###### INSTALL CERTS ######## +############################# + +certdbdir=$PREFIX/etc/dirsrv/slapd-$inst +rm -f $certdbdir/cert9.db $certdbdir/key4.db +certutil -N -d $certdbdir -f $certdbdir/pwdfile.txt + +certutil -A -n Self-Signed-CA -t CT,, -f $certdbdir/pwdfile.txt -d $certdbdir -a -i ca.pem + +dsctl $inst tls import-server-key-cert cert.pem cert.key + +dsctl $inst restart + + +######################### +###### TEST CERT ######## +######################### +LDAPTLS_CACERT=$PWD/ca.pem ldapsearch -x -H $url -D "$rootdn" -w "$rootpw" -b "" -s base +""" + + +def test_ecdsa(topo): + """Specify a test case purpose or name here + + :id: 7902f37c-01d3-11ed-b65c-482ae39447e5 + :setup: Standalone Instance + :steps: + 1. Generate the test script + 2. Run the test script + 3. Check that ldapsearch returned the namingcontext + :expectedresults: + 1. No error + 2. No error and exit code should be 0 + 3. namingcontext should be in the script output + """ + + inst=topo.standalone + inst.enable_tls() + with TemporaryDirectory() as dir: + scriptname = f"{dir}/doit" + scriptname = "/tmp/doit" + d = { + 'dir': dir, + 'instname': inst.serverid, + 'url': f"ldaps://localhost:{inst.sslport}", + 'rootdn': DN_DM, + 'rootpw': PW_DM, + } + with open(scriptname, 'w') as f: + f.write(script_content.format(**d)) + res = subprocess.run(('/bin/bash', scriptname), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8') + assert res + log.info(res.stdout) + res.check_returncode() + # If ldapsearch is successful then defaultnamingcontext should be in res.stdout + assert "defaultnamingcontext" in res.stdout + + + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c index e6e6c1e73..ef9e97fb9 100644 --- a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -2688,7 +2688,7 @@ bail: * @param subject subject out */ static SECStatus -extractRSAKeysAndSubject( +extractKeysAndSubject( const char *nickname, PK11SlotInfo *slot, secuPWData *pwdata, @@ -2698,9 +2698,10 @@ extractRSAKeysAndSubject( { PRErrorCode rv = SECFailure; CERTCertificate *cert = PK11_FindCertFromNickname((char *)nickname, NULL); + int keytype = -1; if (!cert) { rv = PR_GetError(); - slapi_log_err(SLAPI_LOG_ERR, "extractRSAKeysAndSubject", + slapi_log_err(SLAPI_LOG_ERR, "extractKeysAndSubject", "Failed extract cert with %s, (%d-%s, %d).\n", nickname, rv, slapd_pr_strerror(rv), PR_GetOSError()); goto bail; @@ -2709,7 +2710,7 @@ extractRSAKeysAndSubject( *pubkey = CERT_ExtractPublicKey(cert); if (!*pubkey) { rv = PR_GetError(); - slapi_log_err(SLAPI_LOG_ERR, 
"extractRSAKeysAndSubject", + slapi_log_err(SLAPI_LOG_ERR, "extractKeysAndSubject", "Could not get public key from cert for %s, (%d-%s, %d)\n", nickname, rv, slapd_pr_strerror(rv), PR_GetOSError()); goto bail; @@ -2718,24 +2719,30 @@ extractRSAKeysAndSubject( *privkey = PK11_FindKeyByDERCert(slot, cert, pwdata); if (!*privkey) { rv = PR_GetError(); - slapi_log_err(SLAPI_LOG_ERR, "extractRSAKeysAndSubject", + slapi_log_err(SLAPI_LOG_ERR, "extractKeysAndSubject", "Unable to find the key with PK11_FindKeyByDERCert for %s, (%d-%s, %d)\n", nickname, rv, slapd_pr_strerror(rv), PR_GetOSError()); *privkey = PK11_FindKeyByAnyCert(cert, &pwdata); if (!*privkey) { rv = PR_GetError(); - slapi_log_err(SLAPI_LOG_ERR, "extractRSAKeysAndSubject", + slapi_log_err(SLAPI_LOG_ERR, "extractKeysAndSubject", "Unable to find the key with PK11_FindKeyByAnyCert for %s, (%d-%s, %d)\n", nickname, rv, slapd_pr_strerror(rv), PR_GetOSError()); goto bail; } } - PR_ASSERT(((*privkey)->keyType) == rsaKey); + keytype = (*privkey)->keyType; + if (keytype != rsaKey && keytype != ecKey) { + slapi_log_err(SLAPI_LOG_ERR, "extractKeysAndSubject", + "Unexpected key algorythm in certificate: %s. Only rsa and ec keys are supported.\n", nickname); + goto bail; + } + *subject = CERT_AsciiToName(cert->subjectName); if (!*subject) { - slapi_log_err(SLAPI_LOG_ERR, "extractRSAKeysAndSubject", + slapi_log_err(SLAPI_LOG_ERR, "extractKeysAndSubject", "Improperly formatted name: \"%s\"\n", cert->subjectName); goto bail; @@ -2921,7 +2928,7 @@ slapd_extract_key(Slapi_Entry *entry, char *token __attribute__((unused)), PK11S keyfile, PR_GetError(), PR_GetOSError()); goto bail; } - rv = extractRSAKeysAndSubject(personality, slot, &pwdata, &privkey, &pubkey, &subject); + rv = extractKeysAndSubject(personality, slot, &pwdata, &privkey, &pubkey, &subject); if (rv != SECSuccess) { #if defined(ENCRYPTEDKEY) slapi_log_err(SLAPI_LOG_ERR, "slapd_extract_key",
0
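The server-side change in the patch above amounts to accepting either an RSA or an EC private key and rejecting anything else. A minimal stand-in sketch of that gate, using a local enum rather than NSS's KeyType, might look like this:

```c
/* Tiny stand-in for the key-type gate in extractKeysAndSubject() above:
 * accept RSA or EC private keys, reject everything else.  The enum below is
 * illustrative; the server reads NSS's KeyType from the SECKEYPrivateKey. */
#include <stdio.h>

typedef enum { nullKey, rsaKey, dsaKey, ecKey } key_type_t;

static int
key_type_supported(key_type_t keytype)
{
    if (keytype != rsaKey && keytype != ecKey) {
        fprintf(stderr, "Unexpected key algorithm: only RSA and EC keys are supported.\n");
        return 0;
    }
    return 1;
}

int
main(void)
{
    printf("%d %d %d\n",
           key_type_supported(rsaKey),    /* 1 */
           key_type_supported(ecKey),     /* 1 */
           key_type_supported(dsaKey));   /* 0 */
    return 0;
}
```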
a6312f47afa98b5bf87a5442966251e036cac3b0
389ds/389-ds-base
ignore patch files
commit a6312f47afa98b5bf87a5442966251e036cac3b0 Author: Rich Megginson <[email protected]> Date: Thu Nov 21 09:34:16 2013 -0700 ignore patch files diff --git a/src/lib389/.gitignore b/src/lib389/.gitignore index b6b14e2ab..1aa28328d 100644 --- a/src/lib389/.gitignore +++ b/src/lib389/.gitignore @@ -3,4 +3,4 @@ logs/ *.pyc - +*.patch
0
f14fb8b6f71d2a87b650064f15099a2cc2dfd38a
389ds/389-ds-base
Ticket #48133 - Non tombstone entry which dn starting with "nsuniqueid=...," cannot be deleted Bug Description: Trying to delete an entry whose DN starts with "nsuniqueid=...," but which has no objectclass=nsTombstone fails with "Turning a tombstone into a tombstone!", which it indeed is not. Fix Description: This patch checks the entry and, if it does not have "objectclass=nsTombstone", the entry is not treated as a tombstone. Also, if the DN already has the entry's nsuniqueid at the beginning, the nsuniqueid is not appended again, to avoid a duplicate. Note: Adding an entry whose DN starts with "nsuniqueid" but has no nsTombstone objectclass fails, since such an entry is rejected in check_rdn_for_created_attrs called from do_add. https://fedorahosted.org/389/ticket/48133 Reviewed by [email protected] (Thank you, Rich!!)
commit f14fb8b6f71d2a87b650064f15099a2cc2dfd38a Author: Noriko Hosoi <[email protected]> Date: Fri Mar 13 16:31:43 2015 -0700 Ticket #48133 - Non tombstone entry which dn starting with "nsuniqueid=...," cannot be deleted Bug Description: Trying to delete an entry which DN starts with "nsuniqueid=...," but no objectclass=nsTombstone fails with "Turning a tombstone into a tombstone!", which is indeed not. Fix Description: This patch checks the entry and if it does not have "objectclass=nsTombstone", the entry is not treated as a tombstone. Also, if the DN already has the entry's nsuniqueid at the beginning, it does not get appended to avoid the duplicate. Note: Adding an entry which DN starts with "nsuniqueid" and no nsTombstone objectclass fails since such an entry is rejected in check_rdn_for_created_attrs called from do_add. https://fedorahosted.org/389/ticket/48133 Reviewed by [email protected] (Thank you, Rich!!) diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index 498c3c1b2..619ff4a1e 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -594,7 +594,8 @@ ldbm_back_delete( Slapi_PBlock *pb ) char *tombstone_dn; Slapi_Value *tomb_value; - if (slapi_is_special_rdn(edn, RDN_IS_TOMBSTONE)) { + if (slapi_entry_attr_hasvalue(e->ep_entry, SLAPI_ATTR_OBJECTCLASS, SLAPI_ATTR_VALUE_TOMBSTONE) && + slapi_is_special_rdn(edn, RDN_IS_TOMBSTONE)) { slapi_log_error(SLAPI_LOG_FATAL, "ldbm_back_delete", "conn=%lu op=%d Turning a tombstone into a tombstone! \"%s\"; e: 0x%p, cache_state: 0x%x, refcnt: %d\n", conn_id, op_id, edn, e, e->ep_state, e->ep_refcnt); @@ -602,7 +603,21 @@ ldbm_back_delete( Slapi_PBlock *pb ) retval = -1; goto error_return; } - tombstone_dn = compute_entry_tombstone_dn(edn, childuniqueid); + if (!childuniqueid) { + slapi_log_error(SLAPI_LOG_FATAL, "ldbm_back_delete", + "conn=%lu op=%d No nsUniqueId in the entry \"%s\"; e: 0x%p, cache_state: 0x%x, refcnt: %d\n", + conn_id, op_id, edn, e, e->ep_state, e->ep_refcnt); + ldap_result_code= LDAP_OPERATIONS_ERROR; + retval = -1; + goto error_return; + } + if ((0 == PL_strncmp(edn + sizeof(SLAPI_ATTR_UNIQUEID), childuniqueid, strlen(childuniqueid))) && + (*(edn + SLAPI_ATTR_UNIQUEID_LENGTH + slapi_uniqueIDSize() + 1/*=*/) == ',')) { + /* The DN already starts with "nsuniqueid=...," */ + tombstone_dn = slapi_ch_strdup(edn); + } else { + tombstone_dn = compute_entry_tombstone_dn(edn, childuniqueid); + } slapi_sdn_set_ndn_byval(&nscpEntrySDN, slapi_sdn_get_ndn(slapi_entry_get_sdn(e->ep_entry)));
0
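The heart of the fix above is the prefix test: only prepend "nsuniqueid=<id>," when the DN does not already begin with that exact RDN. A hypothetical standalone C sketch of the test follows; tombstone_dn() is an invented helper, not the slapd routine compute_entry_tombstone_dn().

```c
/* Hypothetical sketch: prepend "nsuniqueid=<id>," only when the DN does not
 * already start with that exact RDN. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
tombstone_dn(const char *dn, const char *uniqueid)
{
    size_t plen = strlen("nsuniqueid=") + strlen(uniqueid) + 1;  /* +1 for ',' */
    size_t outlen;
    char *prefix = malloc(plen + 1);
    char *out = NULL;

    if (prefix == NULL) {
        return NULL;
    }
    snprintf(prefix, plen + 1, "nsuniqueid=%s,", uniqueid);

    if (strncmp(dn, prefix, plen) == 0) {
        /* DN already starts with "nsuniqueid=<id>," - keep it unchanged */
        outlen = strlen(dn) + 1;
        out = malloc(outlen);
        if (out != NULL) {
            memcpy(out, dn, outlen);
        }
    } else {
        outlen = plen + strlen(dn) + 1;
        out = malloc(outlen);
        if (out != NULL) {
            snprintf(out, outlen, "%s%s", prefix, dn);
        }
    }
    free(prefix);
    return out;
}

int
main(void)
{
    const char *id = "12345678-abcd-ef01-2345-67890abcdef0";
    char *t1 = tombstone_dn("uid=jdoe,dc=example,dc=com", id);
    char *t2 = tombstone_dn("nsuniqueid=12345678-abcd-ef01-2345-67890abcdef0,"
                            "uid=jdoe,dc=example,dc=com", id);
    printf("%s\n%s\n", t1, t2);   /* both lines print the same tombstone DN */
    free(t1);
    free(t2);
    return 0;
}
```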
085c6d494f90231f2e572a668ab601c321bffb01
389ds/389-ds-base
Ticket 47592 - automember plugin task memory leaks The search pblock was not destroyed at the end of the export and rebuild tasks. https://fedorahosted.org/389/ticket/47592 Reviewed by: richm(Thanks!)
commit 085c6d494f90231f2e572a668ab601c321bffb01 Author: Mark Reynolds <[email protected]> Date: Wed Nov 27 10:58:43 2013 -0500 Ticket 47592 - automember plugin task memory leaks The search pblock was not destroyed at the end of the export and rebuild tasks. https://fedorahosted.org/389/ticket/47592 Reviewed by: richm(Thanks!) diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c index e662e5811..e58eb1e05 100644 --- a/ldap/servers/plugins/automember/automember.c +++ b/ldap/servers/plugins/automember/automember.c @@ -2223,7 +2223,6 @@ void automember_rebuild_task_thread(void *arg){ } } automember_config_unlock(); - slapi_free_search_results_internal(search_pb); out: if (plugin_is_betxn && fixup_pb) { @@ -2234,6 +2233,8 @@ out: } slapi_pblock_destroy(fixup_pb); } + slapi_free_search_results_internal(search_pb); + slapi_pblock_destroy(search_pb); if(result){ /* error */ @@ -2433,9 +2434,11 @@ void automember_export_task_thread(void *arg){ } } automember_config_unlock(); - slapi_free_search_results_internal(search_pb); out: + slapi_free_search_results_internal(search_pb); + slapi_pblock_destroy(search_pb); + if(ldif_fd){ PR_Close(ldif_fd); }
0
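The leak fixed above came from freeing the search pblock only on the success path. A generic sketch of the shape the fix moves to, with the release done once after the out: label on every path, could look like this (the struct is a stand-in, not a real Slapi_PBlock):

```c
/* Generic sketch of the cleanup shape: the resource is released once, after
 * the "out:" label, on both the success and the error path. */
#include <stdio.h>
#include <stdlib.h>

typedef struct task_resources {
    char *search_results;
} task_resources;

static int
run_task(int fail_early)
{
    int result = 0;
    task_resources *search_pb = calloc(1, sizeof(*search_pb));

    if (search_pb == NULL) {
        return -1;
    }
    if (fail_early) {
        result = -1;
        goto out;   /* early error: still falls through the shared cleanup */
    }
    search_pb->search_results = calloc(64, 1);
    /* ... do the actual work with search_pb here ... */

out:
    /* free on every exit path, and only once */
    free(search_pb->search_results);
    free(search_pb);
    return result;
}

int
main(void)
{
    printf("%d %d\n", run_task(0), run_task(1));   /* 0 -1 */
    return 0;
}
```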
c493fb4f7526760aee6aa17edd8dbec9738c6749
389ds/389-ds-base
fix a couple of minor coverity issues 12432 Copy into fixed size buffer In msgIdAdd(): A source buffer of statically unknown size is copied into a fixed size destination buffer 12428 Copy into fixed size buffer In doDeleteEntry(): A source buffer of statically unknown size is copied into a fixed size destination buffer Reviewed by: mreynold (Thanks!)
commit c493fb4f7526760aee6aa17edd8dbec9738c6749 Author: Rich Megginson <[email protected]> Date: Wed Feb 1 11:55:24 2012 -0700 fix a couple of minor coverity issues 12432 Copy into fixed size buffer In msgIdAdd(): A source buffer of statically unknown size is copied into a fixed size destination buffer 12428 Copy into fixed size buffer In doDeleteEntry(): A source buffer of statically unknown size is copied into a fixed size destination buffer Reviewed by: mreynold (Thanks!) diff --git a/ldap/servers/slapd/tools/ldclt/ldapfct.c b/ldap/servers/slapd/tools/ldclt/ldapfct.c index 76fc9c2ad..6e6676445 100644 --- a/ldap/servers/slapd/tools/ldclt/ldapfct.c +++ b/ldap/servers/slapd/tools/ldclt/ldapfct.c @@ -3668,9 +3668,8 @@ doDeleteEntry ( */ if (buildRandomRdnOrFilter (tttctx) < 0) return (-1); - strcpy (delDn, tttctx->bufFilter); - strcat (delDn, ","); - strcat (delDn, tttctx->bufBaseDN); + snprintf (delDn, sizeof(delDn), "%s,%s", tttctx->bufFilter, tttctx->bufBaseDN); + delDn[sizeof(delDn)-1] = '\0'; ret = ldap_delete_ext (tttctx->ldapCtx, delDn, NULL, NULL, &msgid); if (ret < 0) diff --git a/ldap/servers/slapd/tools/ldclt/threadMain.c b/ldap/servers/slapd/tools/ldclt/threadMain.c index 1d2ed59f5..a335b19b2 100644 --- a/ldap/servers/slapd/tools/ldclt/threadMain.c +++ b/ldap/servers/slapd/tools/ldclt/threadMain.c @@ -626,7 +626,8 @@ msgIdAdd ( */ tttctx->lastMsgId->next = NULL; tttctx->lastMsgId->msgid = msgid; - strcpy (tttctx->lastMsgId->str, str); + strncpy (tttctx->lastMsgId->str, str, sizeof(tttctx->lastMsgId->str)); + tttctx->lastMsgId->str[sizeof(tttctx->lastMsgId->str)-1] = '\0'; strncpy (tttctx->lastMsgId->dn, dn, sizeof(tttctx->lastMsgId->dn)); tttctx->lastMsgId->dn[sizeof(tttctx->lastMsgId->dn)-1] = '\0'; tttctx->lastMsgId->attribs = attribs;
0
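Both hunks above replace unbounded strcpy()/strcat() calls with bounded copies into fixed-size buffers. A minimal standalone C sketch of the two idioms, assuming arbitrary buffer sizes and made-up values:

```c
/* Minimal sketch of the two bounded-copy idioms used in the fix above:
 * snprintf() into a fixed buffer, and strncpy() followed by an explicit NUL
 * terminator.  Buffer sizes and values are arbitrary examples. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    char delDn[64];
    char str[16];
    const char *filter = "cn=mr.abc0000042";
    const char *baseDn = "dc=example,dc=com";

    /* snprintf never writes past sizeof(delDn) and always NUL-terminates */
    snprintf(delDn, sizeof(delDn), "%s,%s", filter, baseDn);

    /* strncpy may leave the buffer unterminated, so terminate it explicitly */
    strncpy(str, "a rather long message id string", sizeof(str));
    str[sizeof(str) - 1] = '\0';

    printf("%s\n%s\n", delDn, str);
    return 0;
}
```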
b1f434e3f5b8d699909bc1e54465718641a296d3
389ds/389-ds-base
Ticket 49002 - Remove memset on allocation Bug Description: Memset is slow, and has caused us some issues. C99 supports zero-initializing structs with {0}, and we can also use calloc when needed. Calloc is significantly faster than malloc + memset. Fix Description: Remove memset where possible. We can't remove it universally due to some struct reuse, and some libraries needing it, but this reduction cleans the code greatly, and should give us a perf improvement. https://fedorahosted.org/389/ticket/49002 Author: wibrown Review by: mreynolds (Thanks!)
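The two idioms this commit message argues for, calloc() in place of malloc() plus memset() and a C99 "= {0}" initializer for automatic structs, can be illustrated with a small standalone C example (the full diff follows below); the widget struct is made up for the example.

```c
/* Standalone illustration of calloc() versus malloc() + memset(), and of a
 * C99 "= {0}" initializer for automatic structs.  The struct is made up. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct widget {
    int count;
    char name[32];
};

int
main(void)
{
    /* heap: one calloc() call replaces malloc() followed by memset(..., 0, ...) */
    struct widget *w_old = malloc(sizeof(*w_old));
    if (w_old != NULL) {
        memset(w_old, 0, sizeof(*w_old));
    }
    struct widget *w_new = calloc(1, sizeof(*w_new));

    /* stack: "= {0}" zeroes every member, no memset needed afterwards */
    struct widget local = {0};

    if (w_old != NULL && w_new != NULL) {
        printf("%d %d %d\n", w_old->count, w_new->count, local.count);   /* 0 0 0 */
    }
    free(w_old);
    free(w_new);
    return 0;
}
```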
commit b1f434e3f5b8d699909bc1e54465718641a296d3 Author: William Brown <[email protected]> Date: Mon Nov 28 11:41:54 2016 +1000 Ticket 49002 - Remove memset on allocation Bug Description: Memset is slow, and has cause us some issues. c99 supports allocing 0 structs with {0}, and we can also use calloc when needed. Calloc is signifigantly faster that malloc + memset. Fix Description: Remove memset where possible. We can't remove it universally due to some struct reuse, and some libraries needing it, but this reduction cleans the code greatly, and should give us a perf improvement. https://fedorahosted.org/389/ticket/49002 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/ldap/servers/plugins/acl/aclparse.c b/ldap/servers/plugins/acl/aclparse.c index a1dae1900..d4cecfa75 100644 --- a/ldap/servers/plugins/acl/aclparse.c +++ b/ldap/servers/plugins/acl/aclparse.c @@ -1562,9 +1562,7 @@ __aclp__init_targetattr (aci_t *aci, char *attr_val, char **errbuf) * * The attribute goes in the attrTarget list. */ - attr = (Targetattr *) slapi_ch_malloc (sizeof (Targetattr)); - memset (attr, 0, sizeof(Targetattr)); - + attr = (Targetattr *) slapi_ch_calloc (1, sizeof (Targetattr)); /* strip double quotes */ lenstr = strlen(str); if (*str == '"' && *(str + lenstr - 1) == '"') { @@ -2150,8 +2148,7 @@ static int process_filter_list( Targetattrfilter ***input_attrFilterArray, * */ - attrfilter = (Targetattrfilter *) slapi_ch_malloc (sizeof (Targetattrfilter)); - memset (attrfilter, 0, sizeof(Targetattrfilter)); + attrfilter = (Targetattrfilter *) slapi_ch_calloc (1, sizeof (Targetattrfilter)); if (strstr( str,":") != NULL) { if ( __acl_init_targetattrfilter( attrfilter, str ) != 0 ) { diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index 9af9082fe..3b3855995 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -2267,12 +2267,10 @@ int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data) candidate_array = (Slapi_Value**) - slapi_ch_malloc(sizeof(Slapi_Value*)*total); - memset(candidate_array, 0, sizeof(Slapi_Value*)*total); + slapi_ch_calloc(1, sizeof(Slapi_Value*)*total); member_array = (Slapi_Value**) - slapi_ch_malloc(sizeof(Slapi_Value*)*total); - memset(member_array, 0, sizeof(Slapi_Value*)*total); + slapi_ch_calloc(1, sizeof(Slapi_Value*)*total); hint = slapi_attr_first_value(attr, &val); diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c index e6af414e4..9feb91b75 100644 --- a/ldap/servers/plugins/referint/referint.c +++ b/ldap/servers/plugins/referint/referint.c @@ -1500,7 +1500,8 @@ referint_thread_func(void *arg) int my_fgetc(PRFileDesc *stream) { - static char buf[READ_BUFSIZE] = "\0"; + /* This is equivalent to memset of 0, but statically defined. 
*/ + static char buf[READ_BUFSIZE] = {0}; static int position = READ_BUFSIZE; int retval; int err; @@ -1508,7 +1509,6 @@ int my_fgetc(PRFileDesc *stream) /* check if we need to load the buffer */ if( READ_BUFSIZE == position ) { - memset(buf, '\0', READ_BUFSIZE); if( ( err = PR_Read(stream, buf, READ_BUFSIZE) ) >= 0) { /* it read some data */; diff --git a/ldap/servers/plugins/retrocl/retrocl_cn.c b/ldap/servers/plugins/retrocl/retrocl_cn.c index ec81bc532..98c14b303 100644 --- a/ldap/servers/plugins/retrocl/retrocl_cn.c +++ b/ldap/servers/plugins/retrocl/retrocl_cn.c @@ -171,7 +171,7 @@ int retrocl_get_changenumbers(void) */ time_t retrocl_getchangetime( int type, int *err ) { - cnumRet cr; + cnumRet cr = {0}; time_t ret; if ( type != SLAPI_SEQ_FIRST && type != SLAPI_SEQ_LAST ) { @@ -180,7 +180,6 @@ time_t retrocl_getchangetime( int type, int *err ) } return NO_TIME; } - memset( &cr, '\0', sizeof( cnumRet )); slapi_seq_callback( RETROCL_CHANGELOG_DN, type, (char *)attr_changenumber, /* cast away const */ NULL, diff --git a/ldap/servers/plugins/rever/pbe.c b/ldap/servers/plugins/rever/pbe.c index 45b1f97f9..16982c423 100644 --- a/ldap/servers/plugins/rever/pbe.c +++ b/ldap/servers/plugins/rever/pbe.c @@ -184,12 +184,13 @@ genKey(struct pk11ContextStore **out, char *path, int mech, PRArenaPool *arena, SECItem *pwitem = NULL; SECItem *result = NULL; SECItem *salt = NULL; - SECItem der_algid; + SECItem der_algid = {0}; SECAlgorithmID *algid = NULL; SECOidTag algoid; CK_MECHANISM pbeMech; CK_MECHANISM cryptoMech; - SECAlgorithmID my_algid; + /* Have to use long form init due to internal structs */ + SECAlgorithmID my_algid = {{0}, {0}}; char *configdir = NULL; char *der_ascii = NULL; char *iv = NULL; @@ -262,7 +263,6 @@ genKey(struct pk11ContextStore **out, char *path, int mech, PRArenaPool *arena, strcpy((char*)salt->data, iv); salt->len = strlen(iv) + 1; - PORT_Memset(&der_algid, 0, sizeof(der_algid)); if(!alg){ /* * This is DES, or we are encoding AES - the process is the same. 
@@ -285,7 +285,6 @@ genKey(struct pk11ContextStore **out, char *path, int mech, PRArenaPool *arena, /* * We are decoding AES - use the supplied algid */ - PORT_Memset(&my_algid, 0, sizeof(my_algid)); /* Decode the base64 der encoding */ der_ascii = PL_Base64Decode(alg, strlen(alg), NULL); diff --git a/ldap/servers/plugins/rootdn_access/rootdn_access.c b/ldap/servers/plugins/rootdn_access/rootdn_access.c index e5ebb13f1..3b6590056 100644 --- a/ldap/servers/plugins/rootdn_access/rootdn_access.c +++ b/ldap/servers/plugins/rootdn_access/rootdn_access.c @@ -483,11 +483,10 @@ rootdn_check_access(Slapi_PBlock *pb){ */ if(daysAllowed){ char *timestr; - char day[4]; + char day[4] = {0}; char *today = day; timestr = asctime(timeinfo); // DDD MMM dd hh:mm:ss YYYY - memset(day, 0 ,sizeof(day)); memmove(day, timestr, 3); // we only want the day today = strToLower(today); daysAllowed = strToLower(daysAllowed); @@ -600,8 +599,7 @@ rootdn_check_access(Slapi_PBlock *pb){ * Check if we are IPv4, so we can grab the correct IP addr for "ip_str" */ if ( PR_IsNetAddrType( client_addr, PR_IpAddrV4Mapped ) ) { - PRNetAddr v4addr; - memset( &v4addr, 0, sizeof( v4addr ) ); + PRNetAddr v4addr = {{0}}; v4addr.inet.family = PR_AF_INET; v4addr.inet.ip = client_addr->ipv6.ip.pr_s6_addr32[3]; if( PR_NetAddrToString( &v4addr, ip_str, sizeof( ip_str )) != PR_SUCCESS){ diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c index 240fa4102..8e671bbba 100644 --- a/ldap/servers/slapd/add.c +++ b/ldap/servers/slapd/add.c @@ -268,7 +268,6 @@ done: if(result_pb==NULL) { result_pb = slapi_pblock_new(); - pblock_init(result_pb); slapi_pblock_set(result_pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult); } diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c index 956c7b83d..9b5be46e4 100644 --- a/ldap/servers/slapd/attrsyntax.c +++ b/ldap/servers/slapd/attrsyntax.c @@ -1010,11 +1010,10 @@ attr_syntax_create( ) { char *s; - struct asyntaxinfo a; + struct asyntaxinfo a = {0}; int rc = LDAP_SUCCESS; /* XXXmcs: had to cast away const in many places below */ - memset(&a, 0, sizeof(a)); *asip = NULL; a.asi_name = slapi_ch_strdup(attr_names[0]); if ( NULL != attr_names[1] ) { @@ -1403,9 +1402,8 @@ attr_syntax_force_to_delete(struct asyntaxinfo *asip, void *arg) void attr_syntax_all_clear_flag( unsigned long flag ) { - struct attr_syntax_enum_flaginfo fi; + struct attr_syntax_enum_flaginfo fi = {0}; - memset( &fi, 0, sizeof(fi)); fi.asef_flag = flag; attr_syntax_enumerate_attrs( attr_syntax_clear_flag_callback, (void *)&fi, PR_TRUE ); @@ -1419,9 +1417,8 @@ attr_syntax_all_clear_flag( unsigned long flag ) void attr_syntax_delete_all_not_flagged( unsigned long flag ) { - struct attr_syntax_enum_flaginfo fi; + struct attr_syntax_enum_flaginfo fi = {0}; - memset( &fi, 0, sizeof(fi)); fi.asef_flag = flag; attr_syntax_enumerate_attrs( attr_syntax_delete_if_not_flagged, (void *)&fi, PR_TRUE ); @@ -1433,9 +1430,8 @@ attr_syntax_delete_all_not_flagged( unsigned long flag ) void attr_syntax_delete_all() { - struct attr_syntax_enum_flaginfo fi; + struct attr_syntax_enum_flaginfo fi = {0}; - memset( &fi, 0, sizeof(fi)); attr_syntax_enumerate_attrs( attr_syntax_force_to_delete, (void *)&fi, PR_TRUE ); } @@ -1447,9 +1443,8 @@ attr_syntax_delete_all() void attr_syntax_delete_all_for_schemareload(unsigned long flag) { - struct attr_syntax_enum_flaginfo fi; + struct attr_syntax_enum_flaginfo fi = {0}; - memset(&fi, 0, sizeof(fi)); fi.asef_flag = flag; attr_syntax_enumerate_attrs_ext(oid2asi, attr_syntax_delete_if_not_flagged, 
(void *)&fi); diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c index 1cc7b4271..055777842 100644 --- a/ldap/servers/slapd/back-ldbm/import-threads.c +++ b/ldap/servers/slapd/back-ldbm/import-threads.c @@ -3670,7 +3670,7 @@ dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char * Slapi_Entry **backup_entries = NULL; Slapi_Entry **bep = NULL; Slapi_Entry **curr_entries = NULL; - Slapi_PBlock srch_pb; + Slapi_PBlock srch_pb = {0}; filename = slapi_ch_smprintf("%s/%s", src_dir, file_name); @@ -3732,10 +3732,9 @@ dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char * bep++; } /* 623986: terminate the list if we reallocated backup_entries */ - if (backup_entry_len > 256) + if (backup_entry_len > 256) { *bep = NULL; - - pblock_init(&srch_pb); + } if (entry_filter != NULL) { /* Single instance restoration */ diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c index 4b612db44..cc570d453 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c @@ -2230,10 +2230,9 @@ void ldbm_config_internal_set(struct ldbminfo *li, char *attrname, char *value) */ void replace_ldbm_config_value(char *conftype, char *val, struct ldbminfo *li) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; Slapi_Mods smods; - pblock_init(&pb); slapi_mods_init(&smods, 1); slapi_mods_add(&smods, LDAP_MOD_REPLACE, conftype, strlen(val), val); slapi_modify_internal_set_pb(&pb, CONFIG_LDBM_DN, diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c index c2d0eca95..a78d850c9 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c @@ -1037,9 +1037,9 @@ ldbm_back_modrdn( Slapi_PBlock *pb ) for ( i = 0; rdns[i] != NULL; i++ ) { char *type; - Slapi_Value *svp[2]; - Slapi_Value sv; - memset(&sv,0,sizeof(Slapi_Value)); + Slapi_Value *svp[2] = {0}; + /* Have to use long form init due to presence of internal struct */ + Slapi_Value sv = {{0}, 0, 0}; if ( slapi_rdn2typeval( rdns[i], &type, &sv.bv ) != 0 ) { slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_modrdn", @@ -2020,9 +2020,8 @@ moddn_rename_child_entry( entry_set_maxcsn(e->ep_entry, opcsn); } { - Slapi_Mods smods; + Slapi_Mods smods = {0}; Slapi_Mods *smodsp = NULL; - memset(&smods, 0, sizeof(smods)); slapi_mods_init(&smods, 2); slapi_mods_add( &smods, LDAP_MOD_DELETE, LDBM_ENTRYDN_STR, strlen( backentry_get_ndn(e) ), backentry_get_ndn(e) ); diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 51eb6945e..9e68a59c6 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -104,7 +104,7 @@ connection_get_operation(void) { struct Slapi_op_stack *stack_obj = (struct Slapi_op_stack *)PR_StackPop(op_stack); if (!stack_obj) { - stack_obj = (struct Slapi_op_stack *)slapi_ch_malloc(sizeof(struct Slapi_op_stack)); + stack_obj = (struct Slapi_op_stack *)slapi_ch_calloc(1, sizeof(struct Slapi_op_stack)); stack_obj->op = operation_new( plugin_build_operation_action_bitmap( 0, plugin_get_server_plg() )); } else { @@ -282,8 +282,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is memcpy( conn->cin_addr, from, sizeof( PRNetAddr ) ); if ( PR_IsNetAddrType( conn->cin_addr, PR_IpAddrV4Mapped ) ) { - PRNetAddr v4addr; - memset( &v4addr, 0, sizeof( v4addr ) ); + PRNetAddr v4addr = {{0}}; v4addr.inet.family = PR_AF_INET; v4addr.inet.ip 
= conn->cin_addr->ipv6.ip.pr_s6_addr32[3]; PR_NetAddrToString( &v4addr, buf_ip, sizeof( buf_ip ) ); @@ -295,7 +294,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is } else { /* try syscall since "from" was not given and PR_GetPeerName failed */ /* a corner case */ - struct sockaddr_in addr; /* assuming IPv4 */ + struct sockaddr_in addr = {0}; /* assuming IPv4 */ #if ( defined( hpux ) ) int addrlen; #else @@ -303,7 +302,6 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is #endif addrlen = sizeof( addr ); - memset( &addr, 0, addrlen ); if ( (conn->c_prfd == NULL) && (getpeername( conn->c_sd, (struct sockaddr *)&addr, &addrlen ) == 0) ) @@ -344,8 +342,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is PL_strncpyz(buf_destip, "unknown local file", sizeof(buf_destip)); } } else if ( PR_IsNetAddrType( conn->cin_destaddr, PR_IpAddrV4Mapped ) ) { - PRNetAddr v4destaddr; - memset( &v4destaddr, 0, sizeof( v4destaddr ) ); + PRNetAddr v4destaddr = {{0}}; v4destaddr.inet.family = PR_AF_INET; v4destaddr.inet.ip = conn->cin_destaddr->ipv6.ip.pr_s6_addr32[3]; PR_NetAddrToString( &v4destaddr, buf_destip, sizeof( buf_destip ) ); @@ -360,7 +357,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is } else { /* try syscall since c_prfd == NULL */ /* a corner case */ - struct sockaddr_in destaddr; /* assuming IPv4 */ + struct sockaddr_in destaddr = {0}; /* assuming IPv4 */ #if ( defined( hpux ) ) int destaddrlen; #else @@ -368,7 +365,7 @@ connection_reset(Connection* conn, int ns, PRNetAddr * from, int fromLen, int is #endif destaddrlen = sizeof( destaddr ); - memset( &destaddr, 0, destaddrlen ); + if ( (getsockname( conn->c_sd, (struct sockaddr *)&destaddr, &destaddrlen ) == 0) ) { conn->cin_destaddr = (PRNetAddr *)slapi_ch_malloc( sizeof( PRNetAddr )); memset( conn->cin_destaddr, 0, sizeof( PRNetAddr )); diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 6da658e7c..1ea5a84b7 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -2464,7 +2464,7 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i int ns = 0; Connection *conn = NULL; /* struct sockaddr_in from;*/ - PRNetAddr from; + PRNetAddr from = {{0}}; PRFileDesc *pr_clonefd = NULL; ber_len_t maxbersize; slapdFrontendConfig_t *fecfg = getFrontendConfig(); @@ -2472,7 +2472,6 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i if (newconn) { *newconn = NULL; } - memset(&from, 0, sizeof(from)); /* reset to nulls so we can see what was set */ if ( (ns = accept_and_configure( tcps, pr_acceptfd, &from, sizeof(from), secure, local, &pr_clonefd)) == SLAPD_INVALID_SOCKET ) { return -1; @@ -2519,8 +2518,7 @@ handle_new_connection(Connection_Table *ct, int tcps, PRFileDesc *pr_acceptfd, i LBER_SBIOD_LEVEL_PROVIDER, conn ); #else /* !USE_OPENLDAP */ { - struct lber_x_ext_io_fns func_pointers; - memset(&func_pointers, 0, sizeof(func_pointers)); + struct lber_x_ext_io_fns func_pointers = {0}; func_pointers.lbextiofn_size = LBER_X_EXTIO_FNS_SIZE; func_pointers.lbextiofn_read = NULL; /* see connection_read_function */ func_pointers.lbextiofn_write = write_function; @@ -3044,7 +3042,6 @@ slapd_listenhost2addr(const char *listenhost, PRNetAddr ***addr) void *iter = NULL; int addrcnt = 0; int i = 0; - memset( netaddr, 0, sizeof( PRNetAddr )); /* need to count the address, first */ while ( (iter = PR_EnumerateAddrInfo( iter, infop, 
0, netaddr )) != NULL ) { @@ -3391,17 +3388,17 @@ static void get_loopback_by_addr( void ) { #ifdef GETHOSTBYADDR_BUF_T - struct hostent hp; - GETHOSTBYADDR_BUF_T hbuf; + struct hostent hp = {0}; + GETHOSTBYADDR_BUF_T hbuf; #endif - unsigned long ipaddr; - struct in_addr ia; - int herrno, rc = 0; + unsigned long ipaddr; + struct in_addr ia; + int herrno = 0; + int rc = 0; - memset( (char *)&hp, 0, sizeof(hp)); ipaddr = htonl( INADDR_LOOPBACK ); (void) GETHOSTBYADDR( (char *)&ipaddr, sizeof( ipaddr ), - AF_INET, &hp, hbuf, sizeof(hbuf), &herrno ); + AF_INET, &hp, hbuf, sizeof(hbuf), &herrno ); } #endif /* RESOLVER_NEEDS_LOW_FILE_DESCRIPTORS */ diff --git a/ldap/servers/slapd/defbackend.c b/ldap/servers/slapd/defbackend.c index a07fddbbb..9a9bc98cc 100644 --- a/ldap/servers/slapd/defbackend.c +++ b/ldap/servers/slapd/defbackend.c @@ -32,8 +32,8 @@ /* * ---------------- Static Variables ----------------------------------------- */ -static struct slapdplugin defbackend_plugin; -static Slapi_Backend *defbackend_backend = NULL; +static struct slapdplugin defbackend_plugin = {0}; +static Slapi_Backend *defbackend_backend = NULL; /* @@ -58,26 +58,23 @@ defbackend_init( void ) { int rc; char *errmsg; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; slapi_log_err(SLAPI_LOG_TRACE, "defbackend_init", "<==\n"); /* * create a new backend */ - pblock_init( &pb ); defbackend_backend = slapi_be_new( DEFBACKEND_TYPE , DEFBACKEND_TYPE, 1 /* Private */, 0 /* Do Not Log Changes */ ); - if (( rc = slapi_pblock_set( &pb, SLAPI_BACKEND, defbackend_backend )) - != 0 ) { - errmsg = "slapi_pblock_set SLAPI_BACKEND failed"; - goto cleanup_and_return; + if (( rc = slapi_pblock_set( &pb, SLAPI_BACKEND, defbackend_backend )) != 0 ) { + errmsg = "slapi_pblock_set SLAPI_BACKEND failed"; + goto cleanup_and_return; } /* * create a plugin structure for this backend since the * slapi_pblock_set()/slapi_pblock_get() functions assume there is one. 
*/ - memset( &defbackend_plugin, '\0', sizeof( struct slapdplugin )); defbackend_plugin.plg_type = SLAPI_PLUGIN_DATABASE; defbackend_backend->be_database = &defbackend_plugin; if (( rc = slapi_pblock_set( &pb, SLAPI_PLUGIN, &defbackend_plugin )) diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c index 37f5317e5..a16718a6e 100644 --- a/ldap/servers/slapd/delete.c +++ b/ldap/servers/slapd/delete.c @@ -107,12 +107,10 @@ free_and_return:; Slapi_PBlock * slapi_delete_internal(const char *idn, LDAPControl **controls, int dummy) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; Slapi_PBlock *result_pb; int opresult; - pblock_init (&pb); - slapi_delete_internal_set_pb (&pb, idn, controls, NULL, plugin_get_default_component_id(), 0); delete_internal_pb (&pb); diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index 3d0723c2e..51ca3cc67 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -4102,7 +4102,6 @@ slapi_entry_diff(Slapi_Mods *smods, Slapi_Entry *e1, Slapi_Entry *e2, int diff_c static void delete_subtree(Slapi_PBlock *pb, const char *dn, void *plg_id) { - Slapi_PBlock mypb; int ret = 0; int opresult; @@ -4117,11 +4116,11 @@ delete_subtree(Slapi_PBlock *pb, const char *dn, void *plg_id) Slapi_DN *rootDN = slapi_sdn_new_dn_byval(dn); slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries); for (ep = entries; ep && *ep; ep++) { + Slapi_PBlock mypb = {0}; const Slapi_DN *sdn = slapi_entry_get_sdn_const(*ep); - - if (slapi_sdn_compare(sdn, rootDN) == 0) + if (slapi_sdn_compare(sdn, rootDN) == 0) { continue; - pblock_init(&mypb); + } slapi_delete_internal_set_pb(&mypb, slapi_sdn_get_dn(sdn), NULL, NULL, plg_id, 0); slapi_delete_internal_pb(&mypb); @@ -4157,7 +4156,6 @@ slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **curr_entries, char *my_logging_prestr = ""; Slapi_Entry **oep, **cep; int rval = 0; - Slapi_PBlock pb; #define SLAPI_ENTRY_FLAG_DIFF_IN_BOTH 0x80 if (NULL != logging_prestr && '\0' != *logging_prestr) @@ -4222,7 +4220,7 @@ slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **curr_entries, } if (0 == isfirst && force_update && testall) { - pblock_init(&pb); + Slapi_PBlock pb = {0}; slapi_modify_internal_set_pb_ext(&pb, slapi_entry_get_sdn_const(*oep), slapi_mods_get_ldapmods_byref(smods), @@ -4250,9 +4248,9 @@ slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **curr_entries, { if (force_update) { + Slapi_PBlock pb = {0}; LDAPMod **mods; slapi_entry2mods(*oep, NULL, &mods); - pblock_init(&pb); slapi_add_internal_set_pb(&pb, slapi_entry_get_dn_const(*oep), mods, NULL, plg_id, 0); slapi_add_internal_pb(&pb); @@ -4279,7 +4277,7 @@ slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **curr_entries, if (testall) { if (force_update) { - pblock_init(&pb); + Slapi_PBlock pb = {0}; delete_subtree(&pb, slapi_entry_get_dn_const(*cep), plg_id); pblock_done(&pb); } diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c index f02c4f6df..d64e7c675 100644 --- a/ldap/servers/slapd/fedse.c +++ b/ldap/servers/slapd/fedse.c @@ -1556,9 +1556,7 @@ static int init_dse_file(const char *configdir, Slapi_DN *config) { int rc= 1; /* OK */ - Slapi_PBlock pb; - - memset(&pb, 0, sizeof(pb)); + Slapi_PBlock pb = {0}; if(pfedse==NULL) { diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c index 085a8d9fb..77c369fbe 100644 --- a/ldap/servers/slapd/filter.c +++ b/ldap/servers/slapd/filter.c @@ -774,10 +774,9 @@ slapi_filter_free( struct slapi_filter *f, int recurse ) 
slapi_ch_free((void**)&f->f_mr_type); slapi_ber_bvdone(&f->f_mr_value); if (f->f_mr.mrf_destroy != NULL) { - Slapi_PBlock pb; - pblock_init (&pb); + Slapi_PBlock pb = {0}; if ( ! slapi_pblock_set (&pb, SLAPI_PLUGIN_OBJECT, f->f_mr.mrf_object)) { - f->f_mr.mrf_destroy (&pb); + f->f_mr.mrf_destroy (&pb); } } break; diff --git a/ldap/servers/slapd/generation.c b/ldap/servers/slapd/generation.c index bd1690b03..76dffda44 100644 --- a/ldap/servers/slapd/generation.c +++ b/ldap/servers/slapd/generation.c @@ -59,13 +59,11 @@ get_database_dataversion(const char *dn) void set_database_dataversion(const char *dn, const char *dataversion) { - LDAPMod gen_mod; - LDAPMod *mods[2]; + LDAPMod gen_mod = {0}; + LDAPMod *mods[2] = {0}; struct berval* gen_vals[2]; struct berval gen_val; - Slapi_PBlock *pb; - - memset (&gen_mod, 0, sizeof(gen_mod)); + Slapi_PBlock *pb; gen_mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES; gen_mod.mod_type = "nsslapd-dataversion"; /* JCMREPL - Shouldn't be a Netscape specific attribute name */ diff --git a/ldap/servers/slapd/getsocketpeer.c b/ldap/servers/slapd/getsocketpeer.c index 255322bdc..2a738a10c 100644 --- a/ldap/servers/slapd/getsocketpeer.c +++ b/ldap/servers/slapd/getsocketpeer.c @@ -82,7 +82,7 @@ int slapd_get_socket_peer(PRFileDesc *nspr_fd, uid_t *uid, gid_t *gid) #include <sys/types.h> #include <sys/stat.h> #include <errno.h> - struct msghdr msg; + struct msghdr msg = {0}; struct iovec iov; char dummy[8]; int pass_sd[2]; @@ -90,8 +90,6 @@ int slapd_get_socket_peer(PRFileDesc *nspr_fd, uid_t *uid, gid_t *gid) unsigned int retrycnt = 0xffffffff; /* safety net */ int myerrno = 0; - memset((void *)&msg, 0, sizeof(msg)); - iov.iov_base = dummy; iov.iov_len = sizeof(dummy); msg.msg_iov = &iov; diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c index 090b61335..e5b7c56f2 100644 --- a/ldap/servers/slapd/ldaputil.c +++ b/ldap/servers/slapd/ldaputil.c @@ -1776,8 +1776,8 @@ credentials_are_valid( { char *logname = "credentials_are_valid"; int myrc = 0; - krb5_creds mcreds; /* match these values */ - krb5_creds creds; /* returned creds */ + krb5_creds mcreds = {0}; /* match these values */ + krb5_creds creds = {0}; /* returned creds */ char *tgs_princ_name = NULL; krb5_timestamp currenttime; int authtracelevel = SLAPI_LOG_SHELL; /* special auth tracing */ @@ -1786,8 +1786,6 @@ credentials_are_valid( int time_buffer = 30; /* seconds - go ahead and renew if creds are about to expire */ - memset(&mcreds, 0, sizeof(mcreds)); - memset(&creds, 0, sizeof(creds)); *rc = 0; if (!cc) { /* ok - no error */ @@ -1890,7 +1888,7 @@ set_krb5_creds( krb5_principal princ = NULL; char *princ_name = NULL; krb5_error_code rc = 0; - krb5_creds creds; + krb5_creds creds = {0}; krb5_keytab kt = NULL; char *cc_name = NULL; char ktname[MAX_KEYTAB_NAME_LEN]; @@ -1902,10 +1900,6 @@ set_krb5_creds( appear to be used currently */ - /* wipe this out so we can safely free it later if we - short circuit */ - memset(&creds, 0, sizeof(creds)); - /* * we are using static variables and sharing an in-memory credentials cache * so we put a lock around all kerberos interactions diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 1e456c8ec..1195c549e 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -3705,7 +3705,7 @@ log__delete_error_logfile(int locked) int rv = 0; char *logstr; char buffer[BUFSIZ]; - char tbuf[TBUFSIZE]; + char tbuf[TBUFSIZE] = {0}; /* If we have only one log, then will delete this one */ if (loginfo.log_error_maxnumlogs == 1) { 
@@ -3826,7 +3826,6 @@ delete_logfile: return 0; } } - memset(tbuf, 0, sizeof(tbuf)); log_convert_time (delete_logp->l_ctime, tbuf, 1 /*short */); if (!locked) { /* if locked, we should not call slapi_log_err, diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 158d49d90..a59b7d5a2 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -1167,9 +1167,8 @@ cleanup: void signal2sigaction( int s, void *a ) { - struct sigaction act; + struct sigaction act = {0}; - memset(&act, 0, sizeof(struct sigaction)); act.sa_handler = (VFP)a; act.sa_flags = 0; (void)sigemptyset( &act.sa_mask ); @@ -2003,7 +2002,7 @@ static int slapd_exemode_ldif2db(void) { int return_value= 0; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; struct slapdplugin *plugin; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -2094,7 +2093,6 @@ slapd_exemode_ldif2db(void) if (!(slapd_ldap_debug & LDAP_DEBUG_BACKLDBM)) { g_set_detached(1); } - memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_plugin = plugin; pb.pb_removedupvals = ldif2db_removedupvals; @@ -2126,7 +2124,7 @@ static int slapd_exemode_db2ldif(int argc, char** argv) { int return_value= 0; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; struct slapdplugin *plugin; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); char *my_ldiffile; @@ -2218,7 +2216,6 @@ slapd_exemode_db2ldif(int argc, char** argv) if (!(slapd_ldap_debug & LDAP_DEBUG_BACKLDBM)) { g_set_detached(1); } - memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_plugin = plugin; pb.pb_ldif_include = db2ldif_include; @@ -2344,7 +2341,7 @@ static int slapd_exemode_db2index(void) { int return_value= 0; struct slapdplugin *plugin; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); mapping_tree_init(); @@ -2414,7 +2411,6 @@ static int slapd_exemode_db2index(void) usage( myname, extraname ); return 1; } - memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_plugin = plugin; pb.pb_db2index_attrs = db2index_attrs; @@ -2432,7 +2428,7 @@ static int slapd_exemode_db2archive(void) { int return_value= 0; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; struct slapdplugin *backend_plugin; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -2469,7 +2465,6 @@ slapd_exemode_db2archive(void) g_set_detached(1); } - memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_plugin = backend_plugin; pb.pb_instance_name = NULL; @@ -2484,7 +2479,7 @@ static int slapd_exemode_archive2db(void) { int return_value= 0; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; struct slapdplugin *backend_plugin; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -2522,7 +2517,6 @@ slapd_exemode_archive2db(void) g_set_detached(1); } - memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_plugin = backend_plugin; pb.pb_instance_name = cmd_line_instance_name; @@ -2541,7 +2535,7 @@ static int slapd_exemode_upgradedb(void) { int return_value= 0; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; struct slapdplugin *backend_plugin; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -2579,7 +2573,6 @@ slapd_exemode_upgradedb(void) return 1; } - memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_plugin = backend_plugin; pb.pb_seq_val = archive_name; @@ -2608,7 +2601,7 @@ static int slapd_exemode_upgradednformat(void) { int rc = -1; /* error, by default */ - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; struct slapdplugin *backend_plugin; 
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -2650,7 +2643,6 @@ slapd_exemode_upgradednformat(void) goto bail; } - memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_plugin = backend_plugin; pb.pb_instance_name = cmd_line_instance_name; @@ -2686,7 +2678,7 @@ static int slapd_exemode_dbverify(void) { int return_value = 0; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; struct slapdplugin *backend_plugin; /* this should be the first time to be called! if the init order @@ -2706,7 +2698,6 @@ slapd_exemode_dbverify(void) return 1; } - memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_seq_type = dbverify_verbose; pb.pb_plugin = backend_plugin; diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c index e0f1ffe3d..4e9175706 100644 --- a/ldap/servers/slapd/mapping_tree.c +++ b/ldap/servers/slapd/mapping_tree.c @@ -3318,7 +3318,7 @@ slapi_get_suffix_by_dn(const Slapi_DN *dn) int slapi_mtn_set_referral(const Slapi_DN *sdn, char ** referral) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; Slapi_Mods smods; int rc = LDAP_SUCCESS,i = 0, j = 0; Slapi_DN* node_sdn; @@ -3390,7 +3390,6 @@ slapi_mtn_set_referral(const Slapi_DN *sdn, char ** referral) if ( do_modify ) { - pblock_init (&pb); slapi_modify_internal_set_pb_ext (&pb, node_sdn, slapi_mods_get_ldapmods_byref(&smods), NULL, NULL, (void *) plugin_get_default_component_id(), 0); @@ -3416,7 +3415,7 @@ slapi_mtn_set_referral(const Slapi_DN *sdn, char ** referral) int slapi_mtn_set_state(const Slapi_DN *sdn, char *state) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; Slapi_Mods smods; int rc = LDAP_SUCCESS; Slapi_DN *node_sdn; @@ -3444,7 +3443,6 @@ slapi_mtn_set_state(const Slapi_DN *sdn, char *state) /* Otherwise, means that the state has changed, modify it */ slapi_mods_init (&smods, 1); slapi_mods_add(&smods, LDAP_MOD_REPLACE, "nsslapd-state", strlen(state), state); - pblock_init (&pb); slapi_modify_internal_set_pb_ext (&pb, node_sdn, slapi_mods_get_ldapmods_byref(&smods), NULL, NULL, (void *) plugin_get_default_component_id(), 0); @@ -3466,7 +3464,7 @@ bail: Slapi_Attr * mtn_get_attr(char* node_dn, char * type) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; int res = 0; Slapi_Entry **entries = NULL; Slapi_Attr *attr = NULL; @@ -3475,7 +3473,6 @@ mtn_get_attr(char* node_dn, char * type) attrs = (char **)slapi_ch_calloc(2, sizeof(char *)); attrs[0] = slapi_ch_strdup(type); - pblock_init(&pb); slapi_search_internal_set_pb(&pb, node_dn, LDAP_SCOPE_BASE, "objectclass=nsMappingTree", attrs, 0, NULL, NULL, (void *) plugin_get_default_component_id(), 0); @@ -3856,13 +3853,12 @@ static void dump_mapping_tree(mapping_tree_node *parent, int depth) static int _mtn_update_config_param(int op, char *type, char *strvalue) { - Slapi_PBlock confpb; + Slapi_PBlock confpb = {0}; Slapi_DN sdn; Slapi_Mods smods; LDAPMod **mods; int rc = LDAP_PARAM_ERROR; - pblock_init (&confpb); slapi_mods_init (&smods, 0); switch (op) { case LDAP_MOD_DELETE: diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 3aa4359ee..51bf05752 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -406,12 +406,10 @@ slapi_modify_internal(const char *idn, LDAPControl **controls, int dummy) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; Slapi_PBlock *result_pb = NULL; int opresult; - pblock_init(&pb); - slapi_modify_internal_set_pb (&pb, idn, (LDAPMod**)mods, controls, NULL, (void *)plugin_get_default_component_id(), 0); diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c 
index 8b7753940..15f52105b 100644 --- a/ldap/servers/slapd/modrdn.c +++ b/ldap/servers/slapd/modrdn.c @@ -245,14 +245,12 @@ slapi_modrdn_internal(const char *iodn, const char *inewrdn, int deloldrdn, LDAP Slapi_PBlock * slapi_rename_internal(const char *iodn, const char *inewrdn, const char *inewsuperior, int deloldrdn, LDAPControl **controls, int dummy) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; Slapi_PBlock *result_pb = NULL; int opresult= 0; Slapi_DN sdn; Slapi_DN newsuperiorsdn; - pblock_init (&pb); - slapi_sdn_init_dn_byref(&sdn, iodn); slapi_sdn_init_dn_byref(&newsuperiorsdn, inewsuperior); diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c index 19e572ad8..ccbc549fd 100644 --- a/ldap/servers/slapd/operation.c +++ b/ldap/servers/slapd/operation.c @@ -148,6 +148,7 @@ operation_init(Slapi_Operation *o, int flags) if (NULL != o) { BerElement *ber = o->o_ber; /* may have already been set */ + /* We can't get rid of this til we remove the operation stack. */ memset(o,0,sizeof(Slapi_Operation)); o->o_ber = ber; o->o_msgid = -1; @@ -195,7 +196,7 @@ operation_new(int flags) BerElement *ber = NULL; if(flags & OP_FLAG_INTERNAL) { - o = (Slapi_Operation *) slapi_ch_malloc(sizeof(Slapi_Operation)); + o = (Slapi_Operation *) slapi_ch_calloc(1, sizeof(Slapi_Operation)); } else { diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c index dcac322c1..52b8cf56a 100644 --- a/ldap/servers/slapd/pblock.c +++ b/ldap/servers/slapd/pblock.c @@ -35,7 +35,7 @@ pblock_init_common( ) { PR_ASSERT( NULL != pb ); - memset( pb, '\0', sizeof(Slapi_PBlock) ); + /* No need to memset, this is only called in backend_manager, and it uses {0} */ pb->pb_backend = be; pb->pb_conn = conn; pb->pb_op = op; diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c index 4cdd5672c..7744aa67c 100644 --- a/ldap/servers/slapd/plugin.c +++ b/ldap/servers/slapd/plugin.c @@ -1753,10 +1753,9 @@ plugin_dependency_startall(int argc, char** argv, char *errmsg, int operation, c if(!config[plugin_index].entry_created) { int plugin_actions = 0; - Slapi_PBlock newpb; + Slapi_PBlock newpb = {0}; Slapi_Entry *newe; - pblock_init(&newpb); newe = slapi_entry_dup( config[plugin_index].e ); slapi_add_entry_internal_set_pb(&newpb, newe, NULL, plugin_get_default_component_id(), plugin_actions); @@ -2776,8 +2775,9 @@ plugin_free(struct slapdplugin *plugin) } release_componentid(plugin->plg_identity); slapi_counter_destroy(&plugin->plg_op_counter); - if (!plugin->plg_group) + if (!plugin->plg_group) { plugin_config_cleanup(&plugin->plg_conf); + } slapi_ch_free((void**)&plugin); } @@ -2844,7 +2844,7 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group, struct slapi_componentid *cid = NULL; const char *existname = 0; slapi_plugin_init_fnptr initfunc = p_initfunc; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; int status = 0; int enabled = 1; char *configdir = 0; @@ -3067,7 +3067,6 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group, PR_snprintf(attrname, sizeof(attrname), "%s%d", ATTR_PLUGIN_ARG, ++ii); } while (skipped < MAXSKIPPED); - memset((char *)&pb, '\0', sizeof(pb)); slapi_pblock_set(&pb, SLAPI_PLUGIN, plugin); slapi_pblock_set(&pb, SLAPI_PLUGIN_VERSION, (void *)SLAPI_PLUGIN_CURRENT_VERSION); @@ -3472,9 +3471,7 @@ plugin_remove_plugins(struct slapdplugin *plugin_entry, char *plugin_type) /* * Call the close function, cleanup the hashtable & the global shutdown list */ - Slapi_PBlock pb; - - pblock_init(&pb); + Slapi_PBlock pb = {0}; 
plugin_set_stopped(plugin); if (slapi_counter_get_value(plugin->plg_op_counter) > 0){ /* @@ -4328,7 +4325,7 @@ bail: int slapi_set_plugin_default_config(const char *type, Slapi_Value *value) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; Slapi_Entry **entries = NULL; int rc = LDAP_SUCCESS; char **search_attrs = NULL; /* used by search */ @@ -4340,7 +4337,6 @@ slapi_set_plugin_default_config(const char *type, Slapi_Value *value) charray_add(&search_attrs, slapi_ch_strdup(type)); /* cn=plugin default config,cn=config */ - pblock_init(&pb); slapi_search_internal_set_pb(&pb, SLAPI_PLUGIN_DEFAULT_CONFIG, /* Base DN (normalized) */ LDAP_SCOPE_BASE, @@ -4432,7 +4428,7 @@ slapi_set_plugin_default_config(const char *type, Slapi_Value *value) int slapi_get_plugin_default_config(char *type, Slapi_ValueSet **valueset) { - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; Slapi_Entry **entries = NULL; int rc = LDAP_PARAM_ERROR; char **search_attrs = NULL; /* used by search */ @@ -4444,7 +4440,6 @@ slapi_get_plugin_default_config(char *type, Slapi_ValueSet **valueset) charray_add(&search_attrs, slapi_ch_strdup(type)); /* cn=plugin default config,cn=config */ - pblock_init(&pb); slapi_search_internal_set_pb(&pb, SLAPI_PLUGIN_DEFAULT_CONFIG, /* Base DN (normalized) */ LDAP_SCOPE_BASE, diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c index 05ee90dc5..8cbdf0673 100644 --- a/ldap/servers/slapd/plugin_internal_op.c +++ b/ldap/servers/slapd/plugin_internal_op.c @@ -205,7 +205,7 @@ slapi_seq_callback( const char *ibase, plugin_referral_entry_callback ref_callback) { int r; - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; if (ibase == NULL) { @@ -214,8 +214,6 @@ slapi_seq_callback( const char *ibase, return -1; } - pblock_init(&pb); - slapi_seq_internal_set_pb(&pb, (char *)ibase, type, attrname, val, attrs, attrsonly, controls, plugin_get_default_component_id(), 0); diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c index 35874ec62..d216d12b9 100644 --- a/ldap/servers/slapd/plugin_mr.c +++ b/ldap/servers/slapd/plugin_mr.c @@ -574,7 +574,7 @@ plugin_mr_filter_create (mr_filter_t* f) { int rc = LDAP_UNAVAILABLE_CRITICAL_EXTENSION; struct slapdplugin* mrp = plugin_mr_find_registered (f->mrf_oid); - Slapi_PBlock pb; + Slapi_PBlock pb = {0}; if (mrp != NULL) { @@ -599,7 +599,6 @@ plugin_mr_filter_create (mr_filter_t* f) if (mrp) { /* set the default index create fn */ - pblock_init(&pb); slapi_pblock_set(&pb, SLAPI_PLUGIN, mrp); slapi_pblock_set(&pb, SLAPI_PLUGIN_MR_FILTER_CREATE_FN, default_mr_filter_create); slapi_pblock_set(&pb, SLAPI_PLUGIN_MR_INDEXER_CREATE_FN, default_mr_indexer_create); diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index ff77c920b..f50f57381 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -1807,8 +1807,7 @@ schema_list_attributes_callback(struct asyntaxinfo *asi, void *arg) char ** slapi_schema_list_attribute_names(unsigned long flag) { - struct listargs aew; - memset(&aew,0,sizeof(struct listargs)); + struct listargs aew = {0}; aew.flag=flag; attr_syntax_enumerate_attrs(schema_list_attributes_callback, &aew, @@ -5339,8 +5338,7 @@ init_schema_dse_ext(char *schemadir, Slapi_Backend *be, int dont_write = 1; int merge = 1; int dont_dup_check = 1; - Slapi_PBlock pb; - memset(&pb, 0, sizeof(pb)); + Slapi_PBlock pb = {0}; /* don't write out the file when reading */ slapi_pblock_set(&pb, SLAPI_DSE_DONT_WRITE_WHEN_ADDING, (void*)&dont_write); /* duplicate entries are allowed */ diff --git 
a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c index 5ad22fd04..f6da41438 100644 --- a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -938,7 +938,7 @@ freeChildren( char **list ) { static void entrySetValue(Slapi_DN *sdn, char *type, char *value) { - Slapi_PBlock mypb; + Slapi_PBlock mypb = {0}; LDAPMod attr; LDAPMod *mods[2]; char *values[2]; @@ -954,7 +954,6 @@ entrySetValue(Slapi_DN *sdn, char *type, char *value) mods[0] = &attr; mods[1] = NULL; - pblock_init(&mypb); slapi_modify_internal_set_pb_ext(&mypb, sdn, mods, NULL, NULL, (void *)plugin_get_default_component_id(), 0); slapi_modify_internal_pb(&mypb); pblock_done(&mypb); diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c index 0305ec811..ad52e9d99 100644 --- a/ldap/servers/slapd/task.c +++ b/ldap/servers/slapd/task.c @@ -701,15 +701,13 @@ static Slapi_Entry *get_internal_entry(Slapi_PBlock *pb, char *dn) static void modify_internal_entry(char *dn, LDAPMod **mods) { - Slapi_PBlock pb; Slapi_Operation *op; int ret = 0; int tries = 0; int dont_write_file = 1; do { - - pblock_init(&pb); + Slapi_PBlock pb = {0}; slapi_modify_internal_set_pb(&pb, dn, mods, NULL, NULL, (void *)plugin_get_default_component_id(), 0); @@ -836,7 +834,7 @@ static int task_import_add(Slapi_PBlock *pb, Slapi_Entry *e, int idx, rv = 0; const char *do_attr_indexes, *uniqueid_kind_str; int uniqueid_kind = SLAPI_UNIQUEID_GENERATE_TIME_BASED; - Slapi_PBlock mypb; + Slapi_PBlock mypb = {0}; Slapi_Task *task; char *nameFrombe_name = NULL; const char *encrypt_on_import = NULL; @@ -978,7 +976,6 @@ static int task_import_add(Slapi_PBlock *pb, Slapi_Entry *e, goto out; } - memset(&mypb, 0, sizeof(mypb)); mypb.pb_backend = be; mypb.pb_plugin = be->be_database; mypb.pb_removedupvals = atoi(fetch_attr(e, "nsImportChunkSize", "0")); @@ -1797,7 +1794,7 @@ task_upgradedb_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int rv = SLAPI_DSE_CALLBACK_OK; Slapi_Backend *be = NULL; Slapi_Task *task = NULL; - Slapi_PBlock mypb; + Slapi_PBlock mypb = {0}; const char *archive_dir = NULL; const char *force = NULL; const char *database_type = "ldbm database"; @@ -1864,7 +1861,6 @@ task_upgradedb_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, task->task_work = 1; task->task_progress = 0; - memset(&mypb, 0, sizeof(mypb)); mypb.pb_backend = be; mypb.pb_plugin = be->be_database; if (force && 0 == strcasecmp(force, "true")) @@ -1956,17 +1952,14 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, /* skip comments */ continue; } else { - char env_value[sizeof(line)]; - char env_var[sizeof(line)]; + char env_value[sizeof(line)] = {0}; + char env_var[sizeof(line)] = {0}; int using_setenv = 0; int value_index = 0; int start_value = 0; int var_index = 0; int inquotes = 0; - memset(env_var, 0, sizeof(env_var)); - memset(env_value, 0, sizeof(env_value)); - /* * Remove leading spaces and tabs */ diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c index 89bc91de6..db17babc3 100644 --- a/ldap/servers/slapd/time.c +++ b/ldap/servers/slapd/time.c @@ -364,12 +364,11 @@ write_genTime (time_t from, struct berval* into) time_t read_genTime(struct berval*from) { - struct tm t; + struct tm t = {0}; time_t retTime; time_t diffsec = 0; int i, gflag = 0, havesec = 0; - memset (&t, 0, sizeof(t)); t.tm_isdst = -1; t.tm_year = strntoul (from->bv_val , 4, 10) - 1900L; t.tm_mon = strntoul (from->bv_val + 4, 2, 10) - 1; diff --git a/ldap/servers/slapd/uniqueid.c b/ldap/servers/slapd/uniqueid.c index 
c878a8f53..3f1f6a891 100644 --- a/ldap/servers/slapd/uniqueid.c +++ b/ldap/servers/slapd/uniqueid.c @@ -37,16 +37,7 @@ static int isValidFormat (const char * buff); Slapi_UniqueID *slapi_uniqueIDNew () { Slapi_UniqueID *uId; - uId = (Slapi_UniqueID*)slapi_ch_malloc (sizeof (Slapi_UniqueID)); - - if (uId == NULL) - { - slapi_log_err(SLAPI_LOG_ERR, MODULE, "uniqueIDNew: " - "failed to allocate new id.\n"); - return NULL; - } - - memset (uId, 0, sizeof (Slapi_UniqueID)); + uId = (Slapi_UniqueID*)slapi_ch_calloc (1, sizeof (Slapi_UniqueID)); return uId; } diff --git a/ldap/servers/slapd/uniqueidgen.c b/ldap/servers/slapd/uniqueidgen.c index a6c03d702..6ac0799c1 100644 --- a/ldap/servers/slapd/uniqueidgen.c +++ b/ldap/servers/slapd/uniqueidgen.c @@ -176,16 +176,11 @@ int slapi_uniqueIDGenerateFromNameString (char **uId, const void *name, int namelen) { int rc; - Slapi_UniqueID idBase; - Slapi_UniqueID idGen; + Slapi_UniqueID idBase = {0}; + Slapi_UniqueID idGen = {0}; /* just use Id of all 0 as base id */ - if (uIdBase == NULL) - { - memset (&idBase, 0, sizeof (idBase)); - memset (&idGen, 0, sizeof (idGen)); - } - else + if (uIdBase != NULL) { rc = slapi_uniqueIDScan (&idBase, uIdBase); if (rc != UID_SUCCESS) diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index 3a775224c..48fa3c48b 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -412,7 +412,7 @@ filter_stuff_func(void *arg, const char *val, PRUint32 slen) char* slapi_filter_sprintf(const char *fmt, ...) { - struct filter_ctx ctx; + struct filter_ctx ctx = {0}; va_list args; char *buf; int rc; @@ -1143,7 +1143,7 @@ int slapd_chown_if_not_owner(const char *filename, uid_t uid, gid_t gid) { int fd = -1; - struct stat statbuf; + struct stat statbuf = {0}; int result = 1; if (!filename) { return result; @@ -1153,7 +1153,6 @@ slapd_chown_if_not_owner(const char *filename, uid_t uid, gid_t gid) if (fd == -1) { return result; } - memset(&statbuf, '\0', sizeof(statbuf)); if (!(result = fstat(fd, &statbuf))) { if (((uid != -1) && (uid != statbuf.st_uid)) || @@ -1519,16 +1518,16 @@ int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size *availpages = util_getvirtualmemsize() / *pagesize; /* solaris has THE most annoying way to get this info */ { - struct prpsinfo psi; + struct prpsinfo psi = {0}; char fn[40]; int fd; sprintf(fn, "/proc/%d", getpid()); fd = open(fn, O_RDONLY); if (fd >= 0) { - memset(&psi, 0, sizeof(psi)); - if (ioctl(fd, PIOCPSINFO, (void *)&psi) == 0) + if (ioctl(fd, PIOCPSINFO, (void *)&psi) == 0) { *procpages = psi.pr_size; + } close(fd); } } diff --git a/ldap/servers/slapd/uuid.c b/ldap/servers/slapd/uuid.c index bfa8467f5..08ca7b3cb 100644 --- a/ldap/servers/slapd/uuid.c +++ b/ldap/servers/slapd/uuid.c @@ -118,7 +118,7 @@ typedef struct static unsigned int uuid_seed = 0; /* seed for the random generator */ - uuid_state _state; /* generator's state */ +uuid_state _state;/* generator's state */ /* uuid_init -- initializes uuid layer */ int uuid_init (const char *configDir, const Slapi_DN *configDN, PRBool mtGen) @@ -276,16 +276,13 @@ void uuid_create_from_name(guid_t * uuid, /* resulting UUID */ { PK11Context *c = NULL; - unsigned char hash[16]; + unsigned char hash[16] = {0}; unsigned int hashLen; - guid_t net_nsid; /* context UUID in network byte order */ - - memset(hash, 0, 16); + guid_t net_nsid = {0}; /* context UUID in network byte order */ /* put name space ID in network byte order so it hashes the same no matter what endian machine we're on */ - 
memset(&net_nsid, 0, sizeof(guid_t)); net_nsid.time_low = PR_htonl(nsid.time_low); net_nsid.time_mid = PR_htons(nsid.time_mid); net_nsid.time_hi_and_version = PR_htons(nsid.time_hi_and_version); diff --git a/lib/ldaputil/certmap.c b/lib/ldaputil/certmap.c index b05bbba22..8525f5135 100644 --- a/lib/ldaputil/certmap.c +++ b/lib/ldaputil/certmap.c @@ -164,11 +164,10 @@ static int certmap_name_to_secoid (const char *str) NSAPI_PUBLIC int ldapu_list_alloc (LDAPUList_t **list) { - *list = (LDAPUList_t *)malloc(sizeof(LDAPUList_t)); + *list = (LDAPUList_t *)calloc(1, sizeof(LDAPUList_t)); if (!*list) return LDAPU_ERR_OUT_OF_MEMORY; - memset((void *)*list, 0, sizeof(LDAPUList_t)); return LDAPU_SUCCESS; } @@ -193,13 +192,12 @@ NSAPI_PUBLIC int ldapu_list_add_info (LDAPUList_t *list, void *info) LDAPUListNode_t *node; /* Allocate the list node and set info in the node. */ - node = (LDAPUListNode_t *)malloc(sizeof(LDAPUListNode_t)); + node = (LDAPUListNode_t *)calloc(1, sizeof(LDAPUListNode_t)); if (!node) { return LDAPU_ERR_OUT_OF_MEMORY; } - memset((void *)node, 0, sizeof(LDAPUListNode_t)); node->info = info; return ldapu_list_add_node(list, node); @@ -281,15 +279,13 @@ static int dbinfo_to_certinfo (DBConfDBInfo_t *db_info, *certinfo_out = 0; - certinfo = (LDAPUCertMapInfo_t *)malloc(sizeof(LDAPUCertMapInfo_t)); + certinfo = (LDAPUCertMapInfo_t *)calloc(1, sizeof(LDAPUCertMapInfo_t)); if (!certinfo) { rv = LDAPU_ERR_OUT_OF_MEMORY; goto error; } - memset((void *)certinfo, 0, sizeof(LDAPUCertMapInfo_t)); - /* hijack few structures rather then copy. Make the pointers to the structures NULL in the original structure so that they don't freed up when db_info is freed. */ @@ -1453,7 +1449,7 @@ int ldapu_certmap_init (const char *config_file, LDAPUCertMapInfo_t **certmap_default) { int rv; - certmap_listinfo = (LDAPUCertMapListInfo_t *)malloc(sizeof(LDAPUCertMapListInfo_t)); + certmap_listinfo = (LDAPUCertMapListInfo_t *)calloc(1, sizeof(LDAPUCertMapListInfo_t)); *certmap_list = 0; *certmap_default = 0; @@ -1461,8 +1457,6 @@ int ldapu_certmap_init (const char *config_file, if (!certmap_listinfo) return LDAPU_ERR_OUT_OF_MEMORY; - memset((void *)certmap_listinfo, 0, sizeof(LDAPUCertMapListInfo_t)); - rv = certmap_read_certconfig_file(config_file); if (rv == LDAPU_SUCCESS) {
0
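The diff in the row above systematically replaces the declare-then-memset idiom with zero-initialization at the point of declaration (and slapi_ch_malloc with slapi_ch_calloc). A minimal standalone sketch of that pattern, using hypothetical struct names rather than the server's real Slapi_PBlock/PRNetAddr types; the "long form" initializer mirrors the patch's comments about structs whose first member is itself a struct:

#include <string.h>

/* Hypothetical stand-ins for types such as Slapi_PBlock or PRNetAddr. */
struct inner { int family; int ip; };
struct outer { struct inner in; int flags; };

static void old_style(void)
{
    struct outer o;               /* contents indeterminate here */
    memset(&o, 0, sizeof(o));     /* zeroed only after this extra call */
    o.in.family = 2;
    (void)o;
}

static void new_style(void)
{
    struct outer a = {0};         /* zero-initialized at declaration */
    /* When the first member is itself an aggregate, some compilers warn
     * about missing braces with a plain {0}; the patch uses the long form
     * ({{0}, 0} or {{0}}) in those spots for the same reason. */
    struct outer b = {{0}, 0};
    a.in.family = 2;
    b.flags = 1;
    (void)a;
    (void)b;
}

int main(void)
{
    old_style();
    new_style();
    return 0;
}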
6c1a7f34b435a5affff76759e36153b7df7c12ec
389ds/389-ds-base
Resolves: #469243 Summary: ACL: support group filter Description: extended userattr #GROUPDN value to support LDAPURL
commit 6c1a7f34b435a5affff76759e36153b7df7c12ec Author: Noriko Hosoi <[email protected]> Date: Fri Oct 31 00:16:02 2008 +0000 Resolves: #469243 Summary: ACL: support group filter Description: extended userattr #GROUPDN value to support LDAPURL diff --git a/ldap/servers/plugins/acl/acllas.c b/ldap/servers/plugins/acl/acllas.c index b38150c2e..88beea158 100644 --- a/ldap/servers/plugins/acl/acllas.c +++ b/ldap/servers/plugins/acl/acllas.c @@ -2355,36 +2355,90 @@ acllas__eval_memberGroupDnAttr (char *attrName, Slapi_Entry *e, Slapi_Attr *attr; char *s, *p; - char *str, *s_str, *base, *groupattr; + char *str, *s_str, *base, *groupattr = NULL; int i,j,k,matched, enumerate_groups; aclUserGroup *u_group; char ebuf [ BUFSIZ ]; Slapi_Value *sval=NULL; const struct berval *attrVal; - - /* Parse the URL -- We can't use the ldap_url_parse() - ** we don't follow thw complete url naming scheme - */ + int qcnt = 0; + Slapi_PBlock *myPb = NULL; + Slapi_Entry **grpentries = NULL; + + /* Parse the URL -- getting the group attr and counting up '?'s. + * If there is no group attr and there are 3 '?' marks, + * we parse the URL with ldap_url_parse to get base dn and filter. + */ s_str = str = slapi_ch_strdup(attrName); while (str && ldap_utf8isspace(str)) LDAP_UTF8INC( str ); str +=8; s = strchr (str, '?'); if (s) { + qcnt++; p = s; p++; *s = '\0'; base = str; s = strchr (p, '?'); - if (s) *s = '\0'; + if (s) { + qcnt++; + *s = '\0'; + if (NULL != strchr (++s, '?')) { + qcnt++; + } + } groupattr = p; } else { slapi_ch_free ( (void **)&s_str ); return ACL_FALSE; } + + /* Full LDAPURL is given? */ + if ((NULL == groupattr || 0 == strlen(groupattr)) && 3 == qcnt) { + LDAPURLDesc *ludp = NULL; + int rval; + + if ( 0 != ldap_url_parse( attrName, &ludp) ) { + slapi_ch_free ( (void **)&s_str ); + return ACL_FALSE; + } + + /* Use new search internal API */ + myPb = slapi_pblock_new (); + slapi_search_internal_set_pb( + myPb, + ludp->lud_dn, + ludp->lud_scope, + ludp->lud_filter, + NULL, + 0, + NULL /* controls */, + NULL /* uniqueid */, + aclplugin_get_identity (ACL_PLUGIN_IDENTITY), + 0 ); + slapi_search_internal_pb(myPb); + ldap_free_urldesc( ludp ); + slapi_pblock_get(myPb, SLAPI_PLUGIN_INTOP_RESULT, &rval); + if (rval != LDAP_SUCCESS) { + slapi_ch_free ( (void **)&s_str ); + slapi_free_search_results_internal(myPb); + slapi_pblock_destroy (myPb); + return ACL_FALSE; + } + + slapi_pblock_get(myPb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &grpentries); + if ((grpentries == NULL) || (grpentries[0] == NULL)) { + slapi_ch_free ( (void **)&s_str ); + slapi_free_search_results_internal(myPb); + slapi_pblock_destroy (myPb); + return ACL_FALSE; + } + } + if ( (u_group = aclg_get_usersGroup ( aclpb , n_clientdn )) == NULL) { - slapi_log_error( SLAPI_LOG_ACL, plugin_name, + slapi_log_error( SLAPI_LOG_ACL, plugin_name, "Failed to find/allocate a usergroup--aborting evaluation\n", 0, 0); slapi_ch_free ( (void **)&s_str ); return(ACL_DONT_KNOW); @@ -2540,12 +2594,28 @@ acllas__eval_memberGroupDnAttr (char *attrName, Slapi_Entry *e, j, ACL_ESCAPE_STRING_WITH_PUNCTUATION (u_group->aclug_member_groups[j], ebuf),0); matched = ACL_FALSE; - slapi_entry_attr_find( e, groupattr, &attr); - if (attr == NULL) { - slapi_ch_free ( (void **)&s_str ); - return ACL_FALSE; - } - { + if ((NULL == groupattr || 0 == strlen(groupattr)) && 3 == qcnt) { + /* Full LDAPURL case */ + for (k = 0; u_group->aclug_member_groups[k]; k++) { /* groups the bind + user belong to */ + Slapi_Entry **ep; + for (ep = grpentries; *ep; ep++) { /* groups having ACI */ + char 
*n_edn = slapi_entry_get_ndn(*ep); + if (slapi_utf8casecmp((ACLUCHP)u_group->aclug_member_groups[k], + (ACLUCHP)n_edn) == 0) { + matched = ACL_TRUE; + break; + } + } + } + slapi_free_search_results_internal(myPb); + slapi_pblock_destroy(myPb); + } else { + slapi_entry_attr_find( e, groupattr, &attr); + if (attr == NULL) { + slapi_ch_free ( (void **)&s_str ); + return ACL_FALSE; + } k = slapi_attr_first_value ( attr,&sval ); while ( k != -1 ) { char *n_attrval;
0
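The commit above distinguishes the legacy "base?groupattr" form from a full LDAP URL by counting '?' separators: three of them, with an empty group-attribute part, select the new code path that hands the value to ldap_url_parse and an internal search. A simplified, self-contained sketch of just that counting step, with hypothetical example URLs (real values would come from the ACI being evaluated):

#include <stdio.h>
#include <string.h>

/* Count '?' separators in a userattr #GROUPDN value.  In the patch, three
 * separators plus an empty group-attribute part mean a complete
 * base?attrs?scope?filter LDAP URL; one separator keeps the historical
 * base?groupattr interpretation. */
static int count_url_separators(const char *url)
{
    int qcnt = 0;
    const char *p = strchr(url, '?');

    while (p != NULL) {
        qcnt++;
        p = strchr(p + 1, '?');
    }
    return qcnt;
}

int main(void)
{
    /* Hypothetical examples, not taken from the server's test suite. */
    const char *legacy = "ldap:///o=example.com?uniquemember";
    const char *full   = "ldap:///o=example.com??sub?(cn=*adm*)";

    printf("%s -> %d '?' separators\n", legacy, count_url_separators(legacy));
    printf("%s -> %d '?' separators\n", full, count_url_separators(full));
    return 0;
}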
1358e0fc5f75b2e9439d41f84079fd283af436e3
389ds/389-ds-base
Coverity Fix 13138: Dereference after null check Fix description: Variable upperkey given to idl_new_range_fetch could be NULL or its data field could be NULL. That is interpreted there is no upper bound. This patch adds NULL check for upperkey and upperkey->data. Also, fixing a compiler warning. Reviewed by Rich (Thank you!!)
commit 1358e0fc5f75b2e9439d41f84079fd283af436e3 Author: Noriko Hosoi <[email protected]> Date: Wed Feb 20 17:33:27 2013 -0800 Coverity Fix 13138: Dereference after null check Fix description: Variable upperkey given to idl_new_range_fetch could be NULL or its data field could be NULL. That is interpreted there is no upper bound. This patch adds NULL check for upperkey and upperkey->data. Also, fixing a compiler warning. Reviewed by Rich (Thank you!!) diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c index 15cab5598..2b52f33fc 100644 --- a/ldap/servers/slapd/back-ldbm/idl_new.c +++ b/ldap/servers/slapd/back-ldbm/idl_new.c @@ -415,14 +415,13 @@ idl_new_range_fetch( time_t curtime; void *saved_key = NULL; - if (NEW_IDL_NOOP == *flag_err) - { - *flag_err = 0; + if (NULL == flag_err) { return NULL; } - if(upperkey == NULL){ - LDAPDebug(LDAP_DEBUG_ANY, "idl_new_range_fetch: upperkey is NULL\n",0,0,0); - return ret; + + *flag_err = 0; + if (NEW_IDL_NOOP == *flag_err) { + return NULL; } dblayer_txn_init(li, &s_txn); if (txn) { @@ -486,7 +485,7 @@ idl_new_range_fetch( /* Iterate over the duplicates, amassing them into an IDL */ #ifdef DB_USE_BULK_FETCH while (cur_key.data && - (upperkey->data ? + (upperkey && upperkey->data ? ((operator == SLAPI_OP_LESS) ? DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) < 0 : DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) <= 0) : @@ -575,7 +574,8 @@ idl_new_range_fetch( #endif ret = cursor->c_get(cursor, &cur_key, &data, DB_NEXT_DUP|DB_MULTIPLE); if (ret) { - if (DBT_EQ(&cur_key, upperkey)) { /* this is the last key */ + if (upperkey && upperkey->data && DBT_EQ(&cur_key, upperkey)) { + /* this is the last key */ break; } /* First set the cursor (DB_NEXT_NODUP does not take DB_MULTIPLE) */ @@ -596,7 +596,7 @@ idl_new_range_fetch( } } #else - while (upperkey->data ? + while (upperkey && upperkey->data ? ((operator == SLAPI_OP_LESS) ? DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) < 0 : DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) <= 0) : @@ -632,7 +632,8 @@ idl_new_range_fetch( ret = cursor->c_get(cursor,&cur_key,&data,DB_NEXT_DUP); count++; if (ret) { - if (DBT_EQ(&cur_key, upperkey)) { /* this is the last key */ + if (upperkey && upperkey->data && DBT_EQ(&cur_key, upperkey)) { + /* this is the last key */ break; } DBT_FREE_PAYLOAD(cur_key);
0
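The Coverity fix above treats a NULL upperkey, or an upperkey whose data field is NULL, as "no upper bound" instead of dereferencing it. A rough sketch of that guarded range check with a stand-in key type (the real code compares Berkeley DB DBTs via ai->ai_key_cmp_fn inside the cursor loop):

#include <stddef.h>
#include <string.h>

/* Stand-in for the DBT key type used by the real cursor loop. */
typedef struct {
    void *data;
    size_t size;
} range_key;

static int key_cmp(const range_key *a, const range_key *b)
{
    size_t n = a->size < b->size ? a->size : b->size;
    int c = memcmp(a->data, b->data, n);
    if (c != 0) {
        return c;
    }
    return (a->size > b->size) - (a->size < b->size);
}

/* Mirrors the fixed condition: a missing upper key means the range is
 * unbounded above, so the scan never stops on the upper-bound test. */
static int below_upper_bound(const range_key *cur, const range_key *upperkey, int strict)
{
    if (upperkey == NULL || upperkey->data == NULL) {
        return 1;
    }
    return strict ? key_cmp(cur, upperkey) < 0
                  : key_cmp(cur, upperkey) <= 0;
}

int main(void)
{
    char a[] = "abc", b[] = "abd";
    range_key cur = { a, 3 };
    range_key upper = { b, 3 };

    int bounded = below_upper_bound(&cur, &upper, 1);   /* 1: "abc" < "abd" */
    int unbounded = below_upper_bound(&cur, NULL, 1);   /* 1: no upper bound */
    return !(bounded && unbounded);
}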
b550579365b2dfd2ea0b057dea980111973aff12
389ds/389-ds-base
Issue 6693 - Fix error messages inconsistencies (#6694) Fix missing function names and missing new lines in error message. Sometime the newline was moved back from message arguments to the message to have a better consistency and to avoid that the tool detects a false positive Issue: #6693 Reviewed by: @droideck (Thanks!)
commit b550579365b2dfd2ea0b057dea980111973aff12 Author: progier389 <[email protected]> Date: Thu Mar 27 12:02:19 2025 +0100 Issue 6693 - Fix error messages inconsistencies (#6694) Fix missing function names and missing new lines in error message. Sometime the newline was moved back from message arguments to the message to have a better consistency and to avoid that the tool detects a false positive Issue: #6693 Reviewed by: @droideck (Thanks!) diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c index 7263d0655..66a61f6e0 100644 --- a/ldap/servers/plugins/replication/repl5_inc_protocol.c +++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c @@ -1723,7 +1723,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu } else { agmt_inc_last_update_changecount(prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/); } - slapi_log_err(finished ? SLAPI_LOG_WARNING : slapi_log_urp, + slapi_log_err(finished ? SLAPI_LOG_WARNING : slapi_log_urp, repl_plugin_name, "send_updates - %s: Failed to send update operation to receiver (uniqueid %s, CSN %s): %s. %s.\n", (char *)agmt_get_long_name(prp->agmt), entry.op->target_address.uniqueid, csn_str, diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c index 8f847f418..a1d6c6af1 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c @@ -284,7 +284,7 @@ bdb_config_db_lock_pause_set(void *arg, void *value, char *errorbuf, int phase _ if (val == 0) { slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_pause_set", - "%s was set to '0'. The default value will be used (%s)", + "%s was set to '0'. The default value will be used (%s)\n", CONFIG_DB_LOCKS_PAUSE, DEFAULT_DBLOCK_PAUSE_STR); val = DEFAULT_DBLOCK_PAUSE; } @@ -315,7 +315,7 @@ bdb_config_db_lock_threshold_set(void *arg, void *value, char *errorbuf, int pha "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95", CONFIG_DB_LOCKS_THRESHOLD, val); slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_threshold_set", - "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95", + "%s: \"%d\" is invalid, threshold is indicated as a percentage and it must lie in range of 70 and 95\n", CONFIG_DB_LOCKS_THRESHOLD, val); retval = LDAP_OPERATIONS_ERROR; return retval; diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c index b9e1e85f1..08543b888 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c @@ -1752,7 +1752,7 @@ bdb_upgradedn_producer(void *param) inst->inst_name, dn_id); } slapi_log_err(SLAPI_LOG_ERR, "bdb_upgradedn_producer", - "%s: Error: failed to write a line \"%s\"", + "%s: Error: failed to write a line \"%s\"\n", inst->inst_name, dn_id); slapi_ch_free_string(&dn_id); goto error; @@ -3550,7 +3550,7 @@ bdb_dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, ch slapi_ch_free_string(&estr); if (!e) { slapi_log_err(SLAPI_LOG_WARNING, "bdb_dse_conf_verify_core", - "Skipping bad LDIF entry ending line %d of file \"%s\"", + "Skipping bad LDIF entry ending line %d of file \"%s\"\n", curr_lineno, filename); continue; } diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c index 
eeafbf995..4cea7b879 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c @@ -192,13 +192,13 @@ bdb_start_autotune(struct ldbminfo *li) /* First, set our message. In the case autosize is 0, we calculate some * sane defaults and populate these values, but it's only on first run. */ - msg = "This can be corrected by altering the values of nsslapd-dbcachesize, nsslapd-cachememsize and nsslapd-dncachememsize\n"; + msg = "This can be corrected by altering the values of nsslapd-dbcachesize, nsslapd-cachememsize and nsslapd-dncachememsize"; autosize_percentage = 25; } else { /* In this case we really are setting the values each start up, so * change the msg. */ - msg = "This can be corrected by altering the values of nsslapd-cache-autosize, nsslapd-cache-autosize-split and nsslapd-dncachememsize\n"; + msg = "This can be corrected by altering the values of nsslapd-cache-autosize, nsslapd-cache-autosize-split and nsslapd-dncachememsize"; autosize_percentage = li->li_cache_autosize; } /* Has to be less than 0, 0 means to disable I think */ @@ -240,7 +240,7 @@ bdb_start_autotune(struct ldbminfo *li) issane = util_is_cachesize_sane(mi, &zone_size); if (issane == UTIL_CACHESIZE_REDUCED) { slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "Your autosized cache values have been reduced. Likely your nsslapd-cache-autosize percentage is too high.\n"); - slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "%s", msg); + slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "%s\n", msg); } /* It's valid, lets divide it up and set according to user prefs */ db_size = (autosize_db_percentage_split * zone_size) / 100; @@ -382,7 +382,7 @@ bdb_start_autotune(struct ldbminfo *li) slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "In a future release this WILL prevent server start up. You MUST alter your configuration.\n"); slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "Total entry cache size: %" PRIu64 " B; dbcache size: %" PRIu64 " B; available memory size: %" PRIu64 " B; \n", total_cache_size, (uint64_t)li->li_dbcachesize, mi->system_available_bytes); - slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "%s", msg); + slapi_log_err(SLAPI_LOG_WARNING, "bdb_start_autotune", "%s\n", msg); /* WB 2016 - This should be UNCOMMENTED in a future release */ /* return SLAPI_FAIL_GENERAL; */ } diff --git a/ldap/servers/slapd/back-ldbm/idl.c b/ldap/servers/slapd/back-ldbm/idl.c index f690827b5..5574f840a 100644 --- a/ldap/servers/slapd/back-ldbm/idl.c +++ b/ldap/servers/slapd/back-ldbm/idl.c @@ -1370,7 +1370,7 @@ idl_old_delete_key( if ((idl = idl_fetch_one(be, db, key, txn, &rc)) == NULL) { idl_unlock_list(a->ai_idl, key); if (rc != 0 && rc != DBI_RC_NOTFOUND && rc != DBI_RC_RETRY) { - slapi_log_err(SLAPI_LOG_ERR, "idl_old_delete_key - (%s) 0 BAD %d %s\n", + slapi_log_err(SLAPI_LOG_ERR, "idl_old_delete_key", "(%s) 0 BAD %d %s\n", (char *)key->dptr, rc, (msg = dblayer_strerror(rc)) ? 
msg : ""); } if (0 == rc || DBI_RC_NOTFOUND == rc) diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c index 6a4174052..db6024636 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_add.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c @@ -637,7 +637,7 @@ ldbm_back_add(Slapi_PBlock *pb) if ((addingentry->ep_id = next_id(be)) >= MAXID) { slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_add ", "Maximum ID reached, cannot add entry to " - "backend '%s'", + "backend '%s'\n", be->be_name); ldap_result_code = LDAP_OPERATIONS_ERROR; goto error_return; diff --git a/ldap/servers/slapd/back-ldbm/ldbm_usn.c b/ldap/servers/slapd/back-ldbm/ldbm_usn.c index d002e3e4f..fd4e264fe 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_usn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_usn.c @@ -123,7 +123,7 @@ usn_get_last_usn(Slapi_Backend *be, PRUint64 *last_usn) rc = dblayer_new_cursor(be, db, NULL, &dbc); if (0 != rc) { slapi_log_err(SLAPI_LOG_ERR, "usn_get_last_usn", - "Failed to create a cursor: %d", rc); + "Failed to create a cursor: %d\n", rc); goto bail; } diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c index bc9c68380..8f9263f25 100644 --- a/ldap/servers/slapd/back-ldbm/vlv.c +++ b/ldap/servers/slapd/back-ldbm/vlv.c @@ -1670,7 +1670,7 @@ vlv_trim_candidates_byvalue(backend *be, const IDList *candidates, const sort_sp slapi_attr_values2keys(&sort_control->sattr, invalue, &typedown_value, LDAP_FILTER_EQUALITY); /* JCM SLOW FUNCTION */ if (compare_fn == NULL) { slapi_log_err(SLAPI_LOG_WARNING, "vlv_trim_candidates_byvalue", - "Attempt to compare an unordered attribute"); + "Attempt to compare an unordered attribute\n"); compare_fn = slapi_berval_cmp; } } diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index a9922958a..a43fc9285 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -582,7 +582,7 @@ disk_monitoring_thread(void *nothing __attribute__((unused))) { if (be_list_count == BE_LIST_SIZE) { /* error - too many backends */ slapi_log_err(SLAPI_LOG_ERR, "disk_monitoring_thread", - "Too many backends match search request - cannot proceed"); + "Too many backends match search request - cannot proceed\n"); } else { slapi_log_err(SLAPI_LOG_ALERT, "disk_monitoring_thread", "Putting the backend '%s' to read-only mode\n", be->be_name); diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index 5e9819a38..6040eb419 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -887,8 +887,8 @@ str2entry_dupcheck(const char *rawdn, const char *s, int flags, int read_statein if (strcasecmp(type, "dn") == 0) { if (slapi_entry_get_dn_const(e) != NULL) { char ebuf[BUFSIZ]; - slapi_log_err(SLAPI_LOG_TRACE, "str2entry_dupcheck" - "Entry has multiple dns \"%s\" and \"%s\" (second ignored)\n", + slapi_log_err(SLAPI_LOG_TRACE, "str2entry_dupcheck", + "Entry has multiple dns \"%s\" and \"%s\" (second ignored)\n", (char *)slapi_entry_get_dn_const(e), escape_string(valuecharptr, ebuf)); /* the memory below was not allocated by the slapi_ch_ functions */ diff --git a/ldap/servers/slapd/filterentry.c b/ldap/servers/slapd/filterentry.c index f5604161d..c3737eb9e 100644 --- a/ldap/servers/slapd/filterentry.c +++ b/ldap/servers/slapd/filterentry.c @@ -828,7 +828,7 @@ slapi_vattr_filter_test_ext( if (only_check_access != 0) { slapi_log_err(SLAPI_LOG_ERR, "slapi_vattr_filter_test_ext", - "⚠️ DANGER ⚠️ - only_check_access mode is BROKEN!!! 
YOU MUST CHECK ACCESS WITH FILTER MATCHING"); + "⚠️ DANGER ⚠️ - only_check_access mode is BROKEN!!! YOU MUST CHECK ACCESS WITH FILTER MATCHING\n"); } PR_ASSERT(only_check_access == 0); diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c index 3910bdf7f..e59d22893 100644 --- a/ldap/servers/slapd/ldaputil.c +++ b/ldap/servers/slapd/ldaputil.c @@ -1305,7 +1305,7 @@ slapi_add_auth_response_control(Slapi_PBlock *pb, const char *binddn) if (slapi_pblock_set(pb, SLAPI_ADD_RESCONTROL, &arctrl) != 0) { slapi_log_err(SLAPI_LOG_ERR, "slapi_add_auth_response_control", - "Unable to add authentication response control"); + "Unable to add authentication response control\n"); } if (NULL != dnbuf_dynamic) { diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index b58f231f8..9a01011a6 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -1766,13 +1766,13 @@ FrontendConfig_init(void) /* initialize the read/write configuration lock */ if ((cfg->cfg_rwlock = slapi_new_rwlock()) == NULL) { slapi_log_err(SLAPI_LOG_EMERG, "FrontendConfig_init", - "Failed to initialize cfg_rwlock. Exiting now."); + "Failed to initialize cfg_rwlock. Exiting now.\n"); exit(-1); } #else if ((cfg->cfg_lock = PR_NewLock()) == NULL) { slapi_log_err(SLAPI_LOG_EMERG, "FrontendConfig_init", - "Failed to initialize cfg_lock. Exiting now."); + "Failed to initialize cfg_lock. Exiting now.\n"); exit(-1); } #endif diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 6d68d1156..06dae4d0b 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -1212,7 +1212,7 @@ log_set_numlogsperdir(const char *attrname, char *numlogs_str, int logtype, char default: rv = LDAP_OPERATIONS_ERROR; slapi_log_err(SLAPI_LOG_ERR, "log_set_numlogsperdir", - "Invalid log type %d", logtype); + "Invalid log type %d\n", logtype); } } return rv; diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c index e9f2047a6..81eb6ddd4 100644 --- a/ldap/servers/slapd/modrdn.c +++ b/ldap/servers/slapd/modrdn.c @@ -567,13 +567,13 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args) "Syntax check of newSuperior failed\n"); if (!internal_op) { slapi_log_err(SLAPI_LOG_ARGS, "op_shared_rename", - "conn=%" PRIu64 " op=%d MODRDN invalid new superior (\"%s\")", + "conn=%" PRIu64 " op=%d MODRDN invalid new superior (\"%s\")\n", pb_conn->c_connid, operation->o_opid, newsuperior ? newsuperior : "(null)"); } else { slapi_log_err(SLAPI_LOG_ARGS, "op_shared_rename", - "conn=%s op=%d MODRDN invalid new superior (\"%s\")", + "conn=%s op=%d MODRDN invalid new superior (\"%s\")\n", LOG_INTERNAL_OP_CON_ID, LOG_INTERNAL_OP_OP_ID, newsuperior ? newsuperior : "(null)"); diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c index 8fad2ee96..f758ac018 100644 --- a/ldap/servers/slapd/passwd_extop.c +++ b/ldap/servers/slapd/passwd_extop.c @@ -470,10 +470,10 @@ passwd_modify_extop(Slapi_PBlock *pb) * match this very plugin's OID: EXTOP_PASSWD_OID. 
*/ slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &oid); if (oid == NULL) { - errMesg = "Could not get OID value from request.\n"; + errMesg = "Could not get OID value from request."; rc = LDAP_OPERATIONS_ERROR; slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop", - "%s", errMesg); + "%s\n", errMesg); goto free_and_return; } else { slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop", @@ -481,7 +481,7 @@ passwd_modify_extop(Slapi_PBlock *pb) } if (strcasecmp(oid, EXTOP_PASSWD_OID) != 0) { - errMesg = "Request OID does not match Passwd OID.\n"; + errMesg = "Request OID does not match Passwd OID."; rc = LDAP_OPERATIONS_ERROR; goto free_and_return; } else { @@ -500,24 +500,24 @@ passwd_modify_extop(Slapi_PBlock *pb) goto free_and_return; } if (slapi_pblock_get(pb, SLAPI_CONN_SASL_SSF, &sasl_ssf) != 0) { - errMesg = "Could not get SASL SSF from connection\n"; + errMesg = "Could not get SASL SSF from connection"; rc = LDAP_OPERATIONS_ERROR; slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop", - "%s", errMesg); + "%s\n", errMesg); goto free_and_return; } if (slapi_pblock_get(pb, SLAPI_CONN_LOCAL_SSF, &local_ssf) != 0) { - errMesg = "Could not get local SSF from connection\n"; + errMesg = "Could not get local SSF from connection"; rc = LDAP_OPERATIONS_ERROR; slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop", - "%s", errMesg); + "%s\n", errMesg); goto free_and_return; } if (((conn->c_flags & CONN_FLAG_SSL) != CONN_FLAG_SSL) && (sasl_ssf <= 1) && (local_ssf <= 1)) { - errMesg = "Operation requires a secure connection.\n"; + errMesg = "Operation requires a secure connection."; rc = LDAP_CONFIDENTIALITY_REQUIRED; goto free_and_return; } @@ -536,7 +536,7 @@ passwd_modify_extop(Slapi_PBlock *pb) } if ((ber = ber_init(extop_value)) == NULL) { - errMesg = "PasswdModify Request decode failed.\n"; + errMesg = "PasswdModify Request decode failed."; rc = LDAP_PROTOCOL_ERROR; goto free_and_return; } @@ -571,7 +571,7 @@ passwd_modify_extop(Slapi_PBlock *pb) if (ber_scanf(ber, "a", &rawdn) == LBER_ERROR) { slapi_ch_free_string(&rawdn); slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "ber_scanf failed :{\n"); - errMesg = "ber_scanf failed at userID parse.\n"; + errMesg = "ber_scanf failed at userID parse."; rc = LDAP_PROTOCOL_ERROR; goto free_and_return; } @@ -583,7 +583,7 @@ passwd_modify_extop(Slapi_PBlock *pb) if (rc) { /* syntax check failed */ op_shared_log_error_access(pb, "EXT", rawdn ? rawdn : "", "strict: invalid target dn"); - errMesg = "invalid target dn.\n"; + errMesg = "invalid target dn."; slapi_ch_free_string(&rawdn); rc = LDAP_INVALID_SYNTAX; goto free_and_return; @@ -597,7 +597,7 @@ passwd_modify_extop(Slapi_PBlock *pb) if (ber_scanf(ber, "a", &oldPasswd) == LBER_ERROR) { slapi_ch_free_string(&oldPasswd); slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "ber_scanf failed :{\n"); - errMesg = "ber_scanf failed at oldPasswd parse.\n"; + errMesg = "ber_scanf failed at oldPasswd parse."; rc = LDAP_PROTOCOL_ERROR; goto free_and_return; } @@ -609,7 +609,7 @@ passwd_modify_extop(Slapi_PBlock *pb) if (ber_scanf(ber, "a", &newPasswd) == LBER_ERROR) { slapi_ch_free_string(&newPasswd); slapi_log_err(SLAPI_LOG_ERR, "passwd_modify_extop", "ber_scanf failed :{\n"); - errMesg = "ber_scanf failed at newPasswd parse.\n"; + errMesg = "ber_scanf failed at newPasswd parse."; rc = LDAP_PROTOCOL_ERROR; goto free_and_return; } @@ -626,7 +626,7 @@ parse_req_done: /* If the connection is bound anonymously, we must refuse to process this operation. 
*/ if (bindDN == NULL || *bindDN == '\0') { /* Refuse the operation because they're bound anonymously */ - errMesg = "Anonymous Binds are not allowed.\n"; + errMesg = "Anonymous Binds are not allowed."; rc = LDAP_INSUFFICIENT_ACCESS; goto free_and_return; } @@ -640,7 +640,7 @@ parse_req_done: dn = slapi_sdn_get_ndn(target_sdn); if (dn == NULL || *dn == '\0') { /* Refuse the operation because they're bound anonymously */ - errMesg = "Invalid dn.\n"; + errMesg = "Invalid dn."; rc = LDAP_INVALID_DN_SYNTAX; goto free_and_return; } @@ -657,7 +657,7 @@ parse_req_done: * the bind operation (or used sasl or client cert auth or OS creds) */ slapi_pblock_get(pb, SLAPI_CONN_AUTHMETHOD, &authmethod); if (!authmethod || !strcmp(authmethod, SLAPD_AUTH_NONE)) { - errMesg = "User must be authenticated to the directory server.\n"; + errMesg = "User must be authenticated to the directory server."; rc = LDAP_INSUFFICIENT_ACCESS; goto free_and_return; } @@ -680,14 +680,14 @@ parse_req_done: if (rval != LDAP_SUCCESS) { if (!errMesg) - errMesg = "Error generating new password.\n"; + errMesg = "Error generating new password."; rc = LDAP_OPERATIONS_ERROR; goto free_and_return; } /* Make sure a passwd was actually generated */ if (newPasswd == NULL || *newPasswd == '\0') { - errMesg = "Error generating new password.\n"; + errMesg = "Error generating new password."; rc = LDAP_OPERATIONS_ERROR; goto free_and_return; } @@ -723,7 +723,7 @@ parse_req_done: /* If we can't find the entry, then that's an error */ if (ret) { /* Couldn't find the entry, fail */ - errMesg = "No such Entry exists.\n"; + errMesg = "No such Entry exists."; rc = LDAP_NO_SUCH_OBJECT; goto free_and_return; } @@ -767,7 +767,7 @@ parse_req_done: if (need_pwpolicy_ctrl) { slapi_pwpolicy_make_response_control(pb, -1, -1, LDAP_PWPOLICY_PWDMODNOTALLOWED); } - errMesg = "Insufficient access rights\n"; + errMesg = "Insufficient access rights"; rc = LDAP_INSUFFICIENT_ACCESS; goto free_and_return; } @@ -780,7 +780,7 @@ parse_req_done: ret = passwd_check_pwd(targetEntry, oldPasswd); if (ret) { /* No, then we fail this operation */ - errMesg = "Invalid oldPasswd value.\n"; + errMesg = "Invalid oldPasswd value."; rc = ret; goto free_and_return; } @@ -801,7 +801,7 @@ parse_req_done: if (need_pwpolicy_ctrl) { slapi_pwpolicy_make_response_control(pb, -1, -1, LDAP_PWPOLICY_PWDMODNOTALLOWED); } - errMesg = "User is not allowed to change password\n"; + errMesg = "User is not allowed to change password"; rc = LDAP_UNWILLING_TO_PERFORM; goto free_and_return; } @@ -823,7 +823,7 @@ parse_req_done: if (ret != LDAP_SUCCESS) { /* Failed to modify the password, e.g. because password policy, etc. */ - errMesg = "Failed to update password\n"; + errMesg = "Failed to update password"; rc = ret; goto free_and_return; } @@ -838,7 +838,7 @@ parse_req_done: /* Free anything that we allocated above */ free_and_return: slapi_log_err(SLAPI_LOG_PLUGIN, "passwd_modify_extop", - "%s", errMesg ? errMesg : "success"); + "%s\n", errMesg ? 
errMesg : "success"); if ((rc == LDAP_REFERRAL) && (referrals)) { send_referrals_from_entry(pb, referrals); diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c index 94943325f..055ec0d74 100644 --- a/ldap/servers/slapd/pw.c +++ b/ldap/servers/slapd/pw.c @@ -243,8 +243,8 @@ slapi_encode_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, char *value, char *alg) slapi_ch_free((void **)&scheme_list); } else { slapi_log_err(SLAPI_LOG_ERR, "slapi_encode_ext", - "Invalid scheme - %s\n" - "no pwdstorage scheme plugin loaded", + "Invalid scheme: %s ==> " + "no pwdstorage scheme plugin loaded\n", alg); } return NULL; diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index 401a3dce8..a8e6b1210 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -6563,7 +6563,7 @@ supplier_get_new_definitions(struct berval **objectclasses, struct berval **attr * it and look for objectclasses */ slapi_log_err(SLAPI_LOG_ERR, "supplier_get_new_definitions", - "Not able to build an attributes list from the consumer schema"); + "Not able to build an attributes list from the consumer schema\n"); } schema_dse_unlock(); *new_oc = oc2learn_list; diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index 565f5ff10..85c8e8d74 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -1544,7 +1544,7 @@ util_is_cachesize_sane(slapi_pal_meminfo *mi, uint64_t *cachesize) */ uint64_t adjust_cachesize = (mi->system_available_bytes * 0.5); if (adjust_cachesize > *cachesize) { - slapi_log_err(SLAPI_LOG_CRIT, "util_is_cachesize_sane", "Invalid adjusted cachesize is greater than request %" PRIu64, adjust_cachesize); + slapi_log_err(SLAPI_LOG_CRIT, "util_is_cachesize_sane", "Invalid adjusted cachesize is greater than request %" PRIu64 "\n", adjust_cachesize); return UTIL_CACHESIZE_ERROR; } if (adjust_cachesize < (16 * mi->pagesize_bytes)) {
0
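The diff above moves the trailing newline into the slapi_log_err() format string and keeps reusable message strings (msg, errMesg) newline-free, so the same text can be both logged and returned to a client without double or missing line breaks. Below is a minimal, self-contained C sketch of that convention; log_err() is a hypothetical stand-in for slapi_log_err(), not the real server API.

```c
#include <stdarg.h>
#include <stdio.h>

/* log_err() is a hypothetical stand-in for slapi_log_err(): a subsystem
 * name plus a printf-style format. The convention shown in the diff is
 * that the format string supplies the trailing "\n", while reusable
 * message strings stay newline-free so they can also be returned to a
 * client unchanged. */
static void
log_err(const char *subsystem, const char *fmt, ...)
{
    va_list ap;

    fprintf(stderr, "[%s] ", subsystem);
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int
main(void)
{
    /* Reusable message: no trailing newline of its own. */
    const char *msg = "Operation requires a secure connection.";

    log_err("passwd_modify_extop", "%s\n", msg);  /* "\n" lives in the format string */
    log_err("usn_get_last_usn", "Failed to create a cursor: %d\n", 42);
    return 0;
}
```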
7a8b5ace5e4a2abdffdfaf737bab80ede4e18eaf
389ds/389-ds-base
Ticket 49675 - Fix coverity issues Description: Fixed these coverity issues. Some of these fixes are just to quiet convscan: 16852 Unsigned compared - entrycache_add_int 16848 Unsigned compared - dncache_add_int 16704 Explicit null dereferenced - slapd_SSL_client_auth 15953 Resource leak - new_task 15583 Out-of-bounds read - create_filter 15445 Unused value - ruv_update_ruv 15442 Argument cannot be negative - dse_write_file_nolock 15223 Double unlock - ruv_get_referrals 15170 Explicit null dereferenced - passwd_apply_mods 15581 Wrong sizeof argument - slapi_be_new 15144 Constant expression result - upgradedn_producer Also fixed a few compiler warnings https://pagure.io/389-ds-base/issue/49675 Reviewed by: spichugi & lkrispenz(Thanks!!)
commit 7a8b5ace5e4a2abdffdfaf737bab80ede4e18eaf Author: Mark Reynolds <[email protected]> Date: Sat May 12 15:44:43 2018 -0400 Ticket 49675 - Fix coverity issues Description: Fixed these coverity issues. Some of these fixes are just to quiet convscan: 16852 Unsigned compared - entrycache_add_int 16848 Unsigned compared - dncache_add_int 16704 Explicit null dereferenced s- lapd_SSL_client_auth 15953 Resource leak - new_task 15583 Out-of-bounds read - create_filter 15445 Unused value - ruv_update_ruv 15442 Argument cannot be negative - dse_write_file_nolock 15223 Double unlock - ruv_get_referrals 15170 Explicit null dereferenced - passwd_apply_mods 15581 Wrong sizeof argument - slapi_be_new 15144 Constant expression result - upgradedn_producer Also fixed a few compiler warnings https://pagure.io/389-ds-base/issue/49675 Reviewed by: spichugi & lkrispenz(Thanks!!) diff --git a/ldap/servers/plugins/replication/repl5_ruv.c b/ldap/servers/plugins/replication/repl5_ruv.c index 7bfdc3425..ddb88b998 100644 --- a/ldap/servers/plugins/replication/repl5_ruv.c +++ b/ldap/servers/plugins/replication/repl5_ruv.c @@ -73,7 +73,7 @@ static RUVElement *ruvAddIndexReplicaNoCSN(RUV *ruv, ReplicaId rid, const char * static int ruvReplicaCompare(const void *el1, const void *el2); static RUVElement *get_ruvelement_from_berval(const struct berval *bval); static char *get_replgen_from_berval(const struct berval *bval); - +static PRInt32 ruv_replica_count_nolock(const RUV *ruv, int lock); static const char *const prefix_replicageneration = "{replicageneration}"; static const char *const prefix_ruvcsn = "{replica "; /* intentionally missing '}' */ @@ -1366,22 +1366,30 @@ ruv_compare_ruv(const RUV *ruv1, const char *ruv1name, const RUV *ruv2, const ch return rc; } -PRInt32 -ruv_replica_count(const RUV *ruv) +static PRInt32 +ruv_replica_count_nolock(const RUV *ruv, int lock) { if (ruv == NULL) return 0; else { int count; - slapi_rwlock_rdlock(ruv->lock); + if (lock) + slapi_rwlock_rdlock(ruv->lock); count = dl_get_count(ruv->elements); - slapi_rwlock_unlock(ruv->lock); + if (lock) + slapi_rwlock_unlock(ruv->lock); return count; } } +PRInt32 +ruv_replica_count(const RUV *ruv) +{ + return ruv_replica_count_nolock(ruv, 1); +} + /* * Extract all the referral URL's from the RUV (but self URL), * returning them in an array of strings, that @@ -1406,7 +1414,7 @@ ruv_get_referrals(const RUV *ruv) slapi_rwlock_rdlock(ruv->lock); - n = ruv_replica_count(ruv); + n = ruv_replica_count_nolock(ruv, 0); if (n > 0) { RUVElement *replica; int cookie; @@ -1664,7 +1672,11 @@ ruv_update_ruv(RUV *ruv, const CSN *csn, const char *replica_purl, void *replica slapi_rwlock_wrlock(ruv->lock); if (local_rid != prim_rid) { repl_ruv = ruvGetReplica(ruv, prim_rid); - rc = ruv_update_ruv_element(ruv, repl_ruv, prim_csn, replica_purl, PR_FALSE); + if ((rc = ruv_update_ruv_element(ruv, repl_ruv, prim_csn, replica_purl, PR_FALSE))) { + slapi_log_err(SLAPI_LOG_REPL, repl_plugin_name, + "ruv_update_ruv - failed to update primary ruv, error (%d)", rc); + return rc; + } } repl_ruv = ruvGetReplica(ruv, local_rid); rc = ruv_update_ruv_element(ruv, repl_ruv, prim_csn, replica_purl, PR_TRUE); diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c index 7f60955aa..3a75f1f29 100644 --- a/ldap/servers/plugins/uiduniq/uid.c +++ b/ldap/servers/plugins/uiduniq/uid.c @@ -500,7 +500,7 @@ create_filter(const char **attributes, const struct berval *value, const char *r } /* Allocate the buffer */ - filter = slapi_ch_malloc(filterLen); + 
filter = (char *)slapi_ch_calloc(1, filterLen + 1); fp = filter; max = &filter[filterLen]; diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h index ffc803e89..79115fe12 100644 --- a/ldap/servers/slapd/back-ldbm/back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h @@ -359,10 +359,10 @@ struct backdn /* for the in-core cache of entries */ struct cache { - uint64_t c_maxsize; /* max size in bytes */ + uint64_t c_maxsize; /* max size in bytes */ Slapi_Counter *c_cursize; /* size in bytes */ - uint64_t c_maxentries; /* max entries allowed (-1: no limit) */ - uint64_t c_curentries; /* current # entries in cache */ + int64_t c_maxentries; /* max entries allowed (-1: no limit) */ + uint64_t c_curentries; /* current # entries in cache */ Hashtable *c_dntable; Hashtable *c_idtable; #ifdef UUIDCACHE_ON diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c index 77757c2ac..827cd9111 100644 --- a/ldap/servers/slapd/back-ldbm/cache.c +++ b/ldap/servers/slapd/back-ldbm/cache.c @@ -494,7 +494,7 @@ cache_make_hashes(struct cache *cache, int type) /* initialize the cache */ int -cache_init(struct cache *cache, uint64_t maxsize, uint64_t maxentries, int type) +cache_init(struct cache *cache, uint64_t maxsize, int64_t maxentries, int type) { slapi_log_err(SLAPI_LOG_TRACE, "cache_init", "-->\n"); cache->c_maxsize = maxsize; @@ -703,7 +703,7 @@ entrycache_set_max_size(struct cache *cache, uint64_t bytes) } void -cache_set_max_entries(struct cache *cache, long entries) +cache_set_max_entries(struct cache *cache, int64_t entries) { struct backentry *eflush = NULL; struct backentry *eflushtemp = NULL; @@ -742,10 +742,10 @@ cache_get_max_size(struct cache *cache) return n; } -long +int64_t cache_get_max_entries(struct cache *cache) { - long n; + int64_t n; cache_lock(cache); n = cache->c_maxentries; @@ -773,7 +773,7 @@ cache_entry_size(struct backentry *e) * these u_long *'s to a struct */ void -cache_get_stats(struct cache *cache, PRUint64 *hits, PRUint64 *tries, uint64_t *nentries, uint64_t *maxentries, uint64_t *size, uint64_t *maxsize) +cache_get_stats(struct cache *cache, PRUint64 *hits, PRUint64 *tries, uint64_t *nentries, int64_t *maxentries, uint64_t *size, uint64_t *maxsize) { cache_lock(cache); if (hits) @@ -1431,7 +1431,7 @@ entrycache_add_int(struct cache *cache, struct backentry *e, int state, struct b /* don't add to lru since refcnt = 1 */ LOG("added entry of size %lu -> total now %lu out of max %lu\n", e->ep_size, slapi_counter_get_value(cache->c_cursize), cache->c_maxsize); - if (cache->c_maxentries >= 0) { + if (cache->c_maxentries > 0) { LOG(" total entries %ld out of %ld\n", cache->c_curentries, cache->c_maxentries); } @@ -1838,7 +1838,7 @@ dncache_add_int(struct cache *cache, struct backdn *bdn, int state, struct backd LOG("added entry of size %lu -> total now %lu out of max %lu\n", bdn->ep_size, slapi_counter_get_value(cache->c_cursize), cache->c_maxsize); - if (cache->c_maxentries >= 0) { + if (cache->c_maxentries > 0) { LOG(" total entries %ld out of %ld\n", cache->c_curentries, cache->c_maxentries); } diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c index 1be15a42a..7c2921c37 100644 --- a/ldap/servers/slapd/back-ldbm/import-threads.c +++ b/ldap/servers/slapd/back-ldbm/import-threads.c @@ -1862,7 +1862,7 @@ upgradedn_producer(void *param) if (PL_strchr(rdnp, '\\')) { do_dn_norm = 1; } else { - while ((++rdnp <= endrdn) && (*rdnp == ' ') && (*rdnp == 
'\t')) + while ((++rdnp <= endrdn) && (*rdnp == ' ' || *rdnp == '\t')) ; /* DN contains an RDN <type>="<value>" ? */ if ((rdnp != endrdn) && ('"' == *rdnp) && ('"' == *endrdn)) { diff --git a/ldap/servers/slapd/back-ldbm/monitor.c b/ldap/servers/slapd/back-ldbm/monitor.c index 91abeea90..126715b61 100644 --- a/ldap/servers/slapd/back-ldbm/monitor.c +++ b/ldap/servers/slapd/back-ldbm/monitor.c @@ -48,7 +48,8 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), struct berval *vals[2]; char buf[BUFSIZ]; uint64_t hits, tries; - uint64_t nentries, maxentries; + uint64_t nentries; + int64_t maxentries; uint64_t size, maxsize; /* NPCTE fix for bugid 544365, esc 0. <P.R> <04-Jul-2001> */ struct stat astat; @@ -100,7 +101,7 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), MSET("maxEntryCacheSize"); sprintf(buf, "%" PRIu64, nentries); MSET("currentEntryCacheCount"); - sprintf(buf, "%" PRIu64, maxentries); + sprintf(buf, "%" PRId64, maxentries); MSET("maxEntryCacheCount"); if (entryrdn_get_switch()) { @@ -119,7 +120,7 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)), MSET("maxDnCacheSize"); sprintf(buf, "%" PRIu64, nentries); MSET("currentDnCacheCount"); - sprintf(buf, "%" PRIu64, maxentries); + sprintf(buf, "%" PRId64, maxentries); MSET("maxDnCacheCount"); } diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h index 2898a3529..793150bee 100644 --- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h @@ -32,14 +32,14 @@ void attr_create_empty(backend *be, char *type, struct attrinfo **ai); /* * cache.c */ -int cache_init(struct cache *cache, uint64_t maxsize, uint64_t maxentries, int type); +int cache_init(struct cache *cache, uint64_t maxsize, int64_t maxentries, int type); void cache_clear(struct cache *cache, int type); void cache_destroy_please(struct cache *cache, int type); void cache_set_max_size(struct cache *cache, uint64_t bytes, int type); void cache_set_max_entries(struct cache *cache, long entries); size_t cache_get_max_size(struct cache *cache); long cache_get_max_entries(struct cache *cache); -void cache_get_stats(struct cache *cache, uint64_t *hits, uint64_t *tries, uint64_t *entries, uint64_t *maxentries, uint64_t *size, uint64_t *maxsize); +void cache_get_stats(struct cache *cache, uint64_t *hits, uint64_t *tries, uint64_t *entries, int64_t *maxentries, uint64_t *size, uint64_t *maxsize); void cache_debug_hash(struct cache *cache, char **out); int cache_remove(struct cache *cache, void *e); void cache_return(struct cache *cache, void **bep); diff --git a/ldap/servers/slapd/backend_manager.c b/ldap/servers/slapd/backend_manager.c index 8ea433cc1..401ab5b21 100644 --- a/ldap/servers/slapd/backend_manager.c +++ b/ldap/servers/slapd/backend_manager.c @@ -36,9 +36,13 @@ slapi_be_new(const char *type, const char *name, int isprivate, int logchanges) int oldsize = maxbackends; maxbackends += BACKEND_GRAB_SIZE; backends = (Slapi_Backend **)slapi_ch_realloc((char *)backends, maxbackends * sizeof(Slapi_Backend *)); - memset(&backends[oldsize], '\0', BACKEND_GRAB_SIZE * sizeof(Slapi_Backend *)); + for (i = oldsize; i < maxbackends; i++){ + /* init the new be pointers so we can track empty slots */ + backends[i] = NULL; + } } + /* Find the first open slot */ for (i = 0; ((i < maxbackends) && (backends[i])); i++) ; diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c index f93398ec8..e61e9d9ec 
100644 --- a/ldap/servers/slapd/dse.c +++ b/ldap/servers/slapd/dse.c @@ -1005,8 +1005,10 @@ dse_write_file_nolock(struct dse *pdse) * we need to open and fsync the dir to make the rename stick. */ int fp_configdir = open(pdse->dse_configdir, O_PATH | O_DIRECTORY); - fsync(fp_configdir); - close(fp_configdir); + if (fp_configdir != -1) { + fsync(fp_configdir); + close(fp_configdir); + } } } if (fpw.fpw_prfd) diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c index 5f21f2f71..df9fea7b2 100644 --- a/ldap/servers/slapd/passwd_extop.c +++ b/ldap/servers/slapd/passwd_extop.c @@ -146,29 +146,34 @@ passwd_apply_mods(Slapi_PBlock *pb_orig, const Slapi_DN *sdn, Slapi_Mods *mods, * that it was done by the root DN. */ Connection *pb_conn = NULL; slapi_pblock_get(pb_orig, SLAPI_CONNECTION, &pb_conn); - slapi_pblock_set(pb, SLAPI_CONNECTION, pb_conn); + if (pb_conn){ + slapi_pblock_set(pb, SLAPI_CONNECTION, pb_conn); + ret = slapi_modify_internal_pb(pb); - ret = slapi_modify_internal_pb(pb); + /* We now clean up the connection that we copied into the + * new pblock. We want to leave it untouched. */ + slapi_pblock_set(pb, SLAPI_CONNECTION, NULL); - /* We now clean up the connection that we copied into the - * new pblock. We want to leave it untouched. */ - slapi_pblock_set(pb, SLAPI_CONNECTION, NULL); + slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &ret); - slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &ret); - - /* Retrieve and duplicate the response controls since they will be - * destroyed along with the pblock used for the internal operation. */ - slapi_pblock_get(pb, SLAPI_RESCONTROLS, &pb_resp_controls); - if (pb_resp_controls) { - slapi_add_controls(resp_controls, pb_resp_controls, 1); - } + /* Retrieve and duplicate the response controls since they will be + * destroyed along with the pblock used for the internal operation. */ + slapi_pblock_get(pb, SLAPI_RESCONTROLS, &pb_resp_controls); + if (pb_resp_controls) { + slapi_add_controls(resp_controls, pb_resp_controls, 1); + } - if (ret != LDAP_SUCCESS) { - slapi_log_err(SLAPI_LOG_TRACE, "passwd_apply_mods", - "WARNING: passwordPolicy modify error %d on entry '%s'\n", - ret, slapi_sdn_get_dn(sdn)); + if (ret != LDAP_SUCCESS) { + slapi_log_err(SLAPI_LOG_TRACE, "passwd_apply_mods", + "WARNING: passwordPolicy modify error %d on entry '%s'\n", + ret, slapi_sdn_get_dn(sdn)); + } + } else { + ret = -1; + slapi_log_err(SLAPI_LOG_ERR, "passwd_apply_mods", + "(%s) Original connection is NULL\n", + slapi_sdn_get_dn(sdn)); } - slapi_pblock_destroy(pb); } diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c index 36b09fd16..288bae9a2 100644 --- a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -2448,7 +2448,7 @@ slapd_SSL_client_auth(LDAP *ld) /* Free config data */ - if (!svrcore_setup()) { + if (!svrcore_setup() && token != NULL) { #ifdef WITH_SYSTEMD slapd_SSL_warn("Sending pin request to SVRCore. You may need to run " "systemd-tty-ask-password-agent to provide the password."); diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c index 23a9f9e1e..c839c1ff6 100644 --- a/ldap/servers/slapd/task.c +++ b/ldap/servers/slapd/task.c @@ -600,6 +600,7 @@ new_task(const char *rawdn, void *plugin) if (task->task_log_lock == NULL) { /* Failed to allocate! Uh Oh! 
*/ slapi_ch_free((void **)&task); + slapi_ch_free_string(&dn); slapi_log_err(SLAPI_LOG_ERR, "new_task", "Unable to allocate task lock for: %s\n", rawdn); return NULL; } diff --git a/src/libsds/test/benchmark_par.c b/src/libsds/test/benchmark_par.c index 587f7c53a..37ef7636a 100644 --- a/src/libsds/test/benchmark_par.c +++ b/src/libsds/test/benchmark_par.c @@ -115,7 +115,7 @@ batch_random(struct thread_info *info) size_t step = 0; size_t current_step = 0; size_t max_factors = info->iter / 2048; - size_t baseid; + size_t baseid = 0; void *output; /* Give ourselves a unique base id */ for (size_t i = 0; i < info->tid; i++) { @@ -188,7 +188,7 @@ batch_insert(struct thread_info *info) size_t cf = 0; size_t step = 0; size_t max_factors = info->iter / 2048; - size_t baseid; + size_t baseid = 0; /* Give ourselves a unique base id */ for (size_t i = 0; i < info->tid; i++) { baseid += 50000; @@ -215,7 +215,7 @@ batch_delete(struct thread_info *info) size_t cf = 0; size_t step = 0; size_t max_factors = info->iter / 2048; - size_t baseid; + size_t baseid = 0; /* Give ourselves a unique base id */ for (size_t i = 0; i < info->tid; i++) { baseid += 50000; diff --git a/src/svrcore/src/user.c b/src/svrcore/src/user.c index 3da668dcb..f3fd96371 100644 --- a/src/svrcore/src/user.c +++ b/src/svrcore/src/user.c @@ -29,8 +29,6 @@ static const char retryWarning[] = "Warning: Incorrect PIN may result in disabling the token"; static const char prompt[] = "Enter PIN for"; -static const char nt_retryWarning[] = -"Warning: You entered an incorrect PIN. Incorrect PIN may result in disabling the token"; struct SVRCOREUserPinObj { @@ -122,14 +120,6 @@ static char *getPin(SVRCOREPinObj *obj, const char *tokenName, PRBool retry) /* If the program is not interactive then return no result */ if (!tty->interactive) return 0; -#ifdef _WIN32 - if (retry) { - MessageBox(GetDesktopWindow(), nt_retryWarning, - "Netscape Server", MB_ICONEXCLAMATION | MB_OK); - } - return NT_PromptForPin(tokenName); -#else - if (retry) fprintf(stdout, "%s\n", retryWarning); @@ -168,9 +158,6 @@ static char *getPin(SVRCOREPinObj *obj, const char *tokenName, PRBool retry) if (line[0] == 0) return 0; return strdup(line); - -#endif /* _WIN32 */ - } /*
0
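Among the fixes above, the double unlock in ruv_get_referrals() is resolved by adding a _nolock variant of ruv_replica_count(), so a caller that already holds the RUV read lock never takes or releases it a second time. Below is a simplified, self-contained C sketch of that locking pattern using a pthread rwlock; the struct and function names are illustrative stand-ins, not the real RUV code, and the actual fix uses a lock flag argument rather than two separate functions.

```c
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the RUV code: a counter protected by an
 * rwlock. A caller that already holds the lock uses the _nolock variant,
 * so the lock is never taken (or released) twice by the same thread. */
struct counted_list {
    pthread_rwlock_t lock;
    int count;
};

static int
list_count_nolock(const struct counted_list *l)
{
    return l->count;
}

static int
list_count(struct counted_list *l)
{
    int n;

    pthread_rwlock_rdlock(&l->lock);
    n = list_count_nolock(l);
    pthread_rwlock_unlock(&l->lock);
    return n;
}

/* Mirrors ruv_get_referrals(): it holds the read lock for a longer
 * critical section, so it must call the _nolock helper inside it. */
static void
dump_if_nonempty(struct counted_list *l)
{
    pthread_rwlock_rdlock(&l->lock);
    if (list_count_nolock(l) > 0) {
        printf("list has %d elements\n", l->count);
    }
    pthread_rwlock_unlock(&l->lock);
}

int
main(void)
{
    struct counted_list l;

    pthread_rwlock_init(&l.lock, NULL);
    l.count = 3;

    printf("count: %d\n", list_count(&l));
    dump_if_nonempty(&l);

    pthread_rwlock_destroy(&l.lock);
    return 0;
}
```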
c57528b1ce88c0174674b8cdc8fc1293bc1fe49c
389ds/389-ds-base
Ticket 50169 - lib389 changed hardcoded systemctl path Description: Currently the server is using "/usr/bin/systemctl", but this fails on Debian. There is no need for a path anyway so just strip it. https://pagure.io/389-ds-base/issue/50169 Reviewed by: mhonek(Thanks!)
commit c57528b1ce88c0174674b8cdc8fc1293bc1fe49c Author: Mark Reynolds <[email protected]> Date: Wed Jan 16 12:27:55 2019 -0500 Ticket 50169 - lib389 changed hardcoded systemctl path Description: Currently the server is using "/usr/bin/systemctl", but this fails on Debian. There is no need for a path anyway so jsut strip it. https://pagure.io/389-ds-base/issue/50169 Reviewed by: mhonek(Thanks!) diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index bd5e86a1a..974e2189a 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -1206,9 +1206,9 @@ class DirSrv(SimpleLDAPObject, object): if self.with_systemd() and not self.containerised: # Do systemd things here ... - subprocess.check_call(["/usr/bin/systemctl", - "start", - "dirsrv@%s" % self.serverid]) + subprocess.check_call(["systemctl", + "start", + "dirsrv@%s" % self.serverid]) else: # Start the process. # Wait for it to terminate @@ -1269,9 +1269,9 @@ class DirSrv(SimpleLDAPObject, object): if self.with_systemd() and not self.containerised: # Do systemd things here ... - subprocess.check_call(["/usr/bin/systemctl", - "stop", - "dirsrv@%s" % self.serverid]) + subprocess.check_call(["systemctl", + "stop", + "dirsrv@%s" % self.serverid]) else: # TODO: Make the pid path in the files things # TODO: use the status call instead!!!! @@ -1296,9 +1296,9 @@ class DirSrv(SimpleLDAPObject, object): """ if self.with_systemd() and not self.containerised: # Do systemd things here ... - rc = subprocess.call(["/usr/bin/systemctl", - "is-active", "--quiet", - "dirsrv@%s" % self.serverid]) + rc = subprocess.call(["systemctl", + "is-active", "--quiet", + "dirsrv@%s" % self.serverid]) if rc == 0: return True # This .... probably will mess something up diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py index d8c513ecd..1e3e891ed 100644 --- a/src/lib389/lib389/instance/setup.py +++ b/src/lib389/lib389/instance/setup.py @@ -714,9 +714,9 @@ class SetupDs(object): # If we are on the correct platform settings, systemd if general['systemd'] and not self.containerised: # Should create the symlink we need, but without starting it. - subprocess.check_call(["/usr/bin/systemctl", - "enable", - "dirsrv@%s" % slapd['instance_name']]) + subprocess.check_call(["systemctl", + "enable", + "dirsrv@%s" % slapd['instance_name']]) # Setup tmpfiles_d tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd['instance_name'] + ".conf"
0
b5bfa2a0386e168ce2196a077169382ae53a94b4
389ds/389-ds-base
Ticket #48492 - heap corruption at schema replication. Description: 389-ds-base-1.3.2 and newer uses openldap schema parser, which is more strict with the definition. For instance, the older 389-ds-base could have a schema such as SYNTAX OID in single quotes, which is not acceptable on the newer version. There was a bug to handle the error case that caused a crash. This patch adds 1) the null reference check to attr_syntax_free (attrsyntax.c), 2) a null init to the output arg in parse_at_str and parse_oc_str (schema.c) and 3) an error logging to schema_berval_to_atlist & schema_berval_to_oclist (schema.c) for troubleshooting. https://fedorahosted.org/389/ticket/48492 Reviewed by [email protected] and [email protected] (Thank you, William and Mark!)
commit b5bfa2a0386e168ce2196a077169382ae53a94b4 Author: Noriko Hosoi <[email protected]> Date: Wed Feb 10 11:36:32 2016 -0800 Ticket #48492 - heap corruption at schema replication. Description: 389-ds-base-1.3.2 and newer uses openldap schema parser, which is more strict with the definition. For instance, the older 389-ds-base could have a schema such as SINTAX OID in single quotes, which is not acceptable on the newer version. There was a bug to handle the error case that caused a crash. This patch adds 1) the null reference check to attr_syntax_free (attrsyntax.c), 2) a null init to the output arg in parse_at_str and parse_oc_str (schema.c) and 3) an error logging to schema_berval_to_atlist & schema_berval_to_oclist (schema.c) for troubleshooting. https://fedorahosted.org/389/ticket/48492 Reviewed by [email protected] and [email protected] (Thank you, William and Mark!) diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c index 4cdcf86c1..8b2a77aad 100644 --- a/ldap/servers/slapd/attrsyntax.c +++ b/ldap/servers/slapd/attrsyntax.c @@ -189,6 +189,9 @@ attr_syntax_check_oids() void attr_syntax_free( struct asyntaxinfo *a ) { + if (!a) { + return; + } cool_charray_free( a->asi_aliases ); slapi_ch_free_string(&a->asi_name ); slapi_ch_free_string(&a->asi_desc ); diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index 65cbad5f8..dd565992d 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -263,6 +263,9 @@ static PRCallOnceType schema_dse_mandatory_init_callonce = { 0, 0, 0 }; static int parse_at_str(const char *input, struct asyntaxinfo **asipp, char *errorbuf, size_t errorbufsize, PRUint32 schema_flags, int is_user_defined, int schema_ds4x_compat, int is_remote) { + if (asipp) { + *asipp = NULL; + } #ifdef USE_OPENLDAP return parse_attr_str(input, asipp, errorbuf, errorbufsize, schema_flags, is_user_defined,schema_ds4x_compat,is_remote); #else @@ -274,6 +277,9 @@ static int parse_oc_str(const char *input, struct objclass **oc, char *errorbuf, size_t errorbufsize, PRUint32 schema_flags, int is_user_defined, int schema_ds4x_compat, struct objclass* private_schema ) { + if (oc) { + *oc = NULL; + } #ifdef USE_OPENLDAP return parse_objclass_str (input, oc, errorbuf, errorbufsize, schema_flags, is_user_defined, schema_ds4x_compat, private_schema ); #else @@ -7146,11 +7152,15 @@ schema_berval_to_oclist(struct berval **oc_berval) oc_list = NULL; oc_tail = NULL; if (oc_berval != NULL) { + errorbuf[0] = '\0'; for (i = 0; oc_berval[i] != NULL; i++) { /* parse the objectclass value */ if (LDAP_SUCCESS != (rc = parse_oc_str(oc_berval[i]->bv_val, &oc, errorbuf, sizeof (errorbuf), DSE_SCHEMA_NO_CHECK | DSE_SCHEMA_USE_PRIV_SCHEMA, 0, schema_ds4x_compat, oc_list))) { + slapi_log_error(SLAPI_LOG_FATAL, "schema", + "parse_oc_str returned error: %s\n", + errorbuf[0]?errorbuf:"unknown"); oc_free(&oc); rc = 1; break; @@ -7184,11 +7194,15 @@ schema_berval_to_atlist(struct berval **at_berval) schema_ds4x_compat = config_get_ds4_compatible_schema(); if (at_berval != NULL) { + errorbuf[0] = '\0'; for (i = 0; at_berval[i] != NULL; i++) { /* parse the objectclass value */ rc = parse_at_str(at_berval[i]->bv_val, &at, errorbuf, sizeof (errorbuf), DSE_SCHEMA_NO_CHECK | DSE_SCHEMA_USE_PRIV_SCHEMA, 0, schema_ds4x_compat, 0); - if(rc){ + if (rc) { + slapi_log_error(SLAPI_LOG_FATAL, "schema", + "parse_oc_str returned error: %s\n", + errorbuf[0]?errorbuf:"unknown"); attr_syntax_free(at); break; }
0
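The pattern behind this fix is to NULL the output argument at the top of the parser and to make the free routine tolerate NULL, which keeps an error path from freeing an uninitialized pointer. A toy, self-contained C sketch of the same shape follows; struct syntax_info, parse_syntax() and syntax_free() are made-up stand-ins for the real asyntaxinfo, parse_at_str() and attr_syntax_free() code.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins for parse_at_str()/attr_syntax_free(). The shape of the
 * fix: (1) the parser NULLs its output argument before doing anything
 * else, and (2) the free routine tolerates NULL, so an error path that
 * frees the output can never touch an uninitialized pointer. */
struct syntax_info {
    char *name;
};

static void
syntax_free(struct syntax_info *s)
{
    if (!s) {              /* mirrors the NULL check added to attr_syntax_free() */
        return;
    }
    free(s->name);
    free(s);
}

static int
parse_syntax(const char *input, struct syntax_info **out)
{
    *out = NULL;           /* mirrors the "*asipp = NULL" added to parse_at_str() */
    if (!input || input[0] != '(') {
        return -1;         /* parse error; *out is a well-defined NULL */
    }
    *out = calloc(1, sizeof(**out));
    if (!*out) {
        return -1;
    }
    (*out)->name = strdup(input);
    return 0;
}

int
main(void)
{
    struct syntax_info *s; /* deliberately not initialized by the caller */

    if (parse_syntax("not-a-definition", &s) != 0) {
        syntax_free(s);    /* safe: s is NULL, not stack garbage */
        fprintf(stderr, "parse failed\n");
    }
    return 0;
}
```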
f43ed1ddaa9bcbf1308b2ecbe9044e2058776d2c
389ds/389-ds-base
Ticket 399 - slapi_ldap_bind() doesn't check bind results Bug Description: There are two issues here. One, we were not calling ldap_parse_result() for SIMPLE binds. Two, we were overwriting the error code with the function result code. Fix Description: Always call ldap_parse_result, and use a separate error code variable to preserve the actual result code from the bind operation. https://fedorahosted.org/389/ticket/399 Reviewed by: nhosoi(Thanks Noriko!)
commit f43ed1ddaa9bcbf1308b2ecbe9044e2058776d2c Author: Mark Reynolds <[email protected]> Date: Fri Jun 29 13:46:45 2012 -0400 Ticket 399 - slapi_ldap_bind() doesn't check bind results Bug Description: There are two issues here. One, we were not calling ldap_parse_result() for SIMPLE binds. Two, we were overwriting the error code, with the function result code. Fix Description: Always call ldap_parse_result, and use a separate error code variable to preserve the actual result code from the bind operation. https://fedorahosted.org/389/ticket/399 Reviewed by: nhosoi(Thanks Noriko!) diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c index fd4e93fc3..aaca1afe1 100644 --- a/ldap/servers/slapd/ldaputil.c +++ b/ldap/servers/slapd/ldaputil.c @@ -995,6 +995,7 @@ slapi_ldap_bind( ) { int rc = LDAP_SUCCESS; + int err; LDAPControl **clientctrls = NULL; int secure = 0; struct berval bvcreds = {0, NULL}; @@ -1115,21 +1116,27 @@ slapi_ldap_bind( mech ? mech : "SIMPLE"); goto done; } - /* if we got here, we were able to read success result */ - /* Get the controls sent by the server if requested */ - if (returnedctrls) { - if ((rc = ldap_parse_result(ld, result, &rc, NULL, NULL, - NULL, returnedctrls, - 0)) != LDAP_SUCCESS) { - slapi_log_error(SLAPI_LOG_FATAL, "slapi_ldap_bind", - "Error: could not bind id " - "[%s] mech [%s]: error %d (%s) errno %d (%s)\n", - bindid ? bindid : "(anon)", - mech ? mech : "SIMPLE", - rc, ldap_err2string(rc), errno, slapd_system_strerror(errno)); - goto done; - } - } + /* if we got here, we were able to read success result */ + /* Get the controls sent by the server if requested */ + if ((rc = ldap_parse_result(ld, result, &err, NULL, NULL, + NULL, returnedctrls, 0)) != LDAP_SUCCESS) { + slapi_log_error(SLAPI_LOG_FATAL, "slapi_ldap_bind", + "Error: could not parse bind result: error %d (%s) errno %d (%s)\n", + rc, ldap_err2string(rc), errno, slapd_system_strerror(errno)); + goto done; + } + + /* check the result code from the bind operation */ + if(err){ + rc = err; + slapi_log_error(SLAPI_LOG_FATAL, "slapi_ldap_bind", + "Error: could not bind id " + "[%s] mech [%s]: error %d (%s) errno %d (%s)\n", + bindid ? bindid : "(anon)", + mech ? mech : "SIMPLE", + rc, ldap_err2string(rc), errno, slapd_system_strerror(errno)); + goto done; + } /* parse the bind result and get the ldap error code */ if ((rc = ldap_parse_sasl_bind_result(ld, result, &servercredp,
0
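The shape of this fix is to keep two result codes: one for whether ldap_parse_result() itself succeeded, and one for the result code the server placed in the bind response, returning whichever indicates the failure. A minimal sketch of that pattern against OpenLDAP's libldap is below; check_bind_result() is a hypothetical helper, assumed to be called with a bind response already obtained from ldap_result(), and is not the actual slapi_ldap_bind() code.

```c
#include <ldap.h>
#include <stdio.h>

/* check_bind_result() keeps two codes separate: rc says whether
 * ldap_parse_result() itself worked, err carries the result code the
 * server put in the bind response. That way a successful parse can no
 * longer mask a failed bind (e.g. LDAP_INVALID_CREDENTIALS). */
static int
check_bind_result(LDAP *ld, LDAPMessage *result)
{
    int rc;
    int err = LDAP_SUCCESS;

    rc = ldap_parse_result(ld, result, &err, NULL, NULL, NULL, NULL, 0);
    if (rc != LDAP_SUCCESS) {
        fprintf(stderr, "could not parse bind result: %d (%s)\n",
                rc, ldap_err2string(rc));
        return rc;
    }
    if (err != LDAP_SUCCESS) {
        /* parsing worked, but the bind operation itself failed */
        fprintf(stderr, "bind failed: %d (%s)\n", err, ldap_err2string(err));
        return err;
    }
    return LDAP_SUCCESS;
}
```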
48008ae89376bb8a1d0cddad86a606b44f11ea73
389ds/389-ds-base
Ticket 48951 - dsadm and dsconf base files Fix Description: To unite all commands within lib389! Add the base poc of dsadm and dsconf. dsadm is able to list instances running on the server. These are based on arg parse, and are extensible, able to be tested with pytest and take advantage of all the features of lib389. -- Move the SetupDs object out of tools to a seperate module in the class hierarchy. -- Complete rewrite of how the ini for the pre-rel installer works this allows us to autogenerate examples, better manager merging options, and track defaults. In the future we'll be able to base our versioned defaults on this via subclassing. -- So that we can assert the correct behaviour of the new installer this allows us to unit test it, and assert the installation is correct as we expect. -- Add dsconf backend handling, and cli tests for the new tools. This way we can make sure end to end of the process is tested, and that the behaviour is correct as expected. This include option management and failure scenarios we can't currently test in the cli tools. -- Based on Mark's comments, add connOnly to the test and the cli. https://fedorahosted.org/389/ticket/48951 Author: wibrown Review by: nhosoi, mreynolds (Thanks!)
commit 48008ae89376bb8a1d0cddad86a606b44f11ea73 Author: William Brown <[email protected]> Date: Mon Aug 15 11:23:06 2016 +1000 Ticket 48951 - dsadm and dsconf base files Fix Description: To unite all commands within lib389! Add the base poc of dsadm and dsconf. dsadm is able to list instances running on the server. These are based on arg parse, and are extensible, able to be tested with pytest and take advantage of all the features of lib389. -- Move the SetupDs object out of tools to a seperate module in the class hierarchy. -- Complete rewrite of how the ini for the pre-rel installer works this allows us to autogenerate examples, better manager merging options, and track defaults. In the future we'll be able to base our versioned defaults on this via subclassing. -- So that we can assert the correct behaviour of the new installer this allows us to unit test it, and assert the installation is correct as we expect. -- Add dsconf backend handling, and cli tests for the new tools. This way we can make sure end to end of the process is tested, and that the behaviour is correct as expected. This include option management and failure scenarios we can't currently test in the cli tools. -- Based on Mark's comments, add connOnly to the test and the cli. https://fedorahosted.org/389/ticket/48951 Author: wibrown Review by: nhosoi, mreynolds (Thanks!) diff --git a/src/lib389/cli/dsadm b/src/lib389/cli/dsadm new file mode 100755 index 000000000..00ea54df9 --- /dev/null +++ b/src/lib389/cli/dsadm @@ -0,0 +1,91 @@ +#!/usr/bin/python3 + +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import argparse +import logging +import sys + +from lib389 import DirSrv +from lib389.cli_adm import instance as cli_instance +from lib389.cli_base import disconnect_instance + +logging.basicConfig() +log = logging.getLogger("dsadm") + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + + parser.add_argument('-v', '--verbose', + help="Display verbose operation tracing during command execution", + action='store_true', default=False + ) + # Should this actually be in the sub modules? That way they can set the requirements. + parser.add_argument('-Z', '--instance', + help="The name of the instance to act upon", + default=None, + ) + + subparsers = parser.add_subparsers(help="resources to act upon") + + # We stack our needed options in via submodules. + + cli_instance.create_parser(subparsers) + + # Then we tell it to execute. + + args = parser.parse_args() + + if args.verbose: + log.setLevel(logging.DEBUG) + else: + log.setLevel(logging.INFO) + + log.debug("The 389 Directory Server Administration Tool") + # Leave this comment here: UofA let me take this code with me provided + # I gave attribution. -- wibrown + log.debug("Inspired by works of: ITS, The University of Adelaide") + + log.debug("Called with: %s", args) + + # Assert we have a resources to work on. 
+ if not hasattr(args, 'func'): + log.error("No resource provided to act upon") + log.error("USAGE: dsadm [options] <resource> <action> [action options]") + sys.exit(1) + + # Connect + # inst = None + inst = DirSrv(verbose=args.verbose) + + result = True + if args.verbose: + # inst = connect_instance(args.ldapurl, args.binddn, args.verbose, args.starttls) + result = args.func(inst, log, args) + else: + try: + # inst = connect_instance(args.ldapurl, args.binddn, args.verbose, args.starttls) + result = args.func(inst, log, args) + except Exception as e: + log.debug(e, exc_info=True) + log.error("Error: %s" % e.message) + disconnect_instance(inst) + + if result is True: + log.info('FINISH: Command succeeded') + elif result is False: + log.info('FAIL: Command failed. See output for details.') + + # Done! + log.debug("dsadm is brought to you by the letter R and the number 27.") + + if result is False: + sys.exit(1) + diff --git a/src/lib389/cli/dsconf b/src/lib389/cli/dsconf new file mode 100755 index 000000000..84507bac8 --- /dev/null +++ b/src/lib389/cli/dsconf @@ -0,0 +1,92 @@ +#!/usr/bin/python3 + +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import argparse +import logging +import ldap +import sys + +from lib389 import DirSrv +from lib389.cli_conf import backend as cli_backend +from lib389.cli_base import disconnect_instance, connect_instance + +logging.basicConfig() +log = logging.getLogger("dsconf") + +if __name__ == '__main__': + + defbase = ldap.get_option(ldap.OPT_DEFBASE) + + parser = argparse.ArgumentParser() + # Build the base ldap options, this keeps in unified. + + parser.add_argument('-D', '--binddn', + help="The account to bind as for executing operations", + default=None, + ) + parser.add_argument('-H', '--ldapurl', + help="The LDAP url to connect to, IE ldap://mai.example.com:389", + default=None, + ) + parser.add_argument('-b', '--basedn', + help="The basedn for this operation.", + default=None, + ) + parser.add_argument('-Z', '--starttls', + help="Connect with StartTLS", + default=False, action='store_true' + ) + parser.add_argument('-v', '--verbose', + help="Display verbose operation tracing during command execution", + action='store_true', default=False + ) + + subparsers = parser.add_subparsers(help="resources to act upon") + + cli_backend.create_parser(subparsers) + + args = parser.parse_args() + + if args.verbose: + log.setLevel(logging.DEBUG) + else: + log.setLevel(logging.INFO) + + log.debug("The 389 Directory Server Configuration Tool") + # Leave this comment here: UofA let me take this code with me provided + # I gave attribution. -- wibrown + log.debug("Inspired by works of: ITS, The University of Adelaide") + + log.debug("Called with: %s", args) + + # Assert we have a resources to work on. + if not hasattr(args, 'func'): + log.error("No resource provided to act upon") + log.error("USAGE: dsadm [options] <resource> <action> [action options]") + sys.exit(1) + + # Connect + inst = None + if args.verbose: + inst = connect_instance(args.ldapurl, args.binddn, args.verbose, args.starttls) + args.func(inst, args.basedn, log, args) + else: + try: + inst = connect_instance(args.ldapurl, args.binddn, args.verbose, args.starttls) + args.func(inst, args.basedn, log, args) + except Exception as e: + log.debug(e, exc_info=True) + log.error("Error: %s" % e) + disconnect_instance(inst) + + # Done! 
+ log.debug("dsconf is brought to you by the letter H and the number 25.") + + diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py index 9f1bfc6de..88da0c8b7 100644 --- a/src/lib389/lib389/_mapped_object.py +++ b/src/lib389/lib389/_mapped_object.py @@ -101,6 +101,10 @@ class DSLdapObject(DSLogging): def __str__(self): return self.__unicode__() + def display(self): + e = self._instance.getEntry(self._dn) + return e.__repr__() + # We make this a property so that we can over-ride dynamically if needed @property def dn(self): diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py index 0df04c7fb..90e8c85fc 100644 --- a/src/lib389/lib389/backend.py +++ b/src/lib389/lib389/backend.py @@ -378,6 +378,8 @@ class BackendLegacy(object): class Backend(DSLdapObject): + _must_attributes = ['nsslapd-suffix', 'cn'] + def __init__(self, instance, dn=None, batch=False): super(Backend, self).__init__(instance, dn, batch) self._rdn_attribute = 'cn' @@ -407,7 +409,7 @@ class Backend(DSLdapObject): nprops[BACKEND_PROPNAME_TO_ATTRNAME[key]] = [value, ] except KeyError: # This means, it's not a mapped value, so continue - pass + nprops[key] = value (dn, valid_props) = super(Backend, self)._validate(rdn, nprops, basedn) @@ -418,6 +420,7 @@ class Backend(DSLdapObject): super(Backend, self).create(dn, properties, basedn) if sample_entries is True: self.create_sample_entries() + return self def delete(self): if self._protected: diff --git a/src/lib389/lib389/cli_adm/__init__.py b/src/lib389/lib389/cli_adm/__init__.py new file mode 100644 index 000000000..d57ac3325 --- /dev/null +++ b/src/lib389/lib389/cli_adm/__init__.py @@ -0,0 +1,8 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + diff --git a/src/lib389/lib389/cli_adm/instance.py b/src/lib389/lib389/cli_adm/instance.py new file mode 100644 index 000000000..057418e89 --- /dev/null +++ b/src/lib389/lib389/cli_adm/instance.py @@ -0,0 +1,146 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +from lib389._constants import * +from lib389.cli_base import _get_arg + +from lib389.tools import DirSrvTools +from lib389.instance.setup import SetupDs +from getpass import getpass +import os +import time +import sys + +from lib389.instance.options import General2Base, Slapd2Base + +def instance_list(inst, log, args): + instances = inst.list(all=True) + + try: + if len(instances) > 0: + for instance in instances: + log.info("instance: %s" % instance[CONF_SERVER_ID]) + else: + log.info("No instances of Directory Server") + except IOError as e: + log.info(e) + log.info("Perhaps you need to be a different user?") + +def instance_action(inst, log, args, act): + # Get the instance name if needed. + inst_id = _get_arg( args.instance, msg="Directory Server instance name to %s" % act) + + # Allocate the instance based on name + insts = inst.list(serverid=inst_id) + if len(insts) != 1: + raise ValueError("No such instance %s" % inst_id) + + inst.allocate(insts[0]) + + # Start it! 
+ DirSrvTools.serverCmd(inst, act, True) + +def instance_start(inst, log, args): + instance_action(inst, log, args, "start") + +def instance_stop(inst, log, args): + instance_action(inst, log, args, "stop") + +def instance_create(inst, log, args): + if not args.ack: + sys.exit(0) + else: + log.info(""" + _________________________________________ +/ This is not what you want! Press ctrl-c \\ +\ now ... / + ----------------------------------------- + \\ / \\ //\\ + \\ |\\___/| / \\// \\\\ + /0 0 \\__ / // | \\ \\ + / / \\/_/ // | \\ \\ + @_^_@'/ \\/_ // | \\ \\ + //_^_/ \\/_ // | \\ \\ + ( //) | \\/// | \\ \\ + ( / /) _|_ / ) // | \\ _\\ + ( // /) '/,_ _ _/ ( ; -. | _ _\\.-~ .-~~~^-. + (( / / )) ,-{ _ `-.|.-~-. .~ `. + (( // / )) '/\\ / ~-. _ .-~ .-~^-. \\ + (( /// )) `. { } / \\ \\ + (( / )) .----~-.\\ \\-' .~ \\ `. \\^-. + ///.----..> \\ _ -~ `. ^-` ^-_ + ///-._ _ _ _ _ _ _}^ - - - - ~ ~-- ,.-~ + /.-~ + """) + for i in range(1,6): + log.info('%s ...' % (5 - int(i))) + time.sleep(1) + log.info('Launching ...') + sd = SetupDs(args.verbose, args.dryrun, log) + ### If args.file is not set, we need to interactively get answers! + if sd.create_from_inf(args.file): + # print("Sucessfully created instance") + return True + else: + # print("Failed to create instance") + return False + +def instance_example(inst, log, args): + print(""" +; --- BEGIN COPYRIGHT BLOCK --- +; Copyright (C) 2015 Red Hat, Inc. +; All rights reserved. +; +; License: GPL (version 3 or any later version). +; See LICENSE for details. +; --- END COPYRIGHT BLOCK --- + +; Author: firstyear at redhat.com + +; This is a version 2 ds setup inf file. +; It is used by the python versions of setup-ds-* +; Most options map 1 to 1 to the original .inf file. +; However, there are some differences that I envision +; For example, note the split backend section. +; You should be able to create, one, many or no backends in an install + + """) + g2b = General2Base(log) + s2b = Slapd2Base(log) + print(g2b.collect_help()) + print(s2b.collect_help()) + +def create_parser(subparsers): + instance_parser = subparsers.add_parser('instance', help="Manager instances of Directory Server") + + subcommands = instance_parser.add_subparsers(help="action") + + list_parser = subcommands.add_parser('list', help="List installed instances of Directory Server") + list_parser.set_defaults(func=instance_list) + + start_parser = subcommands.add_parser('start', help="Start an instance of Directory Server, if it is not currently running") + start_parser.add_argument('instance', nargs=1, help="The name of the instance to start.") + start_parser.set_defaults(func=instance_start) + + stop_parser = subcommands.add_parser('stop', help="Stop an instance of Directory Server, if it is currently running") + stop_parser.add_argument('instance', nargs=1, help="The name of the instance to stop.") + stop_parser.set_defaults(func=instance_stop) + + create_parser = subcommands.add_parser('create', help="Create an instance of Directory Server. Can be interactive or silent with an inf answer file") + create_parser.add_argument('-n', '--dryrun', help="Validate system and configurations only. Do not alter the system.", action='store_true', default=False) + create_parser.add_argument('-f', '--file', help="Inf file to use with prepared answers") + create_parser.add_argument('--IsolemnlyswearthatIamuptonogood', dest="ack", + help="""You are here likely here by mistake! You want setup-ds.pl! 
+By setting this value you acknowledge and take responsibility for the fact this command is UNTESTED and NOT READY. You are ON YOUR OWN! +""", + action='store_true', default=False) + create_parser.set_defaults(func=instance_create) + + example_parser = subcommands.add_parser('example', help="Display an example ini answer file, with comments") + example_parser.set_defaults(func=instance_example) + diff --git a/src/lib389/lib389/cli_base/__init__.py b/src/lib389/lib389/cli_base/__init__.py new file mode 100644 index 000000000..9314d1baa --- /dev/null +++ b/src/lib389/lib389/cli_base/__init__.py @@ -0,0 +1,164 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import sys + +from getpass import getpass +from lib389 import DirSrv +from lib389.properties import SER_LDAP_URL, SER_ROOT_DN, SER_ROOT_PW + +MAJOR, MINOR, _, _, _ = sys.version_info + +# REALLY PYTHON 3? REALLY??? +def _input(msg): + if MAJOR >= 3: + return input(msg) + else: + return raw_input(msg) + + +def _get_arg(args, msg=None): + if args is not None and len(args) > 0: + if type(args) is list: + return args[0] + else: + return args + else: + return _input("%s : " % msg) + +def _get_args(args, kws): + kwargs = {} + while len(kws) > 0: + kw, msg, priv = kws.pop(0) + + if args is not None and len(args) > 0: + kwargs[kw] = args.pop(0) + else: + if priv: + kwargs[kw] = getpass("%s : " % msg) + else: + kwargs[kw] = _input("%s : " % msg) + return kwargs + +# This is really similar to get_args, but generates from an array +def _get_attributes(args, attrs): + kwargs = {} + for attr in attrs: + if args is not None and len(args) > 0: + kwargs[attr] = args.pop(0) + else: + if attr.lower() == 'userpassword': + kwargs[attr] = getpass("Enter value for %s : " % attr) + else: + kwargs[attr] = _input("Enter value for %s : " % attr) + return kwargs + + +def _warn(data, msg=None): + if msg is not None: + print("%s :" % msg) + if 'Yes I am sure' != _input("Type 'Yes I am sure' to continue: "): + raise Exception("Not sure if want") + return data + +# We'll need another of these that does a "connect via instance name?" +def connect_instance(ldapurl, binddn, verbose, starttls): + dsargs = { + SER_LDAP_URL: ldapurl, + SER_ROOT_DN: binddn + } + ds = DirSrv(verbose=verbose) + ds.allocate(dsargs) + if not ds.can_autobind() and binddn is not None: + dsargs[SER_ROOT_PW] = getpass("Enter password for %s on %s : " % (binddn, ldapurl)) + elif binddn is None: + raise Exception("Must provide a binddn to connect with") + ds.allocate(dsargs) + ds.open(starttls=starttls, connOnly=True) + print("") + return ds + +def disconnect_instance(inst): + if inst is not None: + inst.close() + +def _generic_list(inst, basedn, log, manager_class, **kwargs): + mc = manager_class(inst, basedn) + ol = mc.list() + if len(ol) == 0: + log.info("No objects to display") + elif len(ol) > 0: + # We might sort this in the future + for o in ol: + o_str = o.__unicode__() + log.info(o_str) + +# Display these entries better! 
+def _generic_get(inst, basedn, log, manager_class, selector): + mc = manager_class(inst, basedn) + o = mc.get(selector) + o_str = o.display() + log.info(o_str) + +def _generic_get_dn(inst, basedn, log, manager_class, dn): + mc = manager_class(inst, basedn) + o = mc.get(dn=dn) + o_str = o.display() + log.info(o_str) + +def _generic_create(inst, basedn, log, manager_class, kwargs): + mc = manager_class(inst, basedn) + o = mc.create(properties=kwargs) + o_str = o.__unicode__() + log.info('Sucessfully created %s' % o_str) + +def _generic_delete(inst, basedn, log, object_class, dn): + # Load the oc direct + o = object_class(inst, dn) + o.delete() + log.info('Sucessfully deleted %s' % dn) + + +class LogCapture(logging.Handler): + """ + This useful class is for intercepting logs, and then making assertions about + the outputs provided. Used by the cli unit tests + """ + + def __init__(self): + """ + Create a log instance and primes the output capture. + """ + super(LogCapture, self).__init__() + self.outputs = [] + self.log = logging.getLogger("LogCapture") + self.log.addHandler(self) + + def emit(self, record): + self.outputs.append(record) + + def contains(self, query): + """ + Assert that the query string listed is in some logged Record. + """ + result = False + for rec in self.outputs: + if query in rec.message: + result = True + return result + + def flush(self): + self.outputs = [] + +class FakeArgs(object): + def __init__(self): + pass + + +# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 diff --git a/src/lib389/lib389/cli_conf/__init__.py b/src/lib389/lib389/cli_conf/__init__.py new file mode 100644 index 000000000..d57ac3325 --- /dev/null +++ b/src/lib389/lib389/cli_conf/__init__.py @@ -0,0 +1,8 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py new file mode 100644 index 000000000..e99cc82c0 --- /dev/null +++ b/src/lib389/lib389/cli_conf/backend.py @@ -0,0 +1,76 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +from lib389.backend import Backend, Backends +import argparse + +from lib389.cli_base import ( + _generic_list, + _generic_get, + _generic_get_dn, + _generic_create, + _generic_delete, + _get_arg, + _get_args, + _get_attributes, + _warn, + ) + +SINGULAR = Backend +MANY = Backends +RDN = 'cn' + +def backend_list(inst, basedn, log, args): + _generic_list(inst, basedn, log.getChild('backend_list'), MANY) + +def backend_get(inst, basedn, log, args): + rdn = _get_arg( args.selector, msg="Enter %s to retrieve" % RDN) + _generic_get(inst, basedn, log.getChild('backend_get'), MANY, rdn) + +def backend_get_dn(inst, basedn, log, args): + dn = _get_arg( args.dn, msg="Enter dn to retrieve") + _generic_get_dn(inst, basedn, log.getChild('backend_get_dn'), MANY, dn) + +def backend_create(inst, basedn, log, args): + kwargs = _get_attributes(args.extra, SINGULAR._must_attributes) + _generic_create(inst, basedn, log.getChild('backend_create'), MANY, kwargs) + +def backend_delete(inst, basedn, log, args, warn=True): + dn = _get_arg( args.dn, msg="Enter dn to delete") + if warn: + _warn(dn, msg="Deleting %s %s" % (SINGULAR.__name__, dn)) + _generic_delete(inst, basedn, log.getChild('backend_delete'), SINGULAR, dn) + + +def create_parser(subparsers): + backend_parser = subparsers.add_parser('backend', help="Manage database suffixes and backends") + + subcommands = backend_parser.add_subparsers(help="action") + + list_parser = subcommands.add_parser('list', help="List current active backends and suffixes") + list_parser.set_defaults(func=backend_list) + + get_parser = subcommands.add_parser('get', help='get') + get_parser.set_defaults(func=backend_get) + get_parser.add_argument('selector', nargs='?', help='The backend to search for') + + get_dn_parser = subcommands.add_parser('get_dn', help='get_dn') + get_dn_parser.set_defaults(func=backend_get_dn) + get_dn_parser.add_argument('dn', nargs='?', help='The backend dn to get') + + create_parser = subcommands.add_parser('create', help='create') + create_parser.set_defaults(func=backend_create) + create_parser.add_argument('extra', nargs=argparse.REMAINDER, + help='A create may take one or more extra arguments. This parameter provides them' + ) + + delete_parser = subcommands.add_parser('delete', help='deletes the object') + delete_parser.set_defaults(func=backend_delete) + delete_parser.add_argument('dn', nargs='?', help='The dn to delete') + + diff --git a/src/lib389/lib389/clitools/ds_list_instances b/src/lib389/lib389/clitools/ds_list_instances deleted file mode 100755 index 5d5c07cd2..000000000 --- a/src/lib389/lib389/clitools/ds_list_instances +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/python - -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK --- - -from lib389._constants import * -from clitools import CliTool - - -class ListTool(CliTool): - def list_instances(self): - # Remember, the prefix can be set with the os environment - try: - instances = self.ds.list(all=True) - print('Instances on this system:') - for instance in instances: - print(instance[CONF_SERVER_ID]) - except IOError as e: - print(e) - print("Perhaps you need to be a different user?") - -if __name__ == '__main__': - listtool = ListTool() - listtool.list_instances() diff --git a/src/lib389/lib389/clitools/ds_start b/src/lib389/lib389/clitools/ds_start deleted file mode 100755 index 143051cac..000000000 --- a/src/lib389/lib389/clitools/ds_start +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/python - -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- - -# from clitools import clitools_parser, get_instance_dict, get_rootdn_pass -from lib389.clitools import CliTool, clitools_parser -# from lib389 import DirSrv -from lib389._constants import * -from lib389.tools import DirSrvTools - - -class StartTool(CliTool): - def start(self): - try: - self.populate_instance_dict(self.args.instance) - self.ds.allocate(self.inst) - - DirSrvTools.serverCmd(self.ds, "start", True) - finally: - pass - # self.disconnect() - -if __name__ == '__main__': - # Do some arg parse stuff - # You can always add a child parser here too ... - args = clitools_parser.parse_args() - tool = StartTool(args) - tool.start() diff --git a/src/lib389/lib389/clitools/ds_stop b/src/lib389/lib389/clitools/ds_stop deleted file mode 100755 index 7988c8b4e..000000000 --- a/src/lib389/lib389/clitools/ds_stop +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/python - -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- - -# from clitools import clitools_parser, get_instance_dict, get_rootdn_pass -from lib389.clitools import CliTool, clitools_parser -# from lib389 import DirSrv -from lib389._constants import * -from lib389.tools import DirSrvTools - - -class StartTool(CliTool): - def start(self): - try: - self.populate_instance_dict(self.args.instance) - self.ds.allocate(self.inst) - - DirSrvTools.serverCmd(self.ds, "stop", True) - finally: - pass - # self.disconnect() - -if __name__ == '__main__': - # Do some arg parse stuff - # You can always add a child parser here too ... - args = clitools_parser.parse_args() - tool = StartTool(args) - tool.start() diff --git a/src/lib389/lib389/instance/__init__.py b/src/lib389/lib389/instance/__init__.py new file mode 100644 index 000000000..d664b4e42 --- /dev/null +++ b/src/lib389/lib389/instance/__init__.py @@ -0,0 +1,7 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py new file mode 100644 index 000000000..8561fcc0e --- /dev/null +++ b/src/lib389/lib389/instance/options.py @@ -0,0 +1,258 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import socket +import sys +import os + +MAJOR, MINOR, _, _, _ = sys.version_info + +if MAJOR >= 3: + import configparser +else: + import ConfigParser as configparser + +format_keys = [ + 'prefix', + 'bin_dir', + 'sbin_dir', + 'sysconf_dir', + 'data_dir', + 'local_state_dir', + 'lib_dir', + 'cert_dir', + 'config_dir', + 'inst_dir', + 'backup_dir', + 'db_dir', + 'ldif_dir', + 'lock_dir', + 'log_dir', + 'run_dir', + 'schema_dir', + 'tmp_dir', +] + + +class Options2(object): + # This stores the base options in a self._options dict. + # It provides a number of options for: + # - dict overlay + # - parsing the config parser types. + def __init__(self, log): + # 'key' : (default, helptext, valid_func ) + self._options = {} # this takes the default + self._type = {} # Lists the type the item should be. + self._helptext = {} # help text for the option, MANDATORY. + self._valid_func = {} # options verification function. + self._section = None + self.log = log + + def parse_inf_config(self, config): + v = None + for k in self._options.keys(): + try: + if self._type[k] == int: + v = config.getint(self._section, k) + elif self._type[k] == bool: + v = config.getboolean(self._section, k) + elif self._type[k] == str: + v = config.get(self._section, k) + # How does this handle wrong types? + except ValueError: + # Should we raise an assertion error? + # No, the sectons don't exist, continue + self.log.debug('%s:%s not in inf, or incorrect type, using default' % (self._section, k)) + continue + except configparser.NoOptionError: + self.log.debug('%s:%s not in inf, or incorrect type, using default' % (self._section, k)) + continue + self._options[k] = v + + def set(self, option, value): + self._options[option] = value + + def verify(self): + pass + + def collect(self): + return self._options + + def collect_help(self): + helptext = "[%s]\n" % self._section + for k in self._options.keys(): + helptext += "# %s: %s\n" % (k, self._helptext[k]) + helptext += "# type: %s\n" % (self._type[k].__name__) + helptext += "; %s = %s\n\n" % (k, self._options[k]) + return helptext + +# +# Base, example dicts of the general, backend (userRoot) options. +# + +class General2Base(Options2): + def __init__(self, log): + super(General2Base, self).__init__(log) + self._section = 'general' + + self._options['config_version'] = 2 + self._type['config_version'] = int + self._helptext['config_version'] = "The format version of the inf answer file." + + self._options['full_machine_name'] = socket.gethostname() + self._type['full_machine_name'] = str + self._helptext['full_machine_name'] = "The fully qualified hostname of this system." + + self._options['strict_host_checking'] = True + self._type['strict_host_checking'] = bool + self._helptext['strict_host_checking'] = "If true, will validate forward and reverse dns names for full_machine_name" + + self._options['selinux'] = True + self._type['selinux'] = bool + self._helptext['selinux'] = "Enable SELinux detection and integration. Normally, this should always be True, and will correctly detect when SELinux is not present." + + self._options['defaults'] = '99999' + self._type['defaults'] = str + self._helptext['defaults'] = "Set the configuration defaults version. If set to 99999, always use the latest values available for the slapd section. This allows pinning default values in cn=config to specific Directory Server releases." + + +# +# This module contains the base options and configs for Director Server +# setup and install. 
This allows +# + +class Slapd2Base(Options2): + def __init__(self, log): + super(Slapd2Base, self).__init__(log) + self._section = 'slapd' + + self._options['instance_name'] = None + self._type['instance_name'] = str + self._helptext['instance_name'] = "The name of the instance. Cannot be changed post installation." + + self._options['user'] = 'dirsrv' + self._type['user'] = str + self._helptext['user'] = "The user account ns-slapd will drop privileges to during operation." + + self._options['group'] = 'dirsrv' + self._type['group'] = str + self._helptext['group'] = "The group ns-slapd will drop privilleges to during operation." + + self._options['root_dn'] = 'cn=Directory Manager' + self._type['root_dn'] = str + self._helptext['root_dn'] = "The Distinquished Name of the Administrator account. This is equivalent to root of your Directory Server." + + self._options['root_password'] = None + self._type['root_password'] = str + self._helptext['root_password'] = "The password for the root_dn account. " + + self._options['prefix'] = os.environ.get('PREFIX', "") + self._type['prefix'] = str + self._helptext['prefix'] = "The filesystem prefix for all other locations. Unless you are developing DS, you likely never need to set this. This value can be reffered to in other fields with {prefix}, and can be set with the environment variable PREFIX." + + self._options['port'] = 389 + self._type['port'] = int + self._helptext['port'] = "The TCP port that Directory Server will listen on for LDAP connections." + + self._options['secure_port'] = 636 + self._type['secure_port'] = int + self._helptext['secure_port'] = "The TCP port that Directory Server will listen on for TLS secured LDAP connections." + + # In the future, make bin and sbin /usr/[s]bin, but we may need autotools assistance from Ds + self._options['bin_dir'] = "{prefix}/bin" + self._type['bin_dir'] = str + self._helptext['bin_dir'] = "The location Directory Server can find binaries. You should not need to alter this value." + + self._options['sbin_dir'] = "{prefix}/sbin" + self._type['sbin_dir'] = str + self._helptext['sbin_dir'] = "The location Directory Server can find systemd administration binaries. You should not need to alter this value." + + self._options['sysconf_dir'] = "{prefix}/etc" + self._type['sysconf_dir'] = str + self._helptext['sysconf_dir'] = "The location of the system configuration directory. You should not need to alter this value." + + # In the future, make bin and sbin /usr/[s]bin, but we may need autotools assistance from Ds + self._options['data_dir'] = "{prefix}/share" + self._type['data_dir'] = str + self._helptext['data_dir'] = "The location of shared static data. You should not need to alter this value." + + self._options['local_state_dir'] = "{prefix}/var" + self._type['local_state_dir'] = str + self._helptext['local_state_dir'] = "The location prefix to variable data. You should not need to alter this value." + + self._options['lib_dir'] = "{prefix}/usr/lib64/dirsrv" + self._type['lib_dir'] = str + self._helptext['lib_dir'] = "The location to Directory Server shared libraries. You should not need to alter this value." + + self._options['cert_dir'] = "{sysconf_dir}/dirsrv/slapd-{instance_name}" + self._type['cert_dir'] = str + self._helptext['cert_dir'] = "The location where NSS will store certificates." + + self._options['config_dir'] = "{sysconf_dir}/dirsrv/slapd-{instance_name}" + self._type['config_dir'] = str + self._helptext['config_dir'] = "The location where dse.ldif and other configuration will be stored. 
You should not need to alter this value." + + self._options['inst_dir'] = "{local_state_dir}/lib/dirsrv/slapd-{instance_name}" + self._type['inst_dir'] = str + self._helptext['inst_dir'] = "The location of the Directory Server databases, ldif and backups. You should not need to alter this value." + + self._options['backup_dir'] = "{inst_dir}/bak" + self._type['backup_dir'] = str + self._helptext['backup_dir'] = "The location where Directory Server will export and import backups from. You should not need to alter this value." + + self._options['db_dir'] = "{inst_dir}/db" + self._type['db_dir'] = str + self._helptext['db_dir'] = "The location where Directory Server will store databases. You should not need to alter this value." + + self._options['ldif_dir'] = "{inst_dir}/ldif" + self._type['ldif_dir'] = str + self._helptext['ldif_dir'] = "The location where Directory Server will export and import ldif from. You should not need to alter this value." + + self._options['lock_dir'] = "{local_state_dir}/lock/dirsrv/slapd-{instance_name}" + self._type['lock_dir'] = str + self._helptext['lock_dir'] = "The location where Directory Server will store lock files. You should not need to alter this value." + + self._options['log_dir'] = "{local_state_dir}/log/dirsrv/slapd-{instance_name}" + self._type['log_dir'] = str + self._helptext['log_dir'] = "The location where Directory Server will write log files. You should not need to alter this value." + + self._options['run_dir'] = "{local_state_dir}/run/dirsrv" + self._type['run_dir'] = str + self._helptext['run_dir'] = "The location where Directory Server will create pid files. You should not need to alter this value." + + self._options['schema_dir'] = "{config_dir}/schema" + self._type['schema_dir'] = str + self._helptext['schema_dir'] = "The location where Directory Server will store and write schema. You should not need to alter this value." + + self._options['tmp_dir'] = "/tmp" + self._type['tmp_dir'] = str + self._helptext['tmp_dir'] = "The location where Directory Server will write temporary files. You should not need to alter this value." + + def _format(self, d): + new_d = {} + ks = d.keys() + no_format_keys = ks - format_keys + + for k in no_format_keys: + new_d[k] = d[k] + for k in format_keys: + # Will these be done in correct order? + if self._type[k] == str: + new_d[k] = d[k].format(**new_d) + else: + new_d[k] = d[k] + return new_d + + def collect(self): + # This does the final format and return of options. + return self._format(self._options) + +# We use inheritence to "overlay" from base types and options, and we can then +# stack progressive versions "options" on top. +# This class is for + diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py new file mode 100644 index 000000000..5f6b18d08 --- /dev/null +++ b/src/lib389/lib389/instance/setup.py @@ -0,0 +1,425 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import os +import ldap +import shutil +import sys +import pwd +import grp +import re +import socket + +from lib389._constants import * +from lib389.properties import * +from lib389.passwd import password_hash, password_generate + +from lib389.instance.options import General2Base, Slapd2Base + +# The poc backend api +from lib389.backend import Backends +from lib389.utils import ( + is_a_dn, + ensure_bytes, + ensure_str, + socket_check_open,) + +try: + # There are too many issues with this on EL7 + # Out of the box, it's just outright broken ... + import six.moves.urllib.request + import six.moves.urllib.parse + import six.moves.urllib.error + import six +except ImportError: + pass + +MAJOR, MINOR, _, _, _ = sys.version_info + +if MAJOR >= 3: + import configparser +else: + import ConfigParser as configparser + + +class SetupDs(object): + """ + Implements the Directory Server installer. + + This maybe subclassed, and a number of well known steps will be called. + This allows the inf to be shared, and for other installers to work in lock + step with this. + + If you are subclassing you want to derive: + + _validate_config_2(self, config): + _prepare(self, extra): + _install(self, extra): + + If you are calling this from an INF, you can pass the config in + _validate_config, then stash the result into self.extra + + If you have anything you need passed to your install helpers, this can + be given in create_from_args(extra) if you are calling as an api. + + If you use create_from_inf, self.extra is passed to create_from_args for + you. You only need to over-load the three methods above. + + A logging interface is provided to self.log that you should call. + """ + + def __init__(self, verbose=False, dryrun=False, log=None): + self.verbose = verbose + self.extra = None + self.dryrun = dryrun + # Expose the logger to our children. + self.log = log.getChild('SetupDs') + if self.verbose: + self.log.info('Running setup with verbose') + + # Could be nicer if we did self._get_config_fallback_<type>? + def _get_config_fallback(self, config, group, attr, value, boolean=False, num=False): + try: + if boolean: + return config.getboolean(group, attr) + elif num: + return config.getint(group, attr) + else: + return config.get(group, attr) + except ValueError: + return value + except configparser.NoOptionError: + self.log.info("%s not specified:setting to default - %s" % (attr, value)) + return value + + def _validate_config_2(self, config): + pass + + def _prepare(self, extra): + pass + + def _install(self, extra): + pass + + def _validate_ds_2_config(self, config): + assert config.has_section('slapd') + # Extract them in a way that create can understand. 
+ + general_options = General2Base(self.log) + general_options.parse_inf_config(config) + general_options.verify() + general = general_options.collect() + + if self.verbose: + self.log.info("Configuration general %s" % general) + + slapd_options = Slapd2Base(self.log) + slapd_options.parse_inf_config(config) + slapd_options.verify() + slapd = slapd_options.collect() + + if self.verbose: + self.log.info("Configuration slapd %s" % slapd) + + backends = [] + for section in config.sections(): + if section.startswith('backend-'): + be = {} + # TODO: Add the other BACKEND_ types + be[BACKEND_NAME] = section.replace('backend-', '') + be[BACKEND_SUFFIX] = config.get(section, 'suffix') + be[BACKEND_SAMPLE_ENTRIES] = config.getboolean(section, 'sample_entries') + backends.append(be) + + if self.verbose: + self.log.info("Configuration backends %s" % backends) + + return (general, slapd, backends) + + def _validate_ds_config(self, config): + # This will move to lib389 later. + # Check we have all the sections. + # Make sure we have needed keys. + assert(config.has_section('general')) + assert(config.has_option('general', 'config_version')) + assert(config.get('general', 'config_version') >= '2') + if config.get('general', 'config_version') == '2': + # Call our child api to validate itself from the inf. + self._validate_config_2(config) + return self._validate_ds_2_config(config) + else: + self.log.info("Failed to validate configuration version.") + assert(False) + + def create_from_inf(self, inf_path): + """ + Will trigger a create from the settings stored in inf_path + """ + # Get the inf file + if self.verbose: + self.log.info("Using inf from %s" % inf_path) + if not os.path.isfile(inf_path): + self.log.error("%s is not a valid file path" % inf_path) + return False + config = None + try: + config = configparser.SafeConfigParser() + config.read([inf_path]) + except Exception as e: + self.log.error("Exception %s occured" % e) + return False + + if self.verbose: + self.log.info("Configuration %s" % config.sections()) + + (general, slapd, backends) = self._validate_ds_config(config) + + # Actually do the setup now. + self.create_from_args(general, slapd, backends, self.extra) + + return True + + def _prepare_ds(self, general, slapd, backends): + + assert(general['defaults'] is not None) + if self.verbose: + self.log.info("PASSED: using config settings %s" % general['defaults']) + # Validate our arguments. + assert(slapd['user'] is not None) + # check the user exists + assert(pwd.getpwnam(slapd['user'])) + slapd['user_uid'] = pwd.getpwnam(slapd['user']).pw_uid + assert(slapd['group'] is not None) + assert(grp.getgrnam(slapd['group'])) + slapd['group_gid'] = grp.getgrnam(slapd['group']).gr_gid + # check this group exists + # Check that we are running as this user / group, or that we are root. 
+ assert(os.geteuid() == 0 or getpass.getuser() == slapd['user']) + + if self.verbose: + self.log.info("PASSED: user / group checking") + + assert(general['full_machine_name'] is not None) + assert(general['strict_host_checking'] is not None) + if general['strict_host_checking'] is True: + # Check it resolves with dns + assert(socket.gethostbyname(general['full_machine_name'])) + if self.verbose: + self.log.info("PASSED: Hostname strict checking") + + assert(slapd['prefix'] is not None) + if (slapd['prefix'] != ""): + assert(os.path.exists(slapd['prefix'])) + if self.verbose: + self.log.info("PASSED: prefix checking") + + # We need to know the prefix before we can do the instance checks + assert(slapd['instance_name'] is not None) + # Check if the instance exists or not. + # Should I move this import? I think this prevents some recursion + from lib389 import DirSrv + ds = DirSrv(verbose=self.verbose) + ds.prefix = slapd['prefix'] + insts = ds.list(serverid=slapd['instance_name']) + assert(len(insts) == 0) + + if self.verbose: + self.log.info("PASSED: instance checking") + + assert(slapd['root_dn'] is not None) + # Assert this is a valid DN + assert(is_a_dn(slapd['root_dn'])) + assert(slapd['root_password'] is not None) + # Check if pre-hashed or not. + # !!!!!!!!!!!!!! + + # Right now, the way that rootpw works on ns-slapd works, it force hashes the pw + # see https://fedorahosted.org/389/ticket/48859 + if not re.match('^\{[A-Z0-9]+\}.*$', slapd['root_password']): + # We need to hash it. Call pwdhash-bin. + # slapd['root_password'] = password_hash(slapd['root_password'], prefix=slapd['prefix']) + pass + else: + pass + + # Create a random string + # Hash it. + # This will be our temporary rootdn password so that we can do + # live mods and setup rather than static ldif manipulations. + self._raw_secure_password = password_generate() + self._secure_password = password_hash(self._raw_secure_password, bin_dir=slapd['bin_dir']) + + if self.verbose: + self.log.info("PASSED: root user checking") + + assert(slapd['port'] is not None) + assert(socket_check_open('::1', slapd['port']) is False) + assert(slapd['secure_port'] is not None) + assert(socket_check_open('::1', slapd['secure_port']) is False) + if self.verbose: + self.log.info("PASSED: network avaliability checking") + + # Make assertions of the paths? + + # Make assertions of the backends? + + def create_from_args(self, general, slapd, backends=[], extra=None): + """ + Actually does the setup. this is what you want to call as an api. + """ + # Check we have privs to run + + if self.verbose: + self.log.info("READY: preparing installation") + self._prepare_ds(general, slapd, backends) + # Call our child api to prepare itself. + self._prepare(extra) + + if self.verbose: + self.log.info("READY: beginning installation") + + if self.dryrun: + self.log.info("NOOP: dry run requested") + else: + # Actually trigger the installation. + self._install_ds(general, slapd, backends) + # Call the child api to do anything it needs. + self._install(extra) + self.log.info("FINISH: completed installation") + + def _install_ds(self, general, slapd, backends): + """ + Actually install the Ds from the dicts provided. + + You should never call this directly, as it bypasses assertions. + """ + # register the instance to /etc/sysconfig + # We do this first so that we can trick remove-ds.pl if needed. + # There may be a way to create this from template like the dse.ldif ... 
+ initconfig = "" + with open("%s/dirsrv/config/template-initconfig" % slapd['sysconf_dir']) as template_init: + for line in template_init.readlines(): + initconfig += line.replace('{{', '{', 1).replace('}}', '}', 1).replace('-', '_') + with open("%s/sysconfig/dirsrv-%s" % (slapd['sysconf_dir'], slapd['instance_name']), 'w') as f: + f.write(initconfig.format( + SERVER_DIR=slapd['lib_dir'], + SERVERBIN_DIR=slapd['sbin_dir'], + CONFIG_DIR=slapd['config_dir'], + INST_DIR=slapd['inst_dir'], + RUN_DIR=slapd['run_dir'], + DS_ROOT='', + PRODUCT_NAME='slapd', + )) + + # Create all the needed paths + # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir? schema_dir, + for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): + if self.verbose: + self.log.info("ACTION: creating %s" % slapd[path]) + try: + os.makedirs(slapd[path], mode=0o770) + except OSError: + pass + os.chown(slapd[path], slapd['user_uid'], slapd['group_gid']) + + # Copy correct data to the paths. + # Copy in the schema + # This is a little fragile, make it better. + shutil.copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir']) + os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid']) + + # Copy in the collation + srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf') + dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf') + shutil.copy2(srcfile, dstfile) + os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid']) + + # Selinux fixups? + # Restorecon of paths? + # Bind sockets to our type? + + # Create certdb in sysconfidir + if self.verbose: + self.log.info("ACTION: Creating certificate database is %s" % slapd['cert_dir']) + # nss_create_new_database(slapd['cert_dir']) + + # Create dse.ldif with a temporary root password. + # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif + # Variables are done with %KEY%. + # You could cheat and read it in, do a replace of % to { and } then use format? + if self.verbose: + self.log.info("ACTION: Creating dse.ldif") + dse = "" + with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data', 'template-dse.ldif')) as template_dse: + for line in template_dse.readlines(): + dse += line.replace('%', '{', 1).replace('%', '}', 1) + + with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse: + file_dse.write(dse.format( + schema_dir=slapd['schema_dir'], + lock_dir=slapd['lock_dir'], + tmp_dir=slapd['tmp_dir'], + cert_dir=slapd['cert_dir'], + ldif_dir=slapd['ldif_dir'], + bak_dir=slapd['backup_dir'], + run_dir=slapd['run_dir'], + inst_dir="", + log_dir=slapd['log_dir'], + fqdn=general['full_machine_name'], + ds_port=slapd['port'], + ds_user=slapd['user'], + rootdn=slapd['root_dn'], + # ds_passwd=slapd['root_password'], + ds_passwd=self._secure_password, # We set our own password here, so we can connect and mod. + ds_suffix='', + config_dir=slapd['config_dir'], + db_dir=slapd['db_dir'], + )) + + # open the connection to the instance. + + # Should I move this import? I think this prevents some recursion + from lib389 import DirSrv + ds_instance = DirSrv(self.verbose) + args = { + SER_PORT: slapd['port'], + SER_SERVERID_PROP: slapd['instance_name'], + SER_ROOT_DN: slapd['root_dn'], + SER_ROOT_PW: self._raw_secure_password, + SER_DEPLOYED_DIR: slapd['prefix'] + } + + ds_instance.allocate(args) + # Does this work? 
+ assert(ds_instance.exists()) + # Start the server + ds_instance.start(timeout=60) + ds_instance.open() + + # Create the backends as listed + # Load example data if needed. + for backend in backends: + ds_instance.backends.create(properties=backend) + + # Make changes using the temp root + # Change the root password finally + + # Complete. + ds_instance.config.set('nsslapd-rootpw', + ensure_str(slapd['root_password'])) + + def _remove_ds(self): + """ + The opposite of install: Removes an instance from the system. + This takes a backup of all relevant data, and removes the paths. + """ + # This probably actually would need to be able to read the ldif, to + # know what to remove ... + for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', + 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): + print(path) diff --git a/src/lib389/lib389/passwd.py b/src/lib389/lib389/passwd.py index fa6a5b5d0..58d6637d8 100644 --- a/src/lib389/lib389/passwd.py +++ b/src/lib389/lib389/passwd.py @@ -32,10 +32,10 @@ PWSCHEMES = [ # How do we feed our prefix into this? -def password_hash(pw, scheme=BESTSCHEME, prefix='/'): +def password_hash(pw, scheme=BESTSCHEME, bin_dir='/bin'): # Check that the binary exists assert(scheme in PWSCHEMES) - pwdhashbin = os.path.join(prefix, 'bin', 'pwdhash-bin') + pwdhashbin = os.path.join(bin_dir, 'pwdhash-bin') assert(os.path.isfile(pwdhashbin)) h = subprocess.check_output([pwdhashbin, '-s', scheme, pw]).strip() return h.decode('utf-8') diff --git a/src/lib389/lib389/tests/cli/__init__.py b/src/lib389/lib389/tests/cli/__init__.py new file mode 100644 index 000000000..d57ac3325 --- /dev/null +++ b/src/lib389/lib389/tests/cli/__init__.py @@ -0,0 +1,8 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + diff --git a/src/lib389/lib389/tests/cli/adm_instance.py b/src/lib389/lib389/tests/cli/adm_instance.py new file mode 100644 index 000000000..6de4aa788 --- /dev/null +++ b/src/lib389/lib389/tests/cli/adm_instance.py @@ -0,0 +1,31 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +# Test the cli tools from the dsadm command for correct behaviour. + +import pytest +from lib389.cli_adm.instance import instance_list +from lib389 import DirSrv +from lib389.cli_base import LogCapture + + + +def test_instance_list(): + lc = LogCapture() + inst = DirSrv() + instance_list(inst, lc.log, None) + assert(lc.contains("No instances of Directory Server") or lc.contains("instance: ")) + # Now assert the logs in the capture. + +# Need a fixture to install an instance. + +# Test start + +# Test stop + + diff --git a/src/lib389/lib389/tests/cli/conf_backend.py b/src/lib389/lib389/tests/cli/conf_backend.py new file mode 100644 index 000000000..33f9b28ac --- /dev/null +++ b/src/lib389/lib389/tests/cli/conf_backend.py @@ -0,0 +1,114 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import pytest + +from lib389.cli_conf.backend import backend_list, backend_get, backend_get_dn, backend_create, backend_delete +from lib389 import DirSrv +from lib389.cli_base import LogCapture, FakeArgs + +from lib389.instance.setup import SetupDs +from lib389.instance.options import General2Base, Slapd2Base +from lib389._constants import * + +INSTANCE_PORT = 54321 +INSTANCE_SERVERID = 'standalone' + +DEBUGGING = True + +class TopologyInstance(object): + def __init__(self, standalone, logcap): + # For these tests, we don't want to open the instance. + # instance.open() + self.standalone = standalone + self.logcap = logcap + +# Need a teardown to destroy the instance. [email protected] +def topology(request): + + lc = LogCapture() + instance = DirSrv(verbose=DEBUGGING) + instance.log.debug("Instance allocated") + args = {SER_PORT: INSTANCE_PORT, + SER_SERVERID_PROP: INSTANCE_SERVERID} + instance.allocate(args) + if instance.exists(): + instance.delete() + + # This will need to change to instance.create in the future + # when it's linked up! + sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log) + + # Get the dicts from Type2Base, as though they were from _validate_ds_2_config + # IE get the defaults back just from Slapd2Base.collect + # Override instance name, root password, port and secure port. + + general_options = General2Base(lc.log) + general_options.verify() + general = general_options.collect() + + # Need an args -> options2 ... + slapd_options = Slapd2Base(lc.log) + slapd_options.set('instance_name', INSTANCE_SERVERID) + slapd_options.set('port', INSTANCE_PORT) + slapd_options.set('root_password', PW_DM) + slapd_options.verify() + slapd = slapd_options.collect() + + sds.create_from_args(general, slapd, {}, None) + insts = instance.list(serverid=INSTANCE_SERVERID) + # Assert we did change the system. + assert(len(insts) == 1) + # Make sure we can connect + instance.open(connOnly=True) + + def fin(): + if instance.exists() and not DEBUGGING: + instance.delete() + request.addfinalizer(fin) + + return TopologyInstance(instance, lc) + + +def test_backend_cli(topology): + # + args = FakeArgs() + backend_list(topology.standalone, None, topology.logcap.log, None) + # Assert none. + assert(topology.logcap.contains("No objects to display")) + topology.logcap.flush() + # Add a backend + # We need to fake the args + args.extra = ['dc=example,dc=com', 'userRoot'] + backend_create(topology.standalone, None, topology.logcap.log, args) + # Assert one. + backend_list(topology.standalone, None, topology.logcap.log, None) + # Assert none. + assert(topology.logcap.contains("userRoot")) + topology.logcap.flush() + # Assert we can get by name, suffix, dn + args.selector = 'userRoot' + backend_get(topology.standalone, None, topology.logcap.log, args) + # Assert none. + assert(topology.logcap.contains("userRoot")) + topology.logcap.flush() + # Assert we can get by name, suffix, dn + args.dn = 'cn=userRoot,cn=ldbm database,cn=plugins,cn=config' + backend_get_dn(topology.standalone, None, topology.logcap.log, args) + # Assert none. + assert(topology.logcap.contains("userRoot")) + topology.logcap.flush() + # delete it + backend_delete(topology.standalone, None, topology.logcap.log, args, warn=False) + backend_list(topology.standalone, None, topology.logcap.log, None) + # Assert none. + assert(topology.logcap.contains("No objects to display")) + topology.logcap.flush() + # Done! 
+ diff --git a/src/lib389/lib389/tests/instance/__init__.py b/src/lib389/lib389/tests/instance/__init__.py new file mode 100644 index 000000000..d57ac3325 --- /dev/null +++ b/src/lib389/lib389/tests/instance/__init__.py @@ -0,0 +1,8 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + diff --git a/src/lib389/lib389/tests/instance/setup_test.py b/src/lib389/lib389/tests/instance/setup_test.py new file mode 100644 index 000000000..5b19b4c98 --- /dev/null +++ b/src/lib389/lib389/tests/instance/setup_test.py @@ -0,0 +1,114 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + + +import pytest +from lib389 import DirSrv +from lib389.cli_base import LogCapture +from lib389.instance.setup import SetupDs +from lib389.instance.options import General2Base, Slapd2Base +from lib389._constants import * + +import tempfile + +INSTANCE_PORT = 54321 +INSTANCE_SERVERID = 'standalone' + +DEBUGGING = True + +class TopologyInstance(object): + def __init__(self, standalone): + # For these tests, we don't want to open the instance. + # instance.open() + self.standalone = standalone + +# Need a teardown to destroy the instance. [email protected] +def topology(request): + + instance = DirSrv(verbose=DEBUGGING) + instance.log.debug("Instance allocated") + args = {SER_PORT: INSTANCE_PORT, + SER_SERVERID_PROP: INSTANCE_SERVERID} + instance.allocate(args) + if instance.exists(): + instance.delete() + + def fin(): + if instance.exists() and not DEBUGGING: + instance.delete() + request.addfinalizer(fin) + + return TopologyInstance(instance) + +def test_setup_ds_minimal_dry(topology): + # Create the setupDs + lc = LogCapture() + # Give it the right types. + sds = SetupDs(verbose=DEBUGGING, dryrun=True, log=lc.log) + + # Get the dicts from Type2Base, as though they were from _validate_ds_2_config + # IE get the defaults back just from Slapd2Base.collect + # Override instance name, root password, port and secure port. + + general_options = General2Base(lc.log) + general_options.verify() + general = general_options.collect() + + slapd_options = Slapd2Base(lc.log) + slapd_options.set('instance_name', INSTANCE_SERVERID) + slapd_options.set('port', INSTANCE_PORT) + slapd_options.set('root_password', PW_DM) + slapd_options.verify() + slapd = slapd_options.collect() + + sds.create_from_args(general, slapd, {}, None) + + insts = topology.standalone.list(serverid=INSTANCE_SERVERID) + # Assert we did not change the system. + assert(len(insts) == 0) + +def test_setup_ds_minimal(topology): + # Create the setupDs + lc = LogCapture() + # Give it the right types. + sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log) + + # Get the dicts from Type2Base, as though they were from _validate_ds_2_config + # IE get the defaults back just from Slapd2Base.collect + # Override instance name, root password, port and secure port. 
+ + general_options = General2Base(lc.log) + general_options.verify() + general = general_options.collect() + + slapd_options = Slapd2Base(lc.log) + slapd_options.set('instance_name', INSTANCE_SERVERID) + slapd_options.set('port', INSTANCE_PORT) + slapd_options.set('root_password', PW_DM) + slapd_options.verify() + slapd = slapd_options.collect() + + sds.create_from_args(general, slapd, {}, None) + insts = topology.standalone.list(serverid=INSTANCE_SERVERID) + # Assert we did change the system. + assert(len(insts) == 1) + # Make sure we can connect + topology.standalone.open() + # Make sure we can start stop. + topology.standalone.stop() + topology.standalone.start() + + +def test_setup_ds_inf_minimal(topology): + # Write a template inf + # Check it? + # Setup the server + + pass + diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py index 2d92bc7f0..58ab4caa0 100644 --- a/src/lib389/lib389/tools.py +++ b/src/lib389/lib389/tools.py @@ -120,21 +120,6 @@ def runCmd(cmd, timeout_sec): timer.cancel() return proc.returncode -# Could be nicer if we did _get_config_fallback_<type>? -def _get_config_fallback(config, group, attr, value, boolean=False, num=False): - try: - if boolean: - return config.getboolean(group, attr) - elif num: - return config.getint(group, attr) - else: - return config.get(group, attr) - except ValueError: - return value - except configparser.NoOptionError: - log.info("%s not specified:setting to default - %s" % (attr, value)) - return value - class DirSrvTools(object): """DirSrv mix-in.""" @@ -1192,406 +1177,3 @@ class MockDirSrv(object): return 'ldaps://%s:%s' % (self.host, self.sslport) else: return 'ldap://%s:%s' % (self.host, self.port) - - -class SetupDs(object): - """ - Implements the Directory Server installer. - - This maybe subclassed, and a number of well known steps will be called. - This allows the inf to be shared, and for other installers to work in lock - step with this. - - If you are subclassing you want to derive: - - _validate_config_2(self, config): - _prepare(self, extra): - _install(self, extra): - - If you are calling this from an INF, you can pass the config in - _validate_config, then stash the result into self.extra - - If you have anything you need passed to your install helpers, this can - be given in create_from_args(extra) if you are calling as an api. - - If you use create_from_inf, self.extra is passed to create_from_args for - you. You only need to over-load the three methods above. - - A logging interface is provided to self.log that you should call. - """ - - def __init__(self, verbose=False, dryrun=False): - self.verbose = verbose - self.extra = None - self.dryrun = dryrun - # Expose the logger to our children. - self.log = log - if self.verbose: - log.info('Running setup with verbose') - - def _validate_config_2(self, config): - pass - - def _prepare(self, extra): - pass - - def _install(self, extra): - pass - - def _validate_ds_2_config(self, config): - assert config.has_section('slapd') - # Extract them in a way that create can understand. 
- general = {} - general['config_version'] = config.getint('general', 'config_version') - general['full_machine_name'] = config.get('general', 'full_machine_name') - general['strict_host_checking'] = _get_config_fallback(config, 'general', 'strict_host_checking', True, boolean=True) - # Change this to detect if SELinux is running - general['selinux'] = _get_config_fallback(config, 'general', 'selinux', False, boolean=True) - - if self.verbose: - log.info("Configuration general %s" % general) - - # Validate that we are a config_version=2 - assert general['config_version'] >= 2 - - slapd = {} - # Can probably set these defaults out of somewhere else ... - slapd['instance_name'] = config.get('slapd', 'instance_name') - slapd['user'] = _get_config_fallback(config, 'slapd', 'user', 'dirsrv') - slapd['group'] = _get_config_fallback(config, 'slapd', 'group', 'dirsrv') - slapd['root_dn'] = _get_config_fallback(config, 'slapd', 'root_dn', 'cn=Directory Manager') - slapd['root_password'] = config.get('slapd', 'root_password') - slapd['prefix'] = _get_config_fallback(config, 'slapd', 'prefix', '/') - - # How do we default, defaults to the DS version. - slapd['defaults'] = _get_config_fallback(config, 'slapd', 'defaults', None) - slapd['port'] = _get_config_fallback(config, 'slapd', 'port', 389, num=True) - slapd['secure_port'] = _get_config_fallback(config, 'slapd', 'secure_port', 636, num=True) - - # These are all the paths for DS, that are RELATIVE to the prefix - # This will need to change to cope with configure scripts from DS! - # perhaps these should be read as a set of DEFAULTs from a config file? - slapd['bin_dir'] = _get_config_fallback(config, 'slapd', 'bin_dir', os.path.join(slapd['prefix'], 'bin')) - slapd['sysconf_dir'] = _get_config_fallback(config, 'slapd', 'sysconf_dir', os.path.join(slapd['prefix'], 'etc')) - slapd['data_dir'] = _get_config_fallback(config, 'slapd', 'data_dir', os.path.join(slapd['prefix'], 'share')) - slapd['local_state_dir'] = _get_config_fallback(config, 'slapd', 'local_state_dir', os.path.join(slapd['prefix'], 'var')) - - slapd['lib_dir'] = _get_config_fallback(config, 'slapd', 'lib_dir', os.path.join(slapd['prefix'], 'usr/lib64/dirsrv')) - slapd['cert_dir'] = _get_config_fallback(config, 'slapd', 'cert_dir', os.path.join(slapd['prefix'], 'etc/dirsrv/slapd-%s' % slapd['instance_name'])) - slapd['config_dir'] = _get_config_fallback(config, 'slapd', 'config_dir', os.path.join(slapd['prefix'], 'etc/dirsrv/slapd-%s' % slapd['instance_name'])) - - slapd['inst_dir'] = _get_config_fallback(config, 'slapd', 'inst_dir', os.path.join(slapd['prefix'], 'var/lib/dirsrv/slapd-%s' % slapd['instance_name'])) - slapd['backup_dir'] = _get_config_fallback(config, 'slapd', 'backup_dir', os.path.join(slapd['inst_dir'], 'bak')) - slapd['db_dir'] = _get_config_fallback(config, 'slapd', 'db_dir', os.path.join(slapd['inst_dir'], 'db')) - slapd['ldif_dir'] = _get_config_fallback(config, 'slapd', 'ldif_dir', os.path.join(slapd['inst_dir'], 'ldif')) - - slapd['lock_dir'] = _get_config_fallback(config, 'slapd', 'lock_dir', os.path.join(slapd['prefix'], 'var/lock/dirsrv/slapd-%s' % slapd['instance_name'])) - slapd['log_dir'] = _get_config_fallback(config, 'slapd', 'log_dir', os.path.join(slapd['prefix'], 'var/log/dirsrv/slapd-%s' % slapd['instance_name'])) - slapd['run_dir'] = _get_config_fallback(config, 'slapd', 'run_dir', os.path.join(slapd['prefix'], 'var/run/dirsrv')) - slapd['sbin_dir'] = _get_config_fallback(config, 'slapd', 'sbin_dir', os.path.join(slapd['prefix'], 'sbin')) - 
slapd['schema_dir'] = _get_config_fallback(config, 'slapd', 'schema_dir', os.path.join(slapd['prefix'], 'etc/dirsrv/slapd-%s' % slapd['instance_name'], 'schema')) - slapd['tmp_dir'] = _get_config_fallback(config, 'slapd', 'tmp_dir', '/tmp') - - # Need to add all the default filesystem paths. - - if self.verbose: - log.info("Configuration slapd %s" % slapd) - - backends = [] - for section in config.sections(): - if section.startswith('backend-'): - be = {} - # TODO: Add the other BACKEND_ types - be[BACKEND_NAME] = section.replace('backend-', '') - be[BACKEND_SUFFIX] = config.get(section, 'suffix') - be[BACKEND_SAMPLE_ENTRIES] = config.getboolean(section, 'sample_entries') - backends.append(be) - - if self.verbose: - log.info("Configuration backends %s" % backends) - - return (general, slapd, backends) - - def _validate_ds_config(self, config): - # This will move to lib389 later. - # Check we have all the sections. - # Make sure we have needed keys. - assert(config.has_section('general')) - assert(config.has_option('general', 'config_version')) - assert(config.get('general', 'config_version') >= '2') - if config.get('general', 'config_version') == '2': - # Call our child api to validate itself from the inf. - self._validate_config_2(config) - return self._validate_ds_2_config(config) - else: - log.info("Failed to validate configuration version.") - assert(False) - - def create_from_inf(self, inf_path): - """ - Will trigger a create from the settings stored in inf_path - """ - # Get the inf file - if self.verbose: - log.info("Using inf from %s" % inf_path) - if not os.path.isfile(inf_path): - log.error("%s is not a valid file path" % inf_path) - return False - config = None - try: - config = configparser.SafeConfigParser() - config.read([inf_path]) - except Exception as e: - log.error("Exception %s occured" % e) - return False - - if self.verbose: - log.info("Configuration %s" % config.sections()) - - (general, slapd, backends) = self._validate_ds_config(config) - - # Actually do the setup now. - self.create_from_args(general, slapd, backends, self.extra) - - return True - - def _prepare_ds(self, general, slapd, backends): - # Validate our arguments. - assert(slapd['user'] is not None) - # check the user exists - assert(pwd.getpwnam(slapd['user'])) - slapd['user_uid'] = pwd.getpwnam(slapd['user']).pw_uid - assert(slapd['group'] is not None) - assert(grp.getgrnam(slapd['group'])) - slapd['group_gid'] = grp.getgrnam(slapd['group']).gr_gid - # check this group exists - # Check that we are running as this user / group, or that we are root. - assert(os.geteuid() == 0 or getpass.getuser() == slapd['user']) - - if self.verbose: - log.info("PASSED: user / group checking") - - assert(general['full_machine_name'] is not None) - assert(general['strict_host_checking'] is not None) - if general['strict_host_checking'] is True: - # Check it resolves with dns - assert(socket.gethostbyname(general['full_machine_name'])) - if self.verbose: - log.info("PASSED: Hostname strict checking") - - assert(slapd['prefix'] is not None) - assert(os.path.exists(slapd['prefix'])) - if self.verbose: - log.info("PASSED: prefix checking") - - # We need to know the prefix before we can do the instance checks - assert(slapd['instance_name'] is not None) - # Check if the instance exists or not. - # Should I move this import? 
I think this prevents some recursion - from lib389 import DirSrv - ds = DirSrv(verbose=self.verbose) - ds.prefix = slapd['prefix'] - insts = ds.list(serverid=slapd['instance_name']) - assert(len(insts) == 0) - - if self.verbose: - log.info("PASSED: instance checking") - - assert(slapd['root_dn'] is not None) - # Assert this is a valid DN - assert(is_a_dn(slapd['root_dn'])) - assert(slapd['root_password'] is not None) - # Check if pre-hashed or not. - # !!!!!!!!!!!!!! - - # Right now, the way that rootpw works on ns-slapd works, it force hashes the pw - # see https://fedorahosted.org/389/ticket/48859 - if not re.match('^\{[A-Z0-9]+\}.*$', slapd['root_password']): - # We need to hash it. Call pwdhash-bin. - # slapd['root_password'] = password_hash(slapd['root_password'], prefix=slapd['prefix']) - pass - else: - pass - - # Create a random string - # Hash it. - # This will be our temporary rootdn password so that we can do - # live mods and setup rather than static ldif manipulations. - self._raw_secure_password = password_generate() - self._secure_password = password_hash(self._raw_secure_password, prefix=slapd['prefix']) - - if self.verbose: - log.info("PASSED: root user checking") - - assert(slapd['defaults'] is not None) - if self.verbose: - log.info("PASSED: using config settings %s" % slapd['defaults']) - - assert(slapd['port'] is not None) - assert(socket_check_open('::1', slapd['port']) is False) - assert(slapd['secure_port'] is not None) - assert(socket_check_open('::1', slapd['secure_port']) is False) - if self.verbose: - log.info("PASSED: network avaliability checking") - - # Make assertions of the paths? - - # Make assertions of the backends? - - def create_from_args(self, general, slapd, backends=[], extra=None): - """ - Actually does the setup. this is what you want to call as an api. - """ - # Check we have privs to run - - if self.verbose: - log.info("READY: preparing installation") - self._prepare_ds(general, slapd, backends) - # Call our child api to prepare itself. - self._prepare(extra) - - if self.verbose: - log.info("READY: beginning installation") - - if self.dryrun: - log.info("NOOP: dry run requested") - else: - # Actually trigger the installation. - self._install_ds(general, slapd, backends) - # Call the child api to do anything it needs. - self._install(extra) - if self.verbose: - log.info("Directory Server is brought to you by the letter R and the number 27.") - log.info("FINISH: completed installation") - - def _install_ds(self, general, slapd, backends): - """ - Actually install the Ds from the dicts provided. - - You should never call this directly, as it bypasses assertions. - """ - # register the instance to /etc/sysconfig - # We do this first so that we can trick remove-ds.pl if needed. - # There may be a way to create this from template like the dse.ldif ... - initconfig = "" - with open("%s/dirsrv/config/template-initconfig" % slapd['sysconf_dir']) as template_init: - for line in template_init.readlines(): - initconfig += line.replace('{{', '{', 1).replace('}}', '}', 1).replace('-', '_') - with open("%s/sysconfig/dirsrv-%s" % (slapd['sysconf_dir'], slapd['instance_name']), 'w') as f: - f.write(initconfig.format( - SERVER_DIR=slapd['lib_dir'], - SERVERBIN_DIR=slapd['sbin_dir'], - CONFIG_DIR=slapd['config_dir'], - INST_DIR=slapd['inst_dir'], - RUN_DIR=slapd['run_dir'], - DS_ROOT='', - PRODUCT_NAME='slapd', - )) - - # Create all the needed paths - # we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir? 
schema_dir, - for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): - if self.verbose: - log.info("ACTION: creating %s" % slapd[path]) - try: - os.makedirs(slapd[path], mode=0o770) - except OSError: - pass - os.chown(slapd[path], slapd['user_uid'], slapd['group_gid']) - - # Copy correct data to the paths. - # Copy in the schema - # This is a little fragile, make it better. - shutil.copytree(os.path.join(slapd['sysconf_dir'], 'dirsrv/schema'), slapd['schema_dir']) - os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid']) - - # Copy in the collation - srcfile = os.path.join(slapd['sysconf_dir'], 'dirsrv/config/slapd-collations.conf') - dstfile = os.path.join(slapd['config_dir'], 'slapd-collations.conf') - shutil.copy2(srcfile, dstfile) - os.chown(slapd['schema_dir'], slapd['user_uid'], slapd['group_gid']) - - # Selinux fixups? - # Restorecon of paths? - # Bind sockets to our type? - - # Create certdb in sysconfidir - if self.verbose: - log.info("ACTION: Creating certificate database is %s" % slapd['cert_dir']) - # nss_create_new_database(slapd['cert_dir']) - - # Create dse.ldif with a temporary root password. - # The template is in slapd['data_dir']/dirsrv/data/template-dse.ldif - # Variables are done with %KEY%. - # You could cheat and read it in, do a replace of % to { and } then use format? - if self.verbose: - log.info("ACTION: Creating dse.ldif") - dse = "" - with open(os.path.join(slapd['data_dir'], 'dirsrv', 'data', 'template-dse.ldif')) as template_dse: - for line in template_dse.readlines(): - dse += line.replace('%', '{', 1).replace('%', '}', 1) - - with open(os.path.join(slapd['config_dir'], 'dse.ldif'), 'w') as file_dse: - file_dse.write(dse.format( - schema_dir=slapd['schema_dir'], - lock_dir=slapd['lock_dir'], - tmp_dir=slapd['tmp_dir'], - cert_dir=slapd['cert_dir'], - ldif_dir=slapd['ldif_dir'], - bak_dir=slapd['backup_dir'], - run_dir=slapd['run_dir'], - inst_dir="", - log_dir=slapd['log_dir'], - fqdn=general['full_machine_name'], - ds_port=slapd['port'], - ds_user=slapd['user'], - rootdn=slapd['root_dn'], - # ds_passwd=slapd['root_password'], - ds_passwd=self._secure_password, # We set our own password here, so we can connect and mod. - ds_suffix='', - config_dir=slapd['config_dir'], - db_dir=slapd['db_dir'], - )) - - # open the connection to the instance. - - # Should I move this import? I think this prevents some recursion - from lib389 import DirSrv - ds_instance = DirSrv(self.verbose) - args = { - SER_PORT: slapd['port'], - SER_SERVERID_PROP: slapd['instance_name'], - SER_ROOT_DN: slapd['root_dn'], - SER_ROOT_PW: self._raw_secure_password, - SER_DEPLOYED_DIR: slapd['prefix'] - } - - ds_instance.allocate(args) - # Does this work? - assert(ds_instance.exists()) - # Start the server - ds_instance.start(timeout=60) - ds_instance.open() - - # Create the backends as listed - # Load example data if needed. - for backend in backends: - ds_instance.backends.create(properties=backend) - - # Make changes using the temp root - # Change the root password finally - - # Complete. - ds_instance.config.set('nsslapd-rootpw', - ensure_str(slapd['root_password'])) - - def _remove_ds(self): - """ - The opposite of install: Removes an instance from the system. - This takes a backup of all relevant data, and removes the paths. - """ - # This probably actually would need to be able to read the ldif, to - # know what to remove ... 
- for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', - 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): - print(path) diff --git a/src/lib389/setup.py b/src/lib389/setup.py index ae656f3f6..0582b250f 100644 --- a/src/lib389/setup.py +++ b/src/lib389/setup.py @@ -53,21 +53,8 @@ setup( data_files=[ ('/usr/sbin/', [ # 'lib389/clitools/ds_setup', - 'lib389/clitools/ds_aci_lint', - 'lib389/clitools/ds_list_instances', - 'lib389/clitools/ds_monitor_backend', - 'lib389/clitools/ds_monitor_server', - 'lib389/clitools/ds_schema_attributetype_list', - 'lib389/clitools/ds_schema_attributetype_query', - 'lib389/clitools/ds_start', - 'lib389/clitools/ds_stop', - 'lib389/clitools/ds_krb_create_keytab', - 'lib389/clitools/ds_krb_create_principal', - 'lib389/clitools/ds_krb_create_realm', - 'lib389/clitools/ds_krb_destroy_realm', - 'lib389/clitools/ds_backend_getattr', - 'lib389/clitools/ds_backend_setattr', - 'lib389/clitools/ds_backend_list', + 'cli/dsadm', + 'cli/dsconf', ]), ],
0
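The lib389 commit recorded above introduces an argparse-based subcommand pattern: each cli_conf module (backend.py here) exposes create_parser(subparsers), registers its handler with set_defaults(func=...), and every handler shares the (inst, basedn, log, args) signature, with connect_instance/disconnect_instance from cli_base doing the LDAP plumbing. The following is a minimal illustrative sketch of how a top-level driver could wire those pieces together; the actual dsadm/dsconf entry points are not part of this diff, so the option names and the basedn=None choice below are assumptions, not the shipped code.

# Illustrative sketch only (not part of the commit): a minimal dsconf-style
# driver for the subcommand pattern added above.
import argparse
import logging

from lib389.cli_base import connect_instance, disconnect_instance
from lib389.cli_conf import backend as cli_backend

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("dsconf-sketch")

parser = argparse.ArgumentParser(description="sketch of a dsconf-style driver")
# Option names here are assumptions for the sketch.
parser.add_argument('-D', '--binddn', default='cn=Directory Manager')
parser.add_argument('-H', '--ldapurl', default='ldap://localhost:389')
subparsers = parser.add_subparsers(help="resources to act upon")

# Each cli_conf module registers its own subcommands and sets args.func.
cli_backend.create_parser(subparsers)

args = parser.parse_args()
if not hasattr(args, 'func'):
    parser.print_help()
    raise SystemExit(1)

inst = connect_instance(args.ldapurl, args.binddn, verbose=False, starttls=False)
try:
    # Every handler shares the (inst, basedn, log, args) signature,
    # exactly as the tests in tests/cli/conf_backend.py call them.
    args.func(inst, None, log, args)
finally:
    disconnect_instance(inst)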
d8e8119e92c6d872e317bbae5d59a08d80cb0966
389ds/389-ds-base
Ticket 47963 - skip nested groups breaks memberof fixup task Bug Description: Setting memberofskipnested to "on" breaks memberOf fixup task. Fix Description: We never want to skip nested group checking when performing a fixup task. Add a flag to distinguish that the fixup operation is a task, and not a delete. https://fedorahosted.org/389/ticket/47963 Reviewed by: nkinder(Thanks!)
commit d8e8119e92c6d872e317bbae5d59a08d80cb0966 Author: Mark Reynolds <[email protected]> Date: Wed Nov 26 16:57:05 2014 -0500 Ticket 47963 - skip nested groups breaks memberof fixup task Bug Description: Setting memberofskipnested to "on" breaks memberOf fixup task. Fix Description: We never want to skip nested group checking when performing a fixup task. Add a flag to distinguish that the fixup operation is a task, and not a delete. https://fedorahosted.org/389/ticket/47963 Reviewed by: nkinder(Thanks!) diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index 118d2322c..a59494159 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -2631,6 +2631,9 @@ void memberof_fixup_task_thread(void *arg) memberof_copy_config(&configCopy, memberof_get_config()); memberof_unlock_config(); + /* Mark this as a task operation */ + configCopy.fixup_task = 1; + if (usetxn) { Slapi_DN *sdn = slapi_sdn_new_dn_byref(td->dn); Slapi_Backend *be = slapi_be_select(sdn); @@ -2816,7 +2819,7 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) memberof_del_dn_data del_data = {0, config->memberof_attr}; Slapi_ValueSet *groups = 0; - if(!config->skip_nested){ + if(!config->skip_nested || config->fixup_task){ /* get a list of all of the groups this user belongs to */ groups = memberof_get_groups(config, sdn); } diff --git a/ldap/servers/plugins/memberof/memberof.h b/ldap/servers/plugins/memberof/memberof.h index 93f031b71..67fb31043 100644 --- a/ldap/servers/plugins/memberof/memberof.h +++ b/ldap/servers/plugins/memberof/memberof.h @@ -86,6 +86,7 @@ typedef struct memberofconfig { Slapi_Filter *group_filter; Slapi_Attr **group_slapiattrs; int skip_nested; + int fixup_task; } MemberOfConfig;
0
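The change above hinges on one small guard. The following standalone sketch restates that guard outside the server so it can be compiled on its own; the struct and function names are stand-ins, not the plugin's real MemberOfConfig or API, and only the two fields shown in the diff are modeled.

#include <stdio.h>

/* Stand-in for the two relevant MemberOfConfig fields from the diff. */
struct mo_config {
    int skip_nested; /* the memberofskipnested setting */
    int fixup_task;  /* set to 1 when running the fixup task */
};

/* Mirrors the check in memberof_fix_memberof_callback(): nested groups
 * are resolved unless skipping is enabled AND this is an ordinary
 * (non-task) operation. */
static int should_resolve_nested(const struct mo_config *cfg)
{
    return !cfg->skip_nested || cfg->fixup_task;
}

int main(void)
{
    struct mo_config delete_op = { .skip_nested = 1, .fixup_task = 0 };
    struct mo_config fixup_op  = { .skip_nested = 1, .fixup_task = 1 };

    printf("delete op resolves nested groups: %d\n", should_resolve_nested(&delete_op)); /* 0 */
    printf("fixup task resolves nested groups: %d\n", should_resolve_nested(&fixup_op)); /* 1 */
    return 0;
}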
c3389a46c584fa39b2278a295f8b2b6dad726d31
389ds/389-ds-base
Ticket #47947 - start dirsrv after chrony on RHEL7 and Fedora Description: Added After=chronyd.service to systemd.template.service.in, which is installed as this file: /usr/lib/systemd/system/[email protected] and pointed to by each server's service file: /etc/systemd/system/dirsrv.target.wants/[email protected] https://fedorahosted.org/389/ticket/47947 Reviewed by [email protected] (Thank you, Rich!!)
commit c3389a46c584fa39b2278a295f8b2b6dad726d31 Author: Noriko Hosoi <[email protected]> Date: Tue Dec 16 13:42:56 2014 -0800 Ticket #47947 - start dirsrv after chrony on RHEL7 and Fedora Description: After=chronyd.service to systemd.template.service.in, which is installed as this file: /usr/lib/systemd/system/[email protected] and pointed by each server's service file: /etc/systemd/system/dirsrv.target.wants/[email protected] https://fedorahosted.org/389/ticket/47947 Reviewed by [email protected] (Thank you, Rich!!) diff --git a/wrappers/systemd.template.service.in b/wrappers/systemd.template.service.in index 42c7c2395..9432cb01e 100644 --- a/wrappers/systemd.template.service.in +++ b/wrappers/systemd.template.service.in @@ -15,6 +15,7 @@ [Unit] Description=@capbrand@ Directory Server %i. PartOf=@systemdgroupname@ +After=chronyd.service [Service] Type=forking
0
d523c4c8c2eeb426247e42773f31dfa4420e79d6
389ds/389-ds-base
Issue 5120 - Fix compilation error Bug Description: Compilation fails with `-Wunused-function`: ``` ldap/servers/slapd/main.c:290:1: warning: ‘referral_set_defaults’ defined but not used [-Wunused-function] 290 | referral_set_defaults(void) | ^~~~~~~~~~~~~~~~~~~~~ make: *** [Makefile:4148: all] Error 2 ``` Fix Description: Remove unused function `referral_set_defaults`. Fixes: https://github.com/389ds/389-ds-base/issues/5120 Reviewed by: @progier389 (Thanks!)
commit d523c4c8c2eeb426247e42773f31dfa4420e79d6 Author: Viktor Ashirov <[email protected]> Date: Mon Aug 11 13:22:52 2025 +0200 Issue 5120 - Fix compilation error Bug Description: Compilation fails with `-Wunused-function`: ``` ldap/servers/slapd/main.c:290:1: warning: ‘referral_set_defaults’ defined but not used [-Wunused-function] 290 | referral_set_defaults(void) | ^~~~~~~~~~~~~~~~~~~~~ make: *** [Makefile:4148: all] Error 2 ``` Fix Description: Remove unused function `referral_set_defaults`. Fixes: https://github.com/389ds/389-ds-base/issues/5120 Reviewed by: @progier389 (Thanks!) diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 9d81d80f3..c370588e5 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -285,14 +285,6 @@ main_setuid(char *username) return 0; } -/* set good defaults for front-end config in referral mode */ -static void -referral_set_defaults(void) -{ - char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE]; - config_set_maxdescriptors(CONFIG_MAXDESCRIPTORS_ATTRIBUTE, "1024", errorbuf, 1); -} - static int name2exemode(char *progname, char *s, int exit_if_unknown) {
0
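For context, the snippet below is a minimal, hypothetical reproduction of the warning class being fixed: a file-scope static function with no callers trips -Wunused-function (and, under -Werror, fails the build), which is why the dead referral_set_defaults() is deleted rather than kept. Nothing here is taken from the server sources; compile with gcc -Wunused-function -Werror -c to reproduce the failure.

/* A static function with no callers in this translation unit: the
 * compiler can prove it is dead code within the file and warns. */
static int unused_helper(void)
{
    return 42;
}

int used_entry_point(void)
{
    /* unused_helper() is never called, so -Wunused-function fires on it. */
    return 0;
}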
fa9db453fba0104250e088a721faf39751fead32
389ds/389-ds-base
Bug(s) fixed: 165827 Bug Description: Daily Acceptance: Directory Install failed to register Directory server as a Red Hat server (81) Reviewed by: Nathan (Thanks!) Fix Description: The index code, in the replace case, was not checking to see if there were actually any values to delete before attempting to delete them. This fix just checks to see if there are any values to delete. Platforms tested: RHEL3 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing with the new indexing tests New Tests integrated into TET: none
commit fa9db453fba0104250e088a721faf39751fead32 Author: Rich Megginson <[email protected]> Date: Fri Aug 12 19:07:37 2005 +0000 Bug(s) fixed: 165827 Bug Description: Daily Acceptance: Directory Install failed to register Directory server as a Red Hat server (81) Reviewed by: Nathan (Thanks!) Fix Description: The index code, in the replace case, was not checking to see if there were actually any values to delete before attempting to delete them. This fix just checks to see if there are any values to delete. Platforms tested: RHEL3 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing with the new indexing tests New Tests integrated into TET: none diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c index 6e31042d8..b56370322 100644 --- a/ldap/servers/slapd/back-ldbm/index.c +++ b/ldap/servers/slapd/back-ldbm/index.c @@ -550,10 +550,12 @@ index_add_mods( } /* We need to first remove the old values from the - * index. */ + * index, if any. */ + if (deleted_valueArray) { index_addordel_values_sv( be, mods[i]->mod_type, deleted_valueArray, evals, id, flags, txn ); + } /* Free valuearray */ slapi_valueset_free(mod_vals);
0
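The fix above is a plain "only delete if there is something to delete" guard. Below is a self-contained sketch of that pattern with hypothetical names standing in for the index structures; the real call being guarded is index_addordel_values_sv(), as shown in the diff.

#include <stddef.h>
#include <stdio.h>

/* Stand-in for removing a set of old values from an index. */
static void remove_values(const char **values)
{
    for (size_t i = 0; values[i] != NULL; i++) {
        printf("removing %s\n", values[i]);
    }
}

static void apply_replace(const char **deleted_values)
{
    /* The bug: calling the removal unconditionally dereferenced a NULL
     * array when the replace produced nothing to delete. The fix guards
     * the call, exactly as the diff does with deleted_valueArray. */
    if (deleted_values) {
        remove_values(deleted_values);
    }
}

int main(void)
{
    const char *old_vals[] = { "cn=old", NULL };
    apply_replace(old_vals); /* removes one value */
    apply_replace(NULL);     /* nothing to delete: safely skipped */
    return 0;
}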
136fa64f95263ffcaca5e855b771b5d377e3de52
389ds/389-ds-base
Ticket #346 - Slow ldapmodify operation time for large quantities of multi-valued attribute values Description: slapi_entry_add_value is used to add values for sorting, but it did not pass the attribute syntax info, so the fallback compare algorithm was used. That ordering sometimes differed from the attribute-syntax-based sorting and failed to find an attribute. This patch passes the attribute syntax info for sorting. It also uses slapi_berval_cmp for the fallback compare algorithm, which is closer to the syntax-based sorting. https://fedorahosted.org/389/ticket/346 Reviewed by [email protected] (Thank you, Rich!)
commit 136fa64f95263ffcaca5e855b771b5d377e3de52 Author: Noriko Hosoi <[email protected]> Date: Wed Apr 16 15:04:36 2014 -0700 Ticket #346 - Slow ldapmodify operation time for large quantities of multi-valued attribute values Description: slapi_entry_add_value is used to add values for sorting, which did not pass the attribute syntax info and the fallback compare algorithm was used. It sometimes different from the attribute syntax based sorting and failed to find out an attribute. This patch passes an attribute syntax info for sorting. Plus, featuring slapi_berval_cmp for the fallback compare algorithm. It is closer to the syntax based sorting. https://fedorahosted.org/389/ticket/346 Reviewed by [email protected] (Thank you, Rich!) diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index 72ece6601..adf7860ac 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -2818,7 +2818,7 @@ slapi_entry_add_value (Slapi_Entry *e, const char *type, const Slapi_Value *valu Slapi_Attr **a= NULL; attrlist_find_or_create(&e->e_attrs, type, &a); if(value != (Slapi_Value *) NULL) { - slapi_valueset_add_value ( &(*a)->a_present_values, value); + slapi_valueset_add_attr_value_ext(*a, &(*a)->a_present_values, (Slapi_Value *)value, 0); } return 0; } diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c index 0740c380e..1378bd1fb 100644 --- a/ldap/servers/slapd/valueset.c +++ b/ldap/servers/slapd/valueset.c @@ -926,16 +926,10 @@ valueset_value_syntax_cmp( const Slapi_Attr *a, const Slapi_Value *v1, const Sla "slapi_attr_values2keys_sv failed for type %s\n", a->a_type, 0, 0 ); } else { - struct berval *bv1, *bv2; + const struct berval *bv1, *bv2; bv1 = &keyvals[0]->bv; bv2 = &keyvals[1]->bv; - if ( bv1->bv_len < bv2->bv_len ) { - rc = -1; - } else if ( bv1->bv_len > bv2->bv_len ) { - rc = 1; - } else { - rc = memcmp( bv1->bv_val, bv2->bv_val, bv1->bv_len ); - } + rc = slapi_berval_cmp (bv1, bv2); } if (keyvals != NULL) valuearray_free( &keyvals );
0
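To illustrate why the fallback comparison matters, the sketch below contrasts the old length-first ordering with a byte-wise lexicographic ordering. The lexicographic version only approximates what slapi_berval_cmp is described as providing in this commit message; the struct and helpers are local stand-ins, not the slapi API.

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for struct berval. */
struct my_berval {
    size_t bv_len;
    const char *bv_val;
};

/* The old fallback: shorter values always sort first, regardless of content. */
static int cmp_length_first(const struct my_berval *a, const struct my_berval *b)
{
    if (a->bv_len < b->bv_len) return -1;
    if (a->bv_len > b->bv_len) return 1;
    return memcmp(a->bv_val, b->bv_val, a->bv_len);
}

/* Byte-wise lexicographic ordering: compare the common prefix, then length.
 * This approximates the ordering the patch switches to via slapi_berval_cmp;
 * the library's exact implementation may differ in detail. */
static int cmp_lexicographic(const struct my_berval *a, const struct my_berval *b)
{
    size_t min = a->bv_len < b->bv_len ? a->bv_len : b->bv_len;
    int rc = memcmp(a->bv_val, b->bv_val, min);
    if (rc != 0) return rc;
    return (a->bv_len > b->bv_len) - (a->bv_len < b->bv_len);
}

int main(void)
{
    struct my_berval zz  = { 2, "zz" };
    struct my_berval aaa = { 3, "aaa" };

    /* Length-first puts "zz" before "aaa"; lexicographic puts "aaa" first,
     * which is much closer to what syntax-based sorting produces. */
    printf("length-first: %d, lexicographic: %d\n",
           cmp_length_first(&zz, &aaa), cmp_lexicographic(&zz, &aaa));
    return 0;
}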
2a28c6e39788de486b192d93031da32b6ecac2ae
389ds/389-ds-base
Bump tokio from 1.43.0 to 1.44.2 in /src (#6732) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.43.0 to 1.44.2. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.43.0...tokio-1.44.2) --- updated-dependencies: - dependency-name: tokio dependency-version: 1.44.2 dependency-type: indirect ... Signed-off-by: dependabot[bot] <[email protected]> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
commit 2a28c6e39788de486b192d93031da32b6ecac2ae Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed Apr 9 18:41:03 2025 -0700 Bump tokio from 1.43.0 to 1.44.2 in /src (#6732) Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.43.0 to 1.44.2. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.43.0...tokio-1.44.2) --- updated-dependencies: - dependency-name: tokio dependency-version: 1.44.2 dependency-type: indirect ... Signed-off-by: dependabot[bot] <[email protected]> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> diff --git a/src/Cargo.lock b/src/Cargo.lock index 2c633a486..ad0b1cb56 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -702,9 +702,9 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "pin-project-lite",
0
363987a5c7737ab5768f25134f17e6b51fa893f1
389ds/389-ds-base
fix recent compiler warnings Reviewed by: mareynold (Thanks!) ../ds.git/ldap/servers/slapd/entry.c: In function 'slapi_entry_get_nrdn_const': ../ds.git/ldap/servers/slapd/entry.c:2193: warning: passing argument 1 of 'slapi_rdn_init_all_dn' discards qualifiers from pointer target type ../ds.git/ldap/servers/slapd/slapi-plugin.h:2873: note: expected 'struct Slapi_RDN *' but argument is of type 'const struct slapi_rdn *' ../ds.git/ldap/servers/slapd/pw.c: In function 'slapi_pwpolicy_is_expired': ../ds.git/ldap/servers/slapd/pw.c:2193: warning: unused variable 'now' ../ds.git/ldap/servers/slapd/uniqueid.c:56: warning: 'str2Byte' declared 'static' but never defined ../ds.git/lib/base/rwlock.cpp: In function 'void rwlock_Unlock(void*)': ../ds.git/lib/base/rwlock.cpp:146: warning: suggest explicit braces to avoid ambiguous 'else' ../ds.git/ldap/servers/slapd/auth.c: In function ‘slapu_search_s’: ../ds.git/ldap/servers/slapd/auth.c:85: warning: initialization discards qualifiers from pointer target type ../ds.git/ldap/servers/slapd/back-ldbm/cache.c: In function 'dump_hash': ../ds.git/ldap/servers/slapd/back-ldbm/cache.c:305: warning: suggest parentheses around assignment used as truth value ../ds.git/ldap/servers/slapd/back-ldbm/idl_common.c: In function 'idl_append_extend': ../ds.git/ldap/servers/slapd/back-ldbm/idl_common.c:144: warning: unused variable 'x'
commit 363987a5c7737ab5768f25134f17e6b51fa893f1 Author: Rich Megginson <[email protected]> Date: Wed Jan 18 13:23:30 2012 -0700 fix recent compiler warnings Reviewed by: mareynold (Thanks!) ../ds.git/ldap/servers/slapd/entry.c: In function 'slapi_entry_get_nrdn_const': ../ds.git/ldap/servers/slapd/entry.c:2193: warning: passing argument 1 of 'slapi_rdn_init_all_dn' discards qualifiers from pointer target type ../ds.git/ldap/servers/slapd/slapi-plugin.h:2873: note: expected 'struct Slapi_RDN *' but argument is of type 'const struct slapi_rdn *' ../ds.git/ldap/servers/slapd/pw.c: In function 'slapi_pwpolicy_is_expired': ../ds.git/ldap/servers/slapd/pw.c:2193: warning: unused variable 'now' ../ds.git/ldap/servers/slapd/uniqueid.c:56: warning: 'str2Byte' declared 'static' but never defined ../ds.git/lib/base/rwlock.cpp: In function 'void rwlock_Unlock(void*)': ../ds.git/lib/base/rwlock.cpp:146: warning: suggest explicit braces to avoid ambiguous 'else' ../ds.git/ldap/servers/slapd/auth.c: In function ‘slapu_search_s’: ../ds.git/ldap/servers/slapd/auth.c:85: warning: initialization discards qualifiers from pointer target type ../ds.git/ldap/servers/slapd/back-ldbm/cache.c: In function 'dump_hash': ../ds.git/ldap/servers/slapd/back-ldbm/cache.c:305: warning: suggest parentheses around assignment used as truth value ../ds.git/ldap/servers/slapd/back-ldbm/idl_common.c: In function 'idl_append_extend': ../ds.git/ldap/servers/slapd/back-ldbm/idl_common.c:144: warning: unused variable 'x' diff --git a/ldap/servers/slapd/auth.c b/ldap/servers/slapd/auth.c index 796ca7a37..0631b1558 100644 --- a/ldap/servers/slapd/auth.c +++ b/ldap/servers/slapd/auth.c @@ -82,7 +82,7 @@ slapu_search_s( LDAP* ld, const char* rawbaseDN, int scope, const char* filter, Slapi_PBlock* pb = NULL; LDAPControl **ctrls; Slapi_DN *sdn = slapi_sdn_new_dn_byval(rawbaseDN); - char *baseDN = slapi_sdn_get_dn(sdn); + const char *baseDN = slapi_sdn_get_dn(sdn); if (ld != internal_ld) { err = ldap_search_ext_s(ld, baseDN, scope, filter, attrs, attrsonly, diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c index e2f1768c7..1ccfd29a7 100644 --- a/ldap/servers/slapd/back-ldbm/cache.c +++ b/ldap/servers/slapd/back-ldbm/cache.c @@ -302,7 +302,7 @@ dump_hash(Hashtable *ht) } PR_snprintf(p, ids_size, "%s:", ep_id); p += len + 1; ids_size -= len + 1; - } while (e = HASH_NEXT(ht, e)); + } while ((e = HASH_NEXT(ht, e))); } if (p != ep_ids) { LDAPDebug1Arg(LDAP_DEBUG_ANY, "%s\n", ep_ids); diff --git a/ldap/servers/slapd/back-ldbm/idl_common.c b/ldap/servers/slapd/back-ldbm/idl_common.c index f04f8de99..584bba565 100644 --- a/ldap/servers/slapd/back-ldbm/idl_common.c +++ b/ldap/servers/slapd/back-ldbm/idl_common.c @@ -141,7 +141,6 @@ idl_append_extend(IDList **orig_idl, ID id) } if ( idl->b_nids == idl->b_nmax ) { - size_t x = 0; /* No more room, need to extend */ /* Allocate new IDL with twice the space of this one */ IDList *idl_new = NULL; diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c index ec8585d42..458ead0f6 100644 --- a/ldap/servers/slapd/back-ldbm/import-threads.c +++ b/ldap/servers/slapd/back-ldbm/import-threads.c @@ -1782,8 +1782,8 @@ upgradedn_producer(void *param) char ebuf[BUFSIZ]; import_log_notice(job, "WARNING: skipping entry \"%s\"", escape_string(slapi_entry_get_dn(e), ebuf)); - import_log_notice(job, "REASON: entry too large (%u bytes) for " - "the buffer size (%u bytes)", newesize, job->fifo.bsize); + import_log_notice(job, "REASON: 
entry too large (%lu bytes) for " + "the buffer size (%lu bytes)", newesize, job->fifo.bsize); backentry_free(&ep); job->skipped++; continue; diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index cac7b851a..5afba099f 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -2190,7 +2190,8 @@ slapi_entry_get_nrdn_const( const Slapi_Entry *e ) if (NULL == nrdn) { const char *dn = slapi_entry_get_dn_const(e); if (dn) { - slapi_rdn_init_all_dn(&e->e_srdn, dn); + /* cast away const */ + slapi_rdn_init_all_dn((Slapi_RDN *)&e->e_srdn, dn); nrdn = slapi_rdn_get_nrdn(slapi_entry_get_srdn((Slapi_Entry *)e)); } } diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c index 58fce2897..2a7b29bc8 100644 --- a/ldap/servers/slapd/pw.c +++ b/ldap/servers/slapd/pw.c @@ -2190,7 +2190,6 @@ int slapi_pwpolicy_is_expired(Slapi_PWPolicy *pwpolicy, Slapi_Entry *e, time_t *expire_time, int *remaining_grace) { int is_expired = 0; - time_t now = current_time(); if (pwpolicy && e) { /* If password expiration is enabled in the policy, diff --git a/ldap/servers/slapd/uniqueid.c b/ldap/servers/slapd/uniqueid.c index 979e06b5e..1d23e172f 100644 --- a/ldap/servers/slapd/uniqueid.c +++ b/ldap/servers/slapd/uniqueid.c @@ -53,7 +53,6 @@ #define MODULE "uniqueid" /* for logging */ static int isValidFormat (const char * buff); -static PRUint8 str2Byte (const char *str); /* All functions that strat with slapi_ are exposed to the plugins */ diff --git a/lib/base/rwlock.cpp b/lib/base/rwlock.cpp index 81d4e47a4..ec753446b 100644 --- a/lib/base/rwlock.cpp +++ b/lib/base/rwlock.cpp @@ -143,11 +143,12 @@ void rwlock_Unlock(RWLOCK lockP) rwLockP->write = 0; else rwLockP->numReaders--; - if (rwLockP->numReaders == 0) + if (rwLockP->numReaders == 0) { if (rwLockP->numWriteWaiters != 0) condvar_notify(rwLockP->writeFree); else condvar_notifyAll(rwLockP->readFree); + } crit_exit(rwLockP->crit); }
0
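Most of these warnings are mechanical cleanups; the rwlock.cpp one is worth a small illustration because the dangling-else shape is easy to reintroduce. The following is a hypothetical reduction of that pattern, not the original rwlock code.

#include <stdio.h>

static void notify_one(void) { puts("notify one writer"); }
static void notify_all(void) { puts("notify all readers"); }

static void unlock(int num_readers, int num_write_waiters)
{
    /* In the unbraced original the 'else' binds to the inner 'if'. That
     * happened to be the intent here, but gcc still warns ("suggest
     * explicit braces to avoid ambiguous 'else'") because the nesting is
     * ambiguous to a reader. The fix simply braces the outer body, as the
     * rwlock.cpp change does. */
    if (num_readers == 0) {
        if (num_write_waiters != 0)
            notify_one();
        else
            notify_all();
    }
}

int main(void)
{
    unlock(0, 0); /* readers drained, no writers waiting: wake readers */
    unlock(0, 1); /* readers drained, writer waiting: wake one writer */
    return 0;
}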
bf00ddf580bfd3f9340ab7703c23d867ad5524af
389ds/389-ds-base
Resolves: 184141 Summary: Make password modify extop work properly with the password policy control.
commit bf00ddf580bfd3f9340ab7703c23d867ad5524af Author: Nathan Kinder <[email protected]> Date: Thu Jan 15 18:24:48 2009 +0000 Resolves: 184141 Summary: Make password modify extop work properly with the password policy control. diff --git a/ldap/servers/slapd/extendop.c b/ldap/servers/slapd/extendop.c index 53947c6ef..ced66c344 100644 --- a/ldap/servers/slapd/extendop.c +++ b/ldap/servers/slapd/extendop.c @@ -311,6 +311,19 @@ do_extended( Slapi_PBlock *pb ) goto free_and_return; } + /* decode the optional controls - put them in the pblock */ + if ( (lderr = get_ldapmessage_controls( pb, pb->pb_op->o_ber, NULL )) != 0 ) + { + char *dn = NULL; + slapi_pblock_get(pb, SLAPI_CONN_DN, &dn); + + op_shared_log_error_access (pb, "EXT", dn ? dn : "", "failed to decode LDAP controls"); + send_ldap_result( pb, lderr, NULL, NULL, 0, NULL ); + + slapi_ch_free_string(&dn); + goto free_and_return; + } + slapi_pblock_set( pb, SLAPI_EXT_OP_REQ_OID, extoid ); slapi_pblock_set( pb, SLAPI_EXT_OP_REQ_VALUE, &extval ); rc = plugin_call_exop_plugins( pb, extoid ); diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 4fd2a8b25..09ccd424d 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -437,21 +437,30 @@ void slapi_modify_internal_set_pb (Slapi_PBlock *pb, const char *dn, LDAPMod **m static int modify_internal_pb (Slapi_PBlock *pb) { - LDAPControl **controls; + LDAPControl **controls; + LDAPControl *pwpolicy_ctrl; Operation *op; - int opresult = 0; + int opresult = 0; LDAPMod **normalized_mods = NULL; LDAPMod **mods; LDAPMod **mod; Slapi_Mods smods; - int pw_change = 0; - char *old_pw = NULL; + int pw_change = 0; + char *old_pw = NULL; PR_ASSERT (pb != NULL); slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods); slapi_pblock_get(pb, SLAPI_CONTROLS_ARG, &controls); + /* See if pwpolicy control is present. We need to do + * this before we call op_shared_allow_pw_change() since + * it looks for SLAPI_PWPOLICY in the pblock to determine + * if the response contorl is needed. */ + pwpolicy_ctrl = slapi_control_present( controls, + LDAP_X_CONTROL_PWPOLICY_REQUEST, NULL, NULL ); + slapi_pblock_set( pb, SLAPI_PWPOLICY, &pwpolicy_ctrl ); + if(mods == NULL) { opresult = LDAP_PARAM_ERROR; diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c index 8b1185a56..5958171bb 100644 --- a/ldap/servers/slapd/passwd_extop.c +++ b/ldap/servers/slapd/passwd_extop.c @@ -143,33 +143,48 @@ passwd_modify_getEntry( const char *dn, Slapi_Entry **e2 ) { /* Construct Mods pblock and perform the modify operation * Sets result of operation in SLAPI_PLUGIN_INTOP_RESULT */ -static int passwd_apply_mods(const char *dn, Slapi_Mods *mods) +static int passwd_apply_mods(const char *dn, Slapi_Mods *mods, LDAPControl **req_controls, + LDAPControl ***resp_controls) { Slapi_PBlock pb; + LDAPControl **req_controls_copy = NULL; + LDAPControl **pb_resp_controls = NULL; int ret=0; LDAPDebug( LDAP_DEBUG_TRACE, "=> passwd_apply_mods\n", 0, 0, 0 ); if (mods && (slapi_mods_get_num_mods(mods) > 0)) { + /* We need to dup the request controls since the original + * pblock owns the ones that have been passed in. 
*/ + if (req_controls) { + slapi_add_controls(&req_controls_copy, req_controls, 1); + } + pblock_init(&pb); slapi_modify_internal_set_pb (&pb, dn, - slapi_mods_get_ldapmods_byref(mods), - NULL, /* Controls */ - NULL, /* UniqueID */ - pw_get_componentID(), /* PluginID */ - 0); /* Flags */ + slapi_mods_get_ldapmods_byref(mods), + req_controls_copy, NULL, /* UniqueID */ + pw_get_componentID(), /* PluginID */ + 0); /* Flags */ - ret =slapi_modify_internal_pb (&pb); + ret =slapi_modify_internal_pb (&pb); - slapi_pblock_get(&pb, SLAPI_PLUGIN_INTOP_RESULT, &ret); + slapi_pblock_get(&pb, SLAPI_PLUGIN_INTOP_RESULT, &ret); - if (ret != LDAP_SUCCESS){ - LDAPDebug(LDAP_DEBUG_TRACE, "WARNING: passwordPolicy modify error %d on entry '%s'\n", - ret, dn, 0); - } + /* Retreive and duplicate the response controls since they will be + * destroyed along with the pblock used for the internal operation. */ + slapi_pblock_get(&pb, SLAPI_RESCONTROLS, &pb_resp_controls); + if (pb_resp_controls) { + slapi_add_controls(resp_controls, pb_resp_controls, 1); + } - pblock_done(&pb); + if (ret != LDAP_SUCCESS){ + LDAPDebug(LDAP_DEBUG_TRACE, "WARNING: passwordPolicy modify error %d on entry '%s'\n", + ret, dn, 0); + } + + pblock_done(&pb); } LDAPDebug( LDAP_DEBUG_TRACE, "<= passwd_apply_mods: %d\n", ret, 0, 0 ); @@ -180,7 +195,8 @@ static int passwd_apply_mods(const char *dn, Slapi_Mods *mods) /* Modify the userPassword attribute field of the entry */ -static int passwd_modify_userpassword(Slapi_Entry *targetEntry, const char *newPasswd) +static int passwd_modify_userpassword(Slapi_Entry *targetEntry, const char *newPasswd, + LDAPControl **req_controls, LDAPControl ***resp_controls) { char *dn = NULL; int ret = 0; @@ -193,7 +209,7 @@ static int passwd_modify_userpassword(Slapi_Entry *targetEntry, const char *newP slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, SLAPI_USERPWD_ATTR, newPasswd); - ret = passwd_apply_mods(dn, &smods); + ret = passwd_apply_mods(dn, &smods, req_controls, resp_controls); slapi_mods_done(&smods); @@ -432,15 +448,18 @@ passwd_modify_extop( Slapi_PBlock *pb ) char *oldPasswd = NULL; char *newPasswd = NULL; char *errMesg = NULL; - int ret=0, rc=0, sasl_ssf=0; + int ret=0, rc=0, sasl_ssf=0, need_pwpolicy_ctrl=0; ber_tag_t tag=0; ber_len_t len=(ber_len_t)-1; struct berval *extop_value = NULL; struct berval *gen_passwd = NULL; BerElement *ber = NULL; BerElement *response_ber = NULL; - Slapi_Entry *targetEntry=NULL; + Slapi_Entry *targetEntry=NULL; Connection *conn = NULL; + LDAPControl **req_controls = NULL; + LDAPControl **resp_controls = NULL; + passwdPolicy *pwpolicy = NULL; /* Slapi_DN sdn; */ LDAPDebug( LDAP_DEBUG_TRACE, "=> passwd_modify_extop\n", 0, 0, 0 ); @@ -589,33 +608,31 @@ parse_req_done: } if (oldPasswd == NULL || *oldPasswd == '\0') { - /* If user is authenticated, they already gave their password during - the bind operation (or used sasl or client cert auth or OS creds) */ - slapi_pblock_get(pb, SLAPI_CONN_AUTHMETHOD, &authmethod); - if (!authmethod || !strcmp(authmethod, SLAPD_AUTH_NONE)) { - errMesg = "User must be authenticated to the directory server.\n"; - rc = LDAP_INSUFFICIENT_ACCESS; - goto free_and_return; - } + /* If user is authenticated, they already gave their password during + * the bind operation (or used sasl or client cert auth or OS creds) */ + slapi_pblock_get(pb, SLAPI_CONN_AUTHMETHOD, &authmethod); + if (!authmethod || !strcmp(authmethod, SLAPD_AUTH_NONE)) { + errMesg = "User must be authenticated to the directory server.\n"; + rc = LDAP_INSUFFICIENT_ACCESS; + goto 
free_and_return; + } } + + /* Fetch the password policy. We need this in case we need to + * generate a password as well as for some policy checks. */ + pwpolicy = new_passwdPolicy( pb, dn ); /* A new password was not supplied in the request, so we need to generate * a random one and return it to the user in a response. */ if (newPasswd == NULL || *newPasswd == '\0') { - passwdPolicy *pwpolicy; int rval; /* Do a free of newPasswd here to be safe, otherwise we may leak 1 byte */ slapi_ch_free_string( &newPasswd ); - - pwpolicy = new_passwdPolicy( pb, dn ); - /* Generate a new password */ rval = passwd_modify_generate_passwd( pwpolicy, &newPasswd, &errMesg ); - delete_passwdPolicy(&pwpolicy); - if (rval != LDAP_SUCCESS) { if (!errMesg) errMesg = "Error generating new password.\n"; @@ -659,8 +676,8 @@ parse_req_done: /* Did they give us a DN ? */ if (dn == NULL || *dn == '\0') { /* Get the DN from the bind identity on this connection */ - slapi_ch_free_string(&dn); - dn = slapi_ch_strdup(bindDN); + slapi_ch_free_string(&dn); + dn = slapi_ch_strdup(bindDN); LDAPDebug( LDAP_DEBUG_ANY, "Missing userIdentity in request, using the bind DN instead.\n", 0, 0, 0 ); @@ -703,8 +720,14 @@ parse_req_done: slapi_pblock_set(pb, SLAPI_BACKEND, be); } + /* Check if the pwpolicy control is present */ + slapi_pblock_get( pb, SLAPI_PWPOLICY, &need_pwpolicy_ctrl ); + ret = slapi_access_allowed ( pb, targetEntry, SLAPI_USERPWD_ATTR, NULL, SLAPI_ACL_WRITE ); - if ( ret != LDAP_SUCCESS ) { + if ( ret != LDAP_SUCCESS ) { + if (need_pwpolicy_ctrl) { + slapi_pwpolicy_make_response_control ( pb, -1, -1, LDAP_PWPOLICY_PWDMODNOTALLOWED ); + } errMesg = "Insufficient access rights\n"; rc = LDAP_INSUFFICIENT_ACCESS; goto free_and_return; @@ -714,21 +737,50 @@ parse_req_done: * They gave us a password (old), check it against the target entry * Is the old password valid ? */ - if (oldPasswd && *oldPasswd) { - ret = passwd_check_pwd(targetEntry, oldPasswd); - if (ret) { - /* No, then we fail this operation */ - errMesg = "Invalid oldPasswd value.\n"; - rc = ret; - goto free_and_return; - } - } - + if (oldPasswd && *oldPasswd) { + ret = passwd_check_pwd(targetEntry, oldPasswd); + if (ret) { + /* No, then we fail this operation */ + errMesg = "Invalid oldPasswd value.\n"; + rc = ret; + goto free_and_return; + } + } + + /* Check if password policy allows users to change their passwords. We need to do + * this here since the normal modify code doesn't perform this check for + * internal operations. */ + if (!pb->pb_op->o_isroot && !pb->pb_conn->c_needpw && !pwpolicy->pw_change) { + Slapi_DN *bindSDN = slapi_sdn_new_dn_byref(bindDN); + /* Is this a user modifying their own password? */ + if (slapi_sdn_compare(bindSDN, slapi_entry_get_sdn(targetEntry))==0) { + if (need_pwpolicy_ctrl) { + slapi_pwpolicy_make_response_control ( pb, -1, -1, LDAP_PWPOLICY_PWDMODNOTALLOWED ); + } + errMesg = "User is not allowed to change password\n"; + rc = LDAP_UNWILLING_TO_PERFORM; + slapi_sdn_free(&bindSDN); + goto free_and_return; + } + slapi_sdn_free(&bindSDN); + } + /* Fetch any present request controls so we can use them when + * performing the modify operation. */ + slapi_pblock_get(pb, SLAPI_REQCONTROLS, &req_controls); + /* Now we're ready to make actual password change */ - ret = passwd_modify_userpassword(targetEntry, newPasswd); + ret = passwd_modify_userpassword(targetEntry, newPasswd, req_controls, &resp_controls); + + /* Set the response controls if necessary. 
We want to do this now + * so it is set for both the success and failure cases. The pblock + * will now own the controls. */ + if (resp_controls) { + slapi_pblock_set(pb, SLAPI_RESCONTROLS, resp_controls); + } + if (ret != LDAP_SUCCESS) { - /* Failed to modify the password, e.g. because insufficient access allowed */ + /* Failed to modify the password, e.g. because password policy, etc. */ errMesg = "Failed to update password\n"; rc = ret; goto free_and_return; @@ -742,7 +794,7 @@ parse_req_done: LDAPDebug( LDAP_DEBUG_TRACE, "<= passwd_modify_extop: %d\n", rc, 0, 0 ); /* Free anything that we allocated above */ - free_and_return: +free_and_return: slapi_ch_free_string(&bindDN); /* slapi_pblock_get SLAPI_CONN_DN does strdup */ slapi_ch_free_string(&oldPasswd); slapi_ch_free_string(&newPasswd); @@ -756,6 +808,7 @@ parse_req_done: slapi_ch_free_string(&otdn); slapi_pblock_set( pb, SLAPI_ORIGINAL_TARGET, NULL ); slapi_ch_free_string(&authmethod); + delete_passwdPolicy(&pwpolicy); if ( targetEntry != NULL ){ slapi_entry_free (targetEntry);
0
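A large part of this change is control ownership around the internal modify: the caller's pblock owns the request controls, so passwd_apply_mods() duplicates them before the internal operation and duplicates the response controls back out before the internal pblock is destroyed. The sketch below restates that copy-in/copy-out idea with plain string arrays standing in for LDAPControl lists; the real code uses slapi_add_controls(), and the control names shown are placeholders, not real OIDs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for duplicating an LDAPControl array. */
static char **dup_controls(char *const *src)
{
    size_t n = 0;
    if (!src) return NULL;
    while (src[n]) n++;
    char **copy = calloc(n + 1, sizeof(*copy));
    for (size_t i = 0; i < n; i++) copy[i] = strdup(src[i]);
    return copy;
}

static void free_controls(char **ctrls)
{
    if (!ctrls) return;
    for (size_t i = 0; ctrls[i]; i++) free(ctrls[i]);
    free(ctrls);
}

/* Mirrors the ownership handling in passwd_apply_mods(): a private copy of
 * the request controls goes into the internal operation, and the response
 * controls are copied back out before the internal context is torn down. */
static void apply_internal_modify(char *const *req_controls, char ***resp_controls)
{
    char **req_copy = dup_controls(req_controls);

    /* ... the internal modify would run here, consuming req_copy and
     * producing response controls owned by the internal context ... */
    char *internal_resp[] = { "pwpolicy-response-control", NULL };
    *resp_controls = dup_controls(internal_resp);

    free_controls(req_copy); /* in the real code the internal pblock frees these */
}

int main(void)
{
    char *req[] = { "pwpolicy-request-control", NULL };
    char **resp = NULL;

    apply_internal_modify(req, &resp);
    printf("response control: %s\n", resp[0]);
    free_controls(resp);
    return 0;
}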
bcbad8745ceae93bc724229194febc239b025232
389ds/389-ds-base
Issue 5722 - RFE When a filter contains 'nsrole', improve response time by rewriting the filter (#5723) Bug description: 'nsrole' is a virtual attribute and is not indexed. With a poorly selective filter like the one below, the search may be unindexed: "(&(nsrole=cn=managed_role,cn=suffix)(objectclass=posixAccount)))" The RFE is to rewrite the filter component that contains the 'nsrole' attribute type; the rewritten component can then be indexed. Fix description: For a managed role, it replaces 'nsrole' with the 'nsroleDN' attribute type. For a filtered role, it replaces the 'nsrole' component with the nsRoleFilter value. relates: #5722 Reviewed by: Pierre Rogier (Thanks)
commit bcbad8745ceae93bc724229194febc239b025232 Author: tbordaz <[email protected]> Date: Tue May 9 15:06:55 2023 +0200 Issue 5722 - RFE When a filter contains 'nsrole', improve response time by rewriting the filter (#5723) Bug description: 'nsrole' is a virtual attribute and is not indexed. With a poorly selective filter like below the search may be not indexed "(&(nsrole=cn=managed_role,cn=suffix)(objectclass=posixAccount)))" The RFE is to rewrite the filter component contains 'nsrole' attribute type. Rewritten component can then been indexed Fix description: For managed role, it replaces 'nsrole' with 'nsroleDN' attribute type For filtered roled, it replace the 'nsrole' component with the nsRoleFilter value relates: #5722 Reviewed by: Pierre Rogier (Thanks) diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py index bc227a2d7..13d188d68 100644 --- a/dirsrvtests/tests/suites/roles/basic_test.py +++ b/dirsrvtests/tests/suites/roles/basic_test.py @@ -13,16 +13,22 @@ Importing necessary Modules. import logging import time +import ldap import os import pytest -from lib389._constants import PW_DM, DEFAULT_SUFFIX +from lib389._constants import ErrorLog, PW_DM, DEFAULT_SUFFIX, DEFAULT_BENAME from lib389.idm.user import UserAccount, UserAccounts from lib389.idm.organization import Organization from lib389.idm.organizationalunit import OrganizationalUnit from lib389.topologies import topology_st as topo from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles from lib389.idm.domain import Domain +from lib389.dbgen import dbgen_users +from lib389.tasks import ImportTask +from lib389.utils import get_default_db_lib +from lib389.rewriters import * +from lib389.backend import Backends logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) @@ -504,6 +510,255 @@ def test_vattr_on_managed_role(topo, request): request.addfinalizer(fin) +def test_managed_and_filtered_role_rewrite(topo, request): + """Test that filter components containing 'nsrole=xxx' + are reworked if xxx is either a filtered role or a managed + role. + + :id: e30ff5ed-4f8b-48db-bb88-66f150fca31f + :setup: server + :steps: + 1. Setup nsrole rewriter + 2. Add a 'nsroleDN' indexes for managed roles + 3. Create an 90K ldif files + This is large so that unindex search will last long + 4. import/restart the instance + 5. Create a managed role and add 4 entries in that role + 6. Check that a search 'nsrole=managed_role' is fast + 7. Create a filtered role that use an indexed attribute (givenName) + 8. Check that a search 'nsrole=filtered_role' is fast + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + 6. Operation should succeed + 7. Operation should succeed + 8. 
Operation should succeed + """ + # Setup nsrole rewriter + rewriters = Rewriters(topo.standalone) + rewriter = rewriters.ensure_state(properties={"cn": "nsrole", "nsslapd-libpath": 'libroles-plugin'}) + try: + rewriter.add('nsslapd-filterrewriter', "role_nsRole_filter_rewriter") + except: + pass + + # Create an index for nsRoleDN that is used by managed role + attrname = 'nsRoleDN' + backends = Backends(topo.standalone) + backend = backends.get(DEFAULT_BENAME) + indexes = backend.get_indexes() + try: + index = indexes.create(properties={ + 'cn': attrname, + 'nsSystemIndex': 'false', + 'nsIndexType': ['eq', 'pres'] + }) + except: + pass + + # Build LDIF file + bdb_values = { + 'wait30': 30 + } + + # Note: I still sometime get failure with a 60s timeout so lets use 90s + mdb_values = { + 'wait30': 90 + } + + if get_default_db_lib() == 'bdb': + values = bdb_values + else: + values = mdb_values + + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/perf_import.ldif' + + RDN="userNew" + PARENT="ou=people,%s" % DEFAULT_SUFFIX + dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT) + + # online import + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait(values['wait30']) # If things go wrong import takes a lot longer than this + assert import_task.is_complete() + + # Restart server + topo.standalone.restart() + + # Create Managed role entry + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + role = managed_roles.create(properties={"cn": 'MANAGED_ROLE'}) + + # Assign managed role to 4 entries out of the 90K + for i in range(1, 5): + dn = "uid=%s0000%d,%s" % (RDN, i, PARENT) + topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])]) + + + # Now check that search is fast, evaluating only 4 entries + search_start = time.time() + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn) + duration = time.time() - search_start + log.info("Duration of the search was %f", duration) + assert(len(entries) == 4) + assert (duration < 1) + + # Restart server to refresh entrycache + topo.standalone.restart() + + # Create Filtered Role entry + # it uses 'givenName' attribute that is indexed (eq) by default + filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + role = filtered_roles.create(properties={'cn': 'FILTERED_ROLE', 'nsRoleFilter': 'givenName=Technical'}) + + # Now check that search is fast + search_start = time.time() + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn) + duration = time.time() - search_start + log.info("Duration of the search was %f", duration) + assert (duration < 1) + + def fin(): + topo.standalone.restart() + try: + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + for i in managed_roles.list(): + i.delete() + filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + for i in filtered_roles.list(): + i.delete() + except: + pass + os.remove(import_ldif) + + request.addfinalizer(fin) + +def test_not_such_entry_role_rewrite(topo, request): + """Test that filter components containing 'nsrole=xxx' + ,where xxx does not refer to any role definition, + replace the component by 'nsuniqueid=-1' + + :id: b098dda5-fc77-46c4-84a7-5d0c7035bb77 + :setup: server + :steps: + 1. Setup nsrole rewriter + 2. Add a 'nsroleDN' indexes for managed roles + 3. 
Create an 90K ldif files + This is large so that unindex search will last long + 4. import/restart the instance + 5. Create a managed role and add 4 entries in that role + 6. Enable plugin log level to capture role plugin message + 7. Check that a search is fast "(OR(nsrole=managed_role)(nsrole=not_existing_role))" + 8. Stop the instance + 9. Check that a message like this was logged: replace (nsrole=not_existing_role) by (nsuniqueid=-1) + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + 6. Operation should succeed + 7. Operation should succeed + 8. Operation should succeed + 9. Operation should succeed + """ + # Setup nsrole rewriter + rewriters = Rewriters(topo.standalone) + rewriter = rewriters.ensure_state(properties={"cn": "nsrole", "nsslapd-libpath": 'libroles-plugin'}) + try: + rewriter.add('nsslapd-filterrewriter', "role_nsRole_filter_rewriter") + except: + pass + + # Create an index for nsRoleDN that is used by managed role + attrname = 'nsRoleDN' + backends = Backends(topo.standalone) + backend = backends.get(DEFAULT_BENAME) + indexes = backend.get_indexes() + try: + index = indexes.create(properties={ + 'cn': attrname, + 'nsSystemIndex': 'false', + 'nsIndexType': ['eq', 'pres'] + }) + except: + pass + + # Build LDIF file + bdb_values = { + 'wait60': 60 + } + + # Note: I still sometime get failure with a 60s timeout so lets use 90s + mdb_values = { + 'wait60': 90 + } + + if get_default_db_lib() == 'bdb': + values = bdb_values + else: + values = mdb_values + + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/perf_import.ldif' + + RDN="userNew" + PARENT="ou=people,%s" % DEFAULT_SUFFIX + dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT) + + # online import + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait(values['wait60']) # If things go wrong import takes a lot longer than this + assert import_task.is_complete() + + # Restart server + topo.standalone.restart() + + # Create Managed role entry + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + role = managed_roles.create(properties={"cn": 'MANAGED_ROLE'}) + + # Assign managed role to 4 entries out of the 90K + for i in range(1, 5): + dn = "uid=%s0000%d,%s" % (RDN, i, PARENT) + topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])]) + + # Enable plugin level to check message + topo.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN)) + + # Now check that search is fast, evaluating only 4 entries + search_start = time.time() + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(nsrole=%s)(nsrole=cn=not_such_entry_role,%s))" % (role.dn, DEFAULT_SUFFIX)) + duration = time.time() - search_start + log.info("Duration of the search was %f", duration) + assert(len(entries) == 4) + assert (duration < 1) + + # Restart server to refresh entrycache + topo.standalone.stop() + + # Check that when the role does not exist it is translated into 'nsuniqueid=-1' + pattern = ".*replace \(nsRole=cn=not_such_entry_role,dc=example,dc=com\) by \(nsuniqueid=-1\).*" + assert topo.standalone.ds_error_log.match(pattern) + + def fin(): + topo.standalone.restart() + try: + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + for i in managed_roles.list(): + i.delete() + except: + 
pass + os.remove(import_ldif) + + request.addfinalizer(fin) + if __name__ == "__main__": CURRENT_FILE = os.path.realpath(__file__) pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c index 5fd52e7b9..fce5c3b81 100644 --- a/ldap/servers/plugins/roles/roles_cache.c +++ b/ldap/servers/plugins/roles/roles_cache.c @@ -2131,3 +2131,206 @@ roles_cache_dump(caddr_t data, caddr_t arg __attribute__((unused))) return 0; } + + +/* This is an example callback to substitute + * attribute type 'from' with 'to' in all + * the filter components + * example_substitute_type is a callback (FILTER_APPLY_FN) used by slapi_filter_apply + * typedef int (FILTER_APPLY_FN)(Slapi_Filter f, void *arg) + * To stick to the definition, the callback is defined using 'int' rather 'int32_t' + */ +typedef struct { + char *attrtype_from; + char *attrtype_to; +} role_substitute_type_arg_t; + + +static void +_rewrite_nsrole_component(Slapi_Filter *f, role_substitute_type_arg_t *substitute_arg) +{ + char *type; + struct berval *bval; + char *attrs[3] = {SLAPI_ATTR_OBJECTCLASS, ROLE_FILTER_ATTR_NAME, NULL}; + Slapi_Entry *nsrole_entry = NULL; + Slapi_DN *sdn = NULL; + char *rolefilter = NULL; + int rc; + char **oc_values = NULL; + + /* Substitution is only valid if original attribute is NSROLEATTR */ + if ((substitute_arg == NULL) || + (substitute_arg->attrtype_from == NULL) || + (substitute_arg->attrtype_to == NULL)) { + return; + } + if (strcasecmp(substitute_arg->attrtype_from, NSROLEATTR)) { + return; + } + if (slapi_filter_get_choice(f) != LDAP_FILTER_EQUALITY) { + /* only equality filter are handled + * else it is not possible to retrieve the role + * via its DN. + * Safety checking, at this point filter component has + * already been tested as equality match. + */ + return; + } + + /* Check that assertion does not refer to a filter/nested role */ + if (slapi_filter_get_ava(f, &type, &bval)) { + return; + } + sdn = slapi_sdn_new_dn_byref(bval->bv_val); + if (rc = slapi_search_internal_get_entry(sdn, attrs, &nsrole_entry, roles_get_plugin_identity())) { + if (rc == LDAP_NO_SUCH_OBJECT) { + /* the role does not exist (nsrole=<unknown role>) + * that means no entry match this component. To speed up + * the built of candidate list we may replace this component + * with an indexed component that return empty IDL. + * nsuniqueid is indexed and -1 is an invalid value. 
+ */ + char *empty_IDL_filter; + empty_IDL_filter = slapi_ch_smprintf("(%s=-1)", SLAPI_ATTR_UNIQUEID); + slapi_filter_replace_strfilter(f, empty_IDL_filter); + slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, "_rewrite_nsrole_component: replace (%s=%s) by %s\n", + substitute_arg->attrtype_from, (char *)slapi_sdn_get_ndn(sdn), empty_IDL_filter); + slapi_ch_free_string(&empty_IDL_filter); + + } + goto bail; + } + oc_values = slapi_entry_attr_get_charray(nsrole_entry, SLAPI_ATTR_OBJECTCLASS); + rolefilter = slapi_entry_attr_get_charptr(nsrole_entry, ROLE_FILTER_ATTR_NAME); + for (size_t i = 0; oc_values && oc_values[i]; ++i) { + if (!strcasecmp(oc_values[i], (char *)"nsSimpleRoleDefinition") || + !strcasecmp(oc_values[i], (char *)"nsManagedRoleDefinition")) { + /* This is a managed role, okay to rewrite the attribute type */ + slapi_filter_changetype(f, substitute_arg->attrtype_to); + goto bail; + } else if (!strcasecmp(oc_values[i], (char *)"nsFilteredRoleDefinition") && rolefilter) { + /* filtered role, okay to rewrite the filter with + * the value of the nsRoleFilter + */ + slapi_filter_replace_strfilter(f, rolefilter); + goto bail; + } else if (!strcasecmp(oc_values[i], (char *)"nsNestedRoleDefinition")) { + /* nested roles can not be rewritten */ + goto bail; + } + } + +bail: + slapi_ch_free_string(&rolefilter); + slapi_ch_array_free(oc_values); + slapi_entry_free(nsrole_entry); + slapi_sdn_free(&sdn); + return; +} + +/* It calls _rewrite_nsrole_component for each filter component with + * - filter choice LDAP_FILTER_EQUALITY + * - filter attribute type nsRole + */ +static int +role_substitute_type(Slapi_Filter *f, void *arg) +{ + role_substitute_type_arg_t *substitute_arg = (role_substitute_type_arg_t *) arg; + char *filter_type; + + if ((substitute_arg == NULL) || + (substitute_arg->attrtype_from == NULL) || + (substitute_arg->attrtype_to == NULL)) { + return SLAPI_FILTER_SCAN_STOP; + } + + if (slapi_filter_get_choice(f) != LDAP_FILTER_EQUALITY) { + /* only equality filter are handled + * else it is not possible to retrieve the role + * via its DN + */ + return SLAPI_FILTER_SCAN_CONTINUE; + } + + /* In case this is expected attribute type and assertion + * Substitute 'from' by 'to' attribute type in the filter + */ + if (slapi_filter_get_attribute_type(f, &filter_type) == 0) { + if ((strcasecmp(filter_type, substitute_arg->attrtype_from) == 0)) { + _rewrite_nsrole_component(f, substitute_arg); + } + } + + /* Return continue because we should + * substitute 'from' in all filter components + */ + return SLAPI_FILTER_SCAN_CONTINUE; +} + + +/* + * During PRE_SEARCH, rewriters (a.k.a computed attributes) are called + * to rewrite some search filter components. + * Rewriters callbacks are registered by main and are retrieved from + * config entries under cn=rewriters,cn=config. + * + * In order to register the role rewriter, the following record + * is added to the config. 
+ * + * dn: cn=role,cn=rewriters,cn=config + * objectClass: top + * objectClass: extensibleObject + * cn: role + * nsslapd-libpath: /lib/dirsrv/libslapd.so + * nsslapd-filterrewriter: role_nsRole_filter_rewriter + * + * The role rewriter supports: + * - 'nsrole' attribute type + * - LDAP_FILTER_EQUALITY filter choice + * - assertion being a managed/filtered role DN + * + * - Input '(nsrole=cn=admin1,dc=example,dc=com)' + * Output '(nsroleDN=cn=admin1,dc=example,dc=com)' + * - Input '(nsrole=cn=SalesManagerFilter,ou=people,dc=example,dc=com)' + * Output '(manager=user008762)' + * + * dn: cn=admin1,dc=example,dc=com + * ... + * objectClass: nsRoleDefinition + * objectClass: nsManagedRoleDefinition + * ... + * + * dn: cn=SalesManagerFilter,ou=people,dc=example,dc=com + * ... + * objectclass: nsRoleDefinition + * objectclass: nsFilteredRoleDefinition + * ... + * nsRoleFilter: manager=user008762 + * + * return code (from computed.c:compute_rewrite_search_filter): + * -1 : keep looking + * 0 : rewrote OK + * 1 : refuse to do this search + * 2 : operations error + */ +int32_t +role_nsRole_filter_rewriter(Slapi_PBlock *pb) +{ + Slapi_Filter *clientFilter = NULL; + int error_code = 0; + int rc; + role_substitute_type_arg_t arg; + arg.attrtype_from = NSROLEATTR; + arg.attrtype_to = ROLE_MANAGED_ATTR_NAME; + + slapi_pblock_get(pb, SLAPI_SEARCH_FILTER, &clientFilter); + rc = slapi_filter_apply(clientFilter, role_substitute_type, &arg, &error_code); + if (rc == SLAPI_FILTER_SCAN_NOMORE) { + return SEARCH_REWRITE_CALLBACK_CONTINUE; /* Let's others rewriter play */ + } else { + slapi_log_err(SLAPI_LOG_ERR, + "example_foo2cn_filter_rewriter", "Could not update the search filter - error %d (%d)\n", + rc, error_code); + return SEARCH_REWRITE_CALLBACK_ERROR; /* operation error */ + } +} diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c index 1d86c2db4..07f3058a3 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c @@ -1039,33 +1039,6 @@ ldbm_compute_evaluator(computed_attr_context *c, char *type, Slapi_Entry *e, sla */ -/* Before calling this function, you must free all the parts - which will be overwritten, this function dosn't know - how to do that */ -static int -replace_filter(Slapi_Filter *f, char *s) -{ - Slapi_Filter *newf = NULL; - Slapi_Filter *temp = NULL; - char *buf = slapi_ch_strdup(s); - - newf = slapi_str2filter(buf); - slapi_ch_free((void **)&buf); - - if (NULL == newf) { - return -1; - } - - /* Now take the parts of newf and put them in f */ - /* An easy way to do this is to preserve the "next" ptr */ - temp = f->f_next; - *f = *newf; - f->f_next = temp; - /* Free the new filter husk */ - slapi_ch_free((void **)&newf); - return 0; -} - static void find_our_friends(char *s, int *has, int *num) { @@ -1075,30 +1048,6 @@ find_our_friends(char *s, int *has, int *num) } } -/* Free the parts of a filter we're about to overwrite */ -void -free_the_filter_bits(Slapi_Filter *f) -{ - /* We need to free: */ - switch (f->f_choice) { - case LDAP_FILTER_EQUALITY: - case LDAP_FILTER_GE: - case LDAP_FILTER_LE: - case LDAP_FILTER_APPROX: - ava_done(&f->f_ava); - break; - - case LDAP_FILTER_PRESENT: - if (f->f_type != NULL) { - slapi_ch_free((void **)&(f->f_type)); - } - break; - - default: - break; - } -} - static int grok_and_rewrite_filter(Slapi_Filter *f) { @@ -1116,11 +1065,9 @@ grok_and_rewrite_filter(Slapi_Filter *f) rhs = f->f_ava.ava_value.bv_val; if (has) { if (0 == strcasecmp(rhs, "TRUE")) { - 
free_the_filter_bits(f); - replace_filter(f, "(&(numsubordinates=*)(numsubordinates>=1))"); + slapi_filter_replace_strfilter(f, "(&(numsubordinates=*)(numsubordinates>=1))"); } else if (0 == strcasecmp(rhs, "FALSE")) { - free_the_filter_bits(f); - replace_filter(f, "(&(objectclass=*)(!(numsubordinates=*)))"); + slapi_filter_replace_strfilter(f, "(&(objectclass=*)(!(numsubordinates=*)))"); } else { return 1; /* Filter we can't rewrite */ } @@ -1133,7 +1080,7 @@ grok_and_rewrite_filter(Slapi_Filter *f) char *theType = f->f_ava.ava_type; rhs_berval = f->f_ava.ava_value; - replace_filter(f, "(&(numsubordinates=*)(numsubordinates=x))"); + slapi_filter_replace_ex(f, "(&(numsubordinates=*)(numsubordinates=x))"); /* Now fixup the resulting filter so that x = rhs */ slapi_ch_free((void **)&(f->f_and->f_next->f_ava.ava_value.bv_val)); /*free type also */ @@ -1143,8 +1090,7 @@ grok_and_rewrite_filter(Slapi_Filter *f) } else { if (rhs_number == 0) { /* This is the same as hassubordinates=FALSE */ - free_the_filter_bits(f); - replace_filter(f, "(&(objectclass=*)(!(numsubordinates=*)))"); + slapi_filter_replace_strfilter(f, "(&(objectclass=*)(!(numsubordinates=*)))"); } else { return 1; } @@ -1166,14 +1112,13 @@ grok_and_rewrite_filter(Slapi_Filter *f) rhs_num = atoi(rhs); if (0 == rhs_num) { /* If so, rewrite to same as numsubordinates=* */ - free_the_filter_bits(f); - replace_filter(f, "(objectclass=*)"); + slapi_filter_replace_strfilter(f, "(objectclass=*)"); } else { /* Rewrite to present and GE the rhs */ char *theType = f->f_ava.ava_type; rhs_berval = f->f_ava.ava_value; - replace_filter(f, "(&(numsubordinates=*)(numsubordinates>=x))"); + slapi_filter_replace_ex(f, "(&(numsubordinates=*)(numsubordinates>=x))"); /* Now fixup the resulting filter so that x = rhs */ slapi_ch_free((void **)&(f->f_and->f_next->f_ava.ava_value.bv_val)); /*free type also */ diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c index 1f0873c34..44b726a34 100644 --- a/ldap/servers/slapd/filter.c +++ b/ldap/servers/slapd/filter.c @@ -1032,6 +1032,72 @@ slapi_filter_get_subfilt( return (0); } +/* + * Before calling this function, you must free all the parts + * which will be overwritten (i.e. 
slapi_free_the_filter_bits), + * this function dosn't know how to do that + */ +int +slapi_filter_replace_ex(Slapi_Filter *f, char *s) +{ + Slapi_Filter *newf = NULL; + Slapi_Filter *temp = NULL; + char *buf = slapi_ch_strdup(s); + + newf = slapi_str2filter(buf); + slapi_ch_free((void **)&buf); + + if (NULL == newf) { + return -1; + } + + /* Now take the parts of newf and put them in f */ + /* An easy way to do this is to preserve the "next" ptr */ + temp = f->f_next; + *f = *newf; + f->f_next = temp; + /* Free the new filter husk */ + slapi_ch_free((void **)&newf); + return 0; +} + +/* + * Free the parts of a filter we're about to overwrite + * moved from ldbm_attr.c + */ +void +slapi_filter_free_bits(Slapi_Filter *f) +{ + /* We need to free: */ + switch (f->f_choice) { + case LDAP_FILTER_EQUALITY: + case LDAP_FILTER_GE: + case LDAP_FILTER_LE: + case LDAP_FILTER_APPROX: + ava_done(&f->f_ava); + break; + + case LDAP_FILTER_PRESENT: + if (f->f_type != NULL) { + slapi_ch_free((void **)&(f->f_type)); + } + break; + + default: + break; + } +} + +/* + * it replaces the bits of the Slapi_Filter with the ones taken from strfilter + */ +int +slapi_filter_replace_strfilter(Slapi_Filter *f, char *strfilter) +{ + slapi_filter_free_bits(f); + return (slapi_filter_replace_ex(f, strfilter)); +} + static void filter_normalize_ava(struct slapi_filter *f, PRBool norm_values) { diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 0f1d9cf00..d9d697a49 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -5241,6 +5241,9 @@ int slapi_filter_get_choice(Slapi_Filter *f); int slapi_filter_get_ava(Slapi_Filter *f, char **type, struct berval **bval); int slapi_filter_get_attribute_type(Slapi_Filter *f, char **type); int slapi_filter_get_subfilt(Slapi_Filter *f, char **type, char **initial, char ***any, char ** final); +int slapi_filter_replace_ex(Slapi_Filter *f, char *s); +void slapi_filter_free_bits(Slapi_Filter *f); +int slapi_filter_replace_strfilter(Slapi_Filter *f, char *strfilter); Slapi_Filter *slapi_filter_list_first(Slapi_Filter *f); Slapi_Filter *slapi_filter_list_next(Slapi_Filter *f, Slapi_Filter *fprev); Slapi_Filter *slapi_str2filter(char *str);
0
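The rewriter's core idea can be shown without the slapi filter API: walk the equality components, and for nsrole either substitute nsroleDN (managed role) or substitute a never-matching indexed component (role not found). The toy filter node, lookup function, and DNs below are all stand-ins; filtered roles, which splice in their nsRoleFilter instead, are left out of the sketch, and cleanup is omitted for brevity.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

/* A toy equality-only filter node; the real rewriter walks Slapi_Filter
 * with slapi_filter_apply() and consults the role definitions themselves. */
struct eq_filter {
    char *type;
    char *value;
    struct eq_filter *next; /* siblings inside an AND/OR, flattened for brevity */
};

/* Pretend lookup: is this DN a managed role? */
static int is_managed_role(const char *dn)
{
    return strcmp(dn, "cn=MANAGED_ROLE,dc=example,dc=com") == 0;
}

static void rewrite_nsrole(struct eq_filter *f)
{
    for (; f; f = f->next) {
        if (strcasecmp(f->type, "nsrole") != 0)
            continue;
        if (is_managed_role(f->value)) {
            /* Managed role: nsroleDN is a real, indexable attribute. */
            free(f->type);
            f->type = strdup("nsroleDN");
        } else {
            /* Unknown role: rewrite to an indexed component that matches
             * nothing, as the patch does with (nsuniqueid=-1). */
            free(f->type);  f->type  = strdup("nsuniqueid");
            free(f->value); f->value = strdup("-1");
        }
    }
}

int main(void)
{
    struct eq_filter f2 = { strdup("nsrole"), strdup("cn=NO_SUCH_ROLE,dc=example,dc=com"), NULL };
    struct eq_filter f1 = { strdup("nsrole"), strdup("cn=MANAGED_ROLE,dc=example,dc=com"), &f2 };

    rewrite_nsrole(&f1);
    printf("(%s=%s)(%s=%s)\n", f1.type, f1.value, f2.type, f2.value);
    return 0;
}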
71a120d4ec452ef5e5178b6d12d2384873db1f27
389ds/389-ds-base
Ticket 47732 - ds logs many "SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN plugin returned error" messages Bug Description: there are many error messages related to postop delete plugins which are not justified Fix Description: the usn plugin always returns an error when an attempt is made to delete a non-existing entry, checking the ldap code in the pblock. But the plugin executes correctly and the error is already set, so return success https://fedorahosted.org/389/ticket/47732 Reviewed by: richm, thanks
commit 71a120d4ec452ef5e5178b6d12d2384873db1f27 Author: Ludwig Krispenz <[email protected]> Date: Thu Mar 13 15:34:08 2014 +0100 Ticket 47732 - ds logs many "SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN plugin returned error" messages Bug Description: there are many error messages relate to postop delete plugins whichare not justified Fix Description: the usn plugin always returns an error when an attempt is made to delete a non-existing entry, checking the ldap code in the pblock. But the plugin executes correctly and the error is aleady set so return succes https://fedorahosted.org/389/ticket/47732 Reviewed by: richm, thanks diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c index 8b1b714f8..73323b31e 100644 --- a/ldap/servers/plugins/usn/usn.c +++ b/ldap/servers/plugins/usn/usn.c @@ -520,8 +520,8 @@ usn_bepostop (Slapi_PBlock *pb) /* if op is not successful, don't increment the counter */ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc); if (LDAP_SUCCESS != rc) { - slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc); - rc = SLAPI_PLUGIN_FAILURE; + /* no plugin failure */ + rc = SLAPI_PLUGIN_SUCCESS; goto bail; } @@ -557,7 +557,8 @@ usn_bepostop_modify (Slapi_PBlock *pb) /* if op is not successful, don't increment the counter */ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc); if (LDAP_SUCCESS != rc) { - rc = SLAPI_PLUGIN_FAILURE; + /* no plugin failure */ + rc = SLAPI_PLUGIN_SUCCESS; goto bail; } @@ -606,7 +607,8 @@ usn_bepostop_delete (Slapi_PBlock *pb) /* if op is not successful, don't increment the counter */ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc); if (LDAP_SUCCESS != rc) { - rc = SLAPI_PLUGIN_FAILURE; + /* no plugin failure */ + rc = SLAPI_PLUGIN_SUCCESS; goto bail; } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index ce2a154b6..13d02acbc 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -1089,7 +1089,7 @@ ldbm_back_delete( Slapi_PBlock *pb ) /* call the transaction post delete plugins just before the commit */ if (plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN)) { - LDAPDebug0Args( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN plugin " + LDAPDebug0Args( LDAP_DEBUG_TRACE, "SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN plugin " "returned error code\n" ); if (!ldap_result_code) { slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); @@ -1198,7 +1198,7 @@ error_return: /* call the transaction post delete plugins just before the abort */ if (plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN)) { - LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN plugin " + LDAPDebug1Arg( LDAP_DEBUG_TRACE, "SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN plugin " "returned error code %d\n", retval ); if (!ldap_result_code) { slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code);
0
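A compact sketch of the pattern this fix applies in each usn_bepostop_* hook; the function name is a placeholder and the counter update is elided.

#include "slapi-plugin.h"

/* If the operation itself failed, skip the USN bookkeeping and report plugin
 * success so the backend does not log a spurious plugin error. */
static int
example_usn_bepostop(Slapi_PBlock *pb)
{
    int ldap_rc = LDAP_SUCCESS;

    slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_rc);
    if (ldap_rc != LDAP_SUCCESS) {
        return SLAPI_PLUGIN_SUCCESS; /* no plugin failure */
    }

    /* ... the real hooks increment the USN counter here on success ... */
    return SLAPI_PLUGIN_SUCCESS;
}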
249947e6f25f2bdb6da4757c5dbe174edc77f015
389ds/389-ds-base
Ticket 48661 - Agreement test suite fails at the test_changes case Bug Description: In the test_changes case of the agreement test suite, a change to the master can't be replicated to the consumer for some reason. The error is not always reproducible; sometimes the change is replicated. Fix Description: Set the RA Schedule back to "Always" as the teardown action at the end of the previous test case (test_setProperties). https://fedorahosted.org/389/ticket/48661 Review by: wibrown (Thanks!)
commit 249947e6f25f2bdb6da4757c5dbe174edc77f015 Author: Simon Pichugin <[email protected]> Date: Tue Feb 16 19:44:54 2016 +0100 Ticket 48661 - Agreement test suite fails at the test_changes case Bug Description: At the test_changes case at the agreement test suite, change to the master can't be replicated to consumer due some reasons. Error is not always reproducible. Sometimes change can be replicated. Fix Description: Put RA Schedule to "Always" as the TearDown action in the end of the previous test case (test_setProperties). https://fedorahosted.org/389/ticket/48661 Review by: wibrown (Thanks!) diff --git a/src/lib389/lib389/tests/agreement_test.py b/src/lib389/lib389/tests/agreement_test.py index 2390ea9f6..4be609d90 100644 --- a/src/lib389/lib389/tests/agreement_test.py +++ b/src/lib389/lib389/tests/agreement_test.py @@ -311,6 +311,9 @@ def test_setProperties(topology): assert properties[RA_SCHEDULE][0] == test_schedule assert properties[RA_DESCRIPTION][0] == test_desc + # Set RA Schedule back to "always" + topology.master.agreement.schedule(ents[0].dn, Agreement.ALWAYS) + def test_changes(topology): """Test the changes counter behaviour after making some changes
0
f931ca2ce713f0b46a44115259a3fb2b560c48e8
389ds/389-ds-base
Bug 610119 - fix Coverity Defect Type: Null pointer dereferences issues 12167 - 12199 https://bugzilla.redhat.com/show_bug.cgi?id=610119 Resolves: bug 610119 Bug description: Fix Coverity Defect Type: Null pointer dereferences issues 12167 - 12199 Fix description: Catch possible NULL pointer in ldbm_config_load_dse_info().
commit f931ca2ce713f0b46a44115259a3fb2b560c48e8 Author: Endi S. Dewata <[email protected]> Date: Thu Jul 1 23:37:04 2010 -0500 Bug 610119 - fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199 https://bugzilla.redhat.com/show_bug.cgi?id=610119 Resolves: bug 610119 Bug description: Fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199 Fix description: Catch possible NULL pointer in ldbm_config_load_dse_info(). diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c index 9550258cc..74b20b5ee 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c @@ -1350,7 +1350,16 @@ int ldbm_config_load_dse_info(struct ldbminfo *li) rval = 1; goto bail; } + search_pb = slapi_pblock_new(); + if (!search_pb) { + LDAPDebug(LDAP_DEBUG_ANY, + "ldbm_config_load_dse_info: Out of memory\n", + 0, 0, 0); + rval = 1; + goto bail; + } + slapi_search_internal_set_pb(search_pb, dn, LDAP_SCOPE_BASE, "objectclass=*", NULL, 0, NULL, NULL, li->li_identity, 0); slapi_search_internal_pb (search_pb);
0
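A hedged sketch of the defensive pattern the fix adds around slapi_pblock_new(); the function name is illustrative, and the logging call uses slapi_log_err rather than the LDAPDebug macro of the original code.

#include "slapi-plugin.h"

/* Treat an allocation failure from slapi_pblock_new() as an error instead of
 * handing a NULL pblock to the internal search API. */
static int
example_internal_base_search(const char *dn, Slapi_ComponentId *identity)
{
    Slapi_PBlock *search_pb = slapi_pblock_new();

    if (search_pb == NULL) {
        slapi_log_err(SLAPI_LOG_ERR, "example", "Out of memory\n");
        return 1;
    }

    slapi_search_internal_set_pb(search_pb, dn, LDAP_SCOPE_BASE,
                                 "objectclass=*", NULL, 0, NULL, NULL,
                                 identity, 0);
    slapi_search_internal_pb(search_pb);
    /* ... consume the results ... */
    slapi_free_search_results_internal(search_pb);
    slapi_pblock_destroy(search_pb);
    return 0;
}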
ca02529c797903139c56bd9b8b90e8cd5bd0c3f4
389ds/389-ds-base
Ticket #569 - examine replication code to reduce amount of stored state information Description: commit c7f6f161f4967635d6f02b029be571d88ec61961 made this change: In case the deleted value list in an attribute is empty: * this means the entry is deleted and has no more attributes, * when writing the attr to disk we would lose the AD-csn. * Add an empty value to the set of deleted values. This will * never be seen by any client. It will never be moved to the * present values and is only used to preserve the AD-csn. The AD-csn size was not counted in the buffer size to allocate. This patch adds the size. https://fedorahosted.org/389/ticket/569 Reviewed by Nathan (Thanks!!)
commit ca02529c797903139c56bd9b8b90e8cd5bd0c3f4 Author: Noriko Hosoi <[email protected]> Date: Thu Jun 13 18:22:21 2013 -0700 Ticket #569 - examine replication code to reduce amount of stored state information Description: commit c7f6f161f4967635d6f02b029be571d88ec61961 made this change: In case the deleted value list in an attribute is empty: * this means the entry is deleted and has no more attributes, * when writing the attr to disk we would loose the AD-csn. * Add an empty value to the set of deleted values. This will * never be seen by any client. It will never be moved to the * present values and is only used to preserve the AD-csn. The AD-csn size was not counted for the buffer size to allocate. This patch adds the size. https://fedorahosted.org/389/ticket/569 Reviewed by Nathan (Thanks!!) diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index ea86fcc5a..f730ae0a1 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -1559,7 +1559,17 @@ entry2str_internal_size_attrlist( const Slapi_Attr *attrlist, int entry2str_ctrl /* ";adcsn-" + a->a_deletioncsn */ if ( a->a_deletioncsn ) { - elen+= 1 + LDIF_CSNPREFIX_MAXLENGTH + CSN_STRSIZE; + elen += 1 + LDIF_CSNPREFIX_MAXLENGTH + CSN_STRSIZE; + } + if ( valueset_isempty(&a->a_deleted_values)) { + /* this means the entry is deleted and has no more attributes, + * when writing the attr to disk we would loose the AD-csn. + * Add an empty value to the set of deleted values. This will + * never be seen by any client. It will never be moved to the + * present values and is only used to preserve the AD-csn + * We need to add the size for that. + */ + elen += 1 + LDIF_CSNPREFIX_MAXLENGTH + CSN_STRSIZE; } } } @@ -1731,7 +1741,7 @@ entry2str_internal_put_attrlist( const Slapi_Attr *attrlist, int attr_state, int * present values and is only used to preserve the AD-csn */ valueset_add_string ((Slapi_ValueSet *)&a->a_deleted_values, "", CSN_TYPE_VALUE_DELETED, a->a_deletioncsn); - } + } entry2str_internal_put_valueset(a->a_type, a->a_deletioncsn, CSN_TYPE_ATTRIBUTE_DELETED, attr_state, &a->a_deleted_values, VALUE_DELETED, ecur, typebuf, typebuf_len, entry2str_ctrl); }
0
1747f910df2e95977ad8f2d354c25a2760e5a64b
389ds/389-ds-base
Issue 50689 - Failed db restore task does not report an error Bug Description: If you have a backup that contains a backend that is not configured, the restore fails, but a success return code is returned to the client. This happens because the return code gets overwritten after the failure. Fix Description: Preserve the error code upon failure and properly update the task exit code. relates: https://pagure.io/389-ds-base/issue/50689 Reviewed by: tboardaz & lkrispen(Thanks!!) Never rewrite the original error code
commit 1747f910df2e95977ad8f2d354c25a2760e5a64b Author: Mark Reynolds <[email protected]> Date: Mon Nov 4 15:45:55 2019 -0500 Issue 50689 - Failed db restore task does not report an error Bug Description: If you have a back up that contains a backend that is not configured the restore fails, but a success return code is returned to the client. This happens becuase the return code gets overwritten after the failure. Fix Description: Preserve the error code upon failure and properly update the task exit code. relates: https://pagure.io/389-ds-base/issue/50689 Reviewed by: tboardaz & lkrispen(Thanks!!) Never rewrite the orginal error code diff --git a/dirsrvtests/tests/suites/backups/backup_test.py b/dirsrvtests/tests/suites/backups/backup_test.py new file mode 100644 index 000000000..e93891449 --- /dev/null +++ b/dirsrvtests/tests/suites/backups/backup_test.py @@ -0,0 +1,73 @@ +import logging +import pytest +import os +from datetime import datetime +from lib389._constants import DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG +from lib389.properties import BACKEND_SAMPLE_ENTRIES, TASK_WAIT +from lib389.topologies import topology_st as topo +from lib389.backend import Backend +from lib389.tasks import BackupTask, RestoreTask + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_missing_backend(topo): + """Test that an error is returned when a restore is performed for a + backend that is no longer present. + + :id: 889b8028-35cf-41d7-91f6-bc5193683646 + :setup: Standalone Instance + :steps: + 1. Create a second backend + 2. Perform a back up + 3. Remove one of the backends from the config + 4. Perform a restore + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Failure + """ + + # Create a new backend + BE_NAME = 'backupRoot' + BE_SUFFIX = 'dc=back,dc=up' + props = { + 'cn': BE_NAME, + 'nsslapd-suffix': BE_SUFFIX, + BACKEND_SAMPLE_ENTRIES: INSTALL_LATEST_CONFIG + } + be = Backend(topo.standalone) + backend_entry = be.create(properties=props) + + # perform backup + backup_dir_name = "backup-%s" % datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + archive = os.path.join(topo.standalone.ds_paths.backup_dir, backup_dir_name) + backup_task = BackupTask(topo.standalone) + task_properties = {'nsArchiveDir': archive} + backup_task.create(properties=task_properties) + backup_task.wait() + assert backup_task.get_exit_code() == 0 + + # Remove new backend + backend_entry.delete() + + # Restore the backup - it should fail + restore_task = RestoreTask(topo.standalone) + task_properties = {'nsArchiveDir': archive} + restore_task.create(properties=task_properties) + restore_task.wait() + assert restore_task.get_exit_code() != 0 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c index e103395a3..4eb0eb593 100644 --- a/ldap/servers/slapd/back-ldbm/archive.c +++ b/ldap/servers/slapd/back-ldbm/archive.c @@ -19,14 +19,14 @@ int ldbm_back_archive2ldbm(Slapi_PBlock *pb) { struct ldbminfo *li; - char *rawdirectory = NULL; /* -a <directory> */ - char *directory = NULL; /* normalized */ - int return_value = -1; - int task_flags = 0; - int run_from_cmdline = 0; Slapi_Task *task; - int is_old_to_new = 0; ldbm_instance *inst = NULL; + char *rawdirectory = NULL; /* -a <directory> */ + char *directory = NULL; /* normalized */ + int32_t return_value = -1; + int32_t task_flags = 0; + int32_t run_from_cmdline = 0; + int32_t is_old_to_new = 0; slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li); slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory); @@ -165,8 +165,7 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb) "the backup set. 
error=%d (%s)\n", return_value, dblayer_strerror(return_value)); if (task) { - slapi_task_log_notice(task, "Failed to read the backup file set " - "from %s", + slapi_task_log_notice(task, "Failed to read the backup file set from %s", directory); } } @@ -177,7 +176,7 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb) char *p; char c; char *bakup_dir = NULL; - int skipinit = SLAPI_UPGRADEDB_SKIPINIT; + int32_t skipinit = SLAPI_UPGRADEDB_SKIPINIT; p = strrchr(directory, '/'); if (NULL == p) { @@ -204,25 +203,26 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb) return_value = ldbm_back_upgradedb(pb); } } else { - ldbm_instance *inst; Object *inst_obj; - int ret; + int32_t ret; if (0 != return_value) { - /* error case (607331) - * just to go back to the previous state if possible */ - if ((return_value = dblayer_start(li, DBLAYER_NORMAL_MODE))) { + /* + * error case (607331) + * just to go back to the previous state if possible (preserve + * original error for now) + */ + if ((ret = dblayer_start(li, DBLAYER_NORMAL_MODE))) { slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_archive2ldbm", "Unable to to start database in [%s]\n", li->li_directory); if (task) { - slapi_task_log_notice(task, "Failed to start the database in " - "%s", + slapi_task_log_notice(task, "Failed to start the database in %s", li->li_directory); } - goto out; } } + /* bring all backends and changelog back online */ plugin_call_plugins(pb, SLAPI_PLUGIN_BE_POST_OPEN_FN); for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj; @@ -234,8 +234,7 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb) "ldbm_back_archive2ldbm", "Unable to restart '%s'\n", inst->inst_name); if (task) { - slapi_task_log_notice(task, "Unable to restart '%s'", - inst->inst_name); + slapi_task_log_notice(task, "Unable to restart '%s'", inst->inst_name); } } else { slapi_mtn_be_enable(inst->inst_be); @@ -243,6 +242,7 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb) } } } + out: if (run_from_cmdline && (0 == return_value)) { dblayer_restore_file_update(li, directory); diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c index 8a51aedd8..55a9f2582 100644 --- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c +++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c @@ -5294,8 +5294,7 @@ bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task) "exist.\n", src_dir); if (task) { - slapi_task_log_notice(task, "Restore: backup directory %s does not " - "exist.\n", + slapi_task_log_notice(task, "Restore: backup directory %s does not exist.", src_dir); } return LDAP_UNWILLING_TO_PERFORM; @@ -5304,8 +5303,7 @@ bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task) "a directory.\n", src_dir); if (task) { - slapi_task_log_notice(task, "Restore: backup directory %s is not " - "a directory.\n", + slapi_task_log_notice(task, "Restore: backup directory %s is not a directory.", src_dir); } return LDAP_UNWILLING_TO_PERFORM; @@ -5345,12 +5343,13 @@ bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task) inst = ldbm_instance_find_by_name(li, (char *)direntry->name); if (inst == NULL) { slapi_log_err(SLAPI_LOG_ERR, - "bdb_restore", "Target server has no %s configured\n", + "bdb_restore", "Target server has no backend (%s) configured\n", direntry->name); if (task) { slapi_task_log_notice(task, - "bdb_restore - Target server has no %s configured\n", + "bdb_restore - Target server has no backend (%s) configured", direntry->name); + slapi_task_cancel(task, LDAP_UNWILLING_TO_PERFORM); } PR_CloseDir(dirhandle); return_value = 
LDAP_UNWILLING_TO_PERFORM; @@ -5363,7 +5362,7 @@ bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task) src_dir, inst->inst_parent_dir_name); if (task) { slapi_task_log_notice(task, - "Restore: backup dir %s and target dir %s are identical\n", + "Restore: backup dir %s and target dir %s are identical", src_dir, inst->inst_parent_dir_name); } PR_CloseDir(dirhandle); @@ -5398,7 +5397,7 @@ bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task) "bdb_restore", "Failed to open the directory \"%s\"\n", real_src_dir); if (task) { slapi_task_log_notice(task, - "Restore: failed to open the directory \"%s\"\n", real_src_dir); + "Restore: failed to open the directory \"%s\"", real_src_dir); } return_value = -1; goto error_out; @@ -5431,7 +5430,7 @@ bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task) changelogdir); if (task) { slapi_task_log_notice(task, - "Restore: broken changelog dir path %s\n", + "Restore: broken changelog dir path %s", changelogdir); } goto error_out; @@ -5540,8 +5539,7 @@ bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task) char *dataversion = NULL; if (bdb_version_read(li, home_dir, &ldbmversion, &dataversion) != 0) { - slapi_log_err(SLAPI_LOG_WARNING, "bdb_restore", "Unable to read dbversion " - "file in %s\n", + slapi_log_err(SLAPI_LOG_WARNING, "bdb_restore", "Unable to read dbversion file in %s\n", home_dir); } else { adjust_idl_switch(ldbmversion, li); diff --git a/src/lib389/lib389/cli_conf/backup.py b/src/lib389/lib389/cli_conf/backup.py index 1e73fbadb..c53a39b94 100644 --- a/src/lib389/lib389/cli_conf/backup.py +++ b/src/lib389/lib389/cli_conf/backup.py @@ -26,11 +26,12 @@ def backup_restore(inst, basedn, log, args): task = inst.restore_online(archive=args.archive, db_type=args.db_type) task.wait() result = task.get_exit_code() + task_log = task.get_task_log() if task.is_complete() and result == 0: log.info("The backup restore task has finished successfully") else: - raise ValueError("The backup restore task has failed with the error code: ({})".format(result)) + raise ValueError("The backup restore task has failed with the error code: {}\n{}".format(result, task_log)) def create_parser(subparsers):
0
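On the C side, the heart of the fix is not clobbering the first error code with the result of the recovery attempt. A small self-contained illustration of that pattern follows; the helper functions are placeholders, not 389-ds APIs.

#include <stdio.h>

static int restore_archive(void) { return -1; }   /* stand-in for the failing restore */
static int restart_database(void) { return -1; }  /* stand-in for dblayer_start() */

int
do_restore_task(void)
{
    int return_value = restore_archive();
    int ret;

    if (return_value != 0) {
        /* Try to go back to the previous state, but keep the recovery status
         * in its own variable so the original error is not overwritten. */
        if ((ret = restart_database()) != 0) {
            fprintf(stderr, "recovery also failed (%d)\n", ret);
        }
    }
    return return_value; /* the task exit code reflects the real failure */
}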
238d8fea48787f8693798447b566e44478fbae0c
389ds/389-ds-base
Ticket #47746 - ldap/servers/slapd/back-ldbm/dblayer.c: possible minor problem with sscanf Description: Warning in read_metadata (dblayer.c): > sscanf(thisline,"%[a-z]%c%s",attribute,&delimiter,value); (warning) scanf without field width limits can crash with huge input data. Add the buffer sizes to the sscanf format. Thanks to dcb for reporting the bug and providing the fix. Reviewed by [email protected].
commit 238d8fea48787f8693798447b566e44478fbae0c Author: Noriko Hosoi <[email protected]> Date: Mon Jul 14 18:11:00 2014 -0700 Ticket #47746 - ldap/servers/slapd/back-ldbm/dblayer.c: possible minor problem with sscanf Description: Warning in read_metadata (dblayer.c): > sscanf(thisline,"%[a-z]%c%s",attribute,&delimiter,value); (warning) scanf without field width limits can crash with huge input data. Adding the size of the buffer to the sscanf format. Thanks to dcb for reporting the bug and fix. Reviewed by [email protected]. diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index 4f388457c..f3159a9cd 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -5302,7 +5302,7 @@ static int read_metadata(struct ldbminfo *li) nextline++; } } - sscanf(thisline,"%[a-z]%c%s",attribute,&delimiter,value); + sscanf(thisline,"%512[a-z]%c%128s",attribute,&delimiter,value); if (0 == strcmp("cachesize",attribute)) { priv->dblayer_previous_cachesize = strtoul(value, NULL, 10); } else if (0 == strcmp("ncache",attribute)) {
0
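A standalone demonstration of the sscanf hardening. The buffer sizes here are chosen to leave room for the terminating NUL and are an assumption of this sketch, not a statement about the buffers in dblayer.c.

#include <stdio.h>

int
main(void)
{
    char attribute[513]; /* %512[a-z] writes at most 512 chars plus NUL */
    char value[129];     /* %128s writes at most 128 chars plus NUL */
    char delimiter;
    const char *thisline = "cachesize:10485760";

    /* Field width limits cap each conversion at the buffer capacity. */
    if (sscanf(thisline, "%512[a-z]%c%128s", attribute, &delimiter, value) == 3) {
        printf("attribute=%s delimiter=%c value=%s\n", attribute, delimiter, value);
    }
    return 0;
}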
e873a84559ad23cec56370b3d8511f658d476d99
389ds/389-ds-base
Bug 612264 - ACI issue with (targetattr='userPassword') If an ACI has a targetattr of userPassword and uses the USERDN keyword, the ACI may not be evaluated correctly for password change operations. This is caused by the fact that we use a dummy target entry to check if the password change is allowed early in the operation. This dummy entry will not have any attributes that the ACI may use. The fix is to actually fetch the target entry with all of its attributes. We still create a dummy entry if the target doesn't exist to prevent returning a no such entry error when we should be returning an access denied or insufficient access error.
commit e873a84559ad23cec56370b3d8511f658d476d99 Author: Nathan Kinder <[email protected]> Date: Wed Sep 1 10:13:13 2010 -0700 Bug 612264 - ACI issue with (targetattr='userPassword') If an ACI has a targetattr of userPassword and uses the USERDN keyword, the ACI may not be evaluated correctly for password change operations. This is caused by the fact that we use a dummy target entry to check if the pasword change is allowed early in the operation. This dummy entry will not have any attributes that the ACI may use. The fix is to actually fetch the target entry with all of it's attributes. We still create a dummy entry if the target doesn't exist to prevent returning a no such entry error when we should be returning an access denied or insufficient access error. diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 55308a309..3e5b19e3a 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -978,11 +978,16 @@ static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old mods[0] = mod; mods[1] = NULL; - /* Create a bogus entry with just the target dn. This will - * only be used for checking the ACIs. */ - e = slapi_entry_alloc(); - slapi_entry_init( e, NULL, NULL ); - slapi_sdn_set_dn_byref(slapi_entry_get_sdn(e), dn); + /* We need to actually fetch the target here to use for ACI checking. */ + slapi_search_internal_get_entry(&sdn, NULL, &e, (void *)plugin_get_default_component_id()); + + /* Create a bogus entry with just the target dn if we were unable to + * find the actual entry. This will only be used for checking the ACIs. */ + if (e == NULL) { + e = slapi_entry_alloc(); + slapi_entry_init( e, NULL, NULL ); + slapi_sdn_set_dn_byref(slapi_entry_get_sdn(e), dn); + } /* Set the backend in the pblock. The slapi_access_allowed function * needs this set to work properly. */
0
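A hedged sketch of the approach, using the SLAPI calls that appear in the patch; the helper name and the header/plugin-identity plumbing are simplified for illustration.

#include "slapi-plugin.h"

/* Fetch the real target entry (with all attributes) for ACI evaluation, and
 * only fall back to a bare DN-only entry when the target does not exist. */
static Slapi_Entry *
get_aci_target(Slapi_DN *sdn, const char *dn, void *plugin_identity)
{
    Slapi_Entry *e = NULL;

    slapi_search_internal_get_entry(sdn, NULL, &e, plugin_identity);

    if (e == NULL) {
        /* Target missing: a bogus entry with only the DN keeps access checks
         * returning insufficient access instead of no-such-entry. */
        e = slapi_entry_alloc();
        slapi_entry_init(e, NULL, NULL);
        slapi_sdn_set_dn_byref(slapi_entry_get_sdn(e), dn);
    }
    return e;
}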
ec4a669cd38bebe90abd3e0c9fb1d6ecd35ef153
389ds/389-ds-base
ticket 49551 - correctly handle subordinates and tombstone numsubordinates Additional fix for a case where an ADD is directly turned into a tombstone and the tombstone is resurrected as a conflict Reviewed by: Mark, thanks
commit ec4a669cd38bebe90abd3e0c9fb1d6ecd35ef153 Author: Ludwig Krispenz <[email protected]> Date: Thu Feb 22 15:26:35 2018 +0100 ticket 49551 - correctly handle subordinates and tombstone numsubordinates Additional fix for a case where an ADD is directly turned into a tombstone and teh tombstone is resurrected as conflict Reviewed by: Mark, thanks diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c index f0a3262ec..412e1d394 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_add.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c @@ -849,7 +849,12 @@ ldbm_back_add(Slapi_PBlock *pb) subordinate count specifically */ if (parententry) { int op = is_resurect_operation ? PARENTUPDATE_RESURECT : PARENTUPDATE_ADD; - if (is_cenotaph_operation ) op |= PARENTUPDATE_CREATE_TOMBSTONE; + if (is_cenotaph_operation || (is_tombstone_operation && !is_ruv)) { + /* if we directly add a tombstone the tombstone numsubordinates have to be increased + * (does not apply to adding the RUV) + */ + op |= PARENTUPDATE_CREATE_TOMBSTONE; + } retval = parent_update_on_childchange(&parent_modify_c, op, NULL); slapi_log_err(SLAPI_LOG_BACKLDBM, "ldbm_back_add", "conn=%lu op=%d parent_update_on_childchange: old_entry=0x%p, new_entry=0x%p, rc=%d\n", diff --git a/ldap/servers/slapd/back-ldbm/parents.c b/ldap/servers/slapd/back-ldbm/parents.c index 1afc795c0..4583885f1 100644 --- a/ldap/servers/slapd/back-ldbm/parents.c +++ b/ldap/servers/slapd/back-ldbm/parents.c @@ -141,9 +141,11 @@ parent_update_on_childchange(modify_context *mc, int op, size_t *new_sub_count) } /* tombstoneNumSubordinates has to be updated if a tombstone child has been - * deleted or a tombstone has been directly added (cenotaph) */ + * deleted or a tombstone has been directly added (cenotaph) + * or a tombstone is resurrected + */ current_sub_count = LDAP_MAXINT; - if (repl_op) { + if (repl_op || (PARENTUPDATE_RESURECT == op)) { ret = slapi_entry_attr_find(mc->old_entry->ep_entry, tombstone_numsubordinates, &read_attr); if (0 == ret) {
0
17a56dbb676bbf4a4fd4f752eaa5bda616c4696c
389ds/389-ds-base
Ticket 48846 - Rlimit checks should detect RLIM_INFINITY Bug Description: On certain platforms (32-bit) it was possible for dsktune and other checks to fail as rlim.cur and rlim.max were -1. This would result in a pattern of rlim.cur < some base limit failing, even though the rlimit implied an infinite limit. This did not affect 64-bit platforms as RLIM_INFINITY for them is uint64 max. Fix Description: Fix our various checks to look for and detect RLIM_INFINITY when it is set. https://fedorahosted.org/389/ticket/48846 Author: wibrown Review by: nhosoi (Thanks!)
commit 17a56dbb676bbf4a4fd4f752eaa5bda616c4696c Author: William Brown <[email protected]> Date: Fri May 20 10:20:16 2016 +1000 Ticket 48846 - Rlimit checks should detect RLIM_INFINITY Bug Description: On certain platforms (32 bit) it was possible for dsktune and other checks to fail as rlim.cur and rlim.max was -1. This would result in a pattern of rlim.cur < some base limit, failing, even though rlimit implied infinite. This did not affect 64bit platforms as rlim_infinity for them is uint64 max. Fix Description: Fix our various checks to look for and detect RLIM_INFINITY when set correctly. https://fedorahosted.org/389/ticket/48846 Author: wibrown Review by: nhosoi (Thanks!) diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index b0fd73bc4..e7b02fa24 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -1571,6 +1571,7 @@ int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size size_t vmsize = 0; size_t freesize = 0; + size_t rlimsize = 0; *pagesize = getpagesize(); @@ -1632,8 +1633,14 @@ int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size freesize /= (*pagesize / 1024); /* procpages is now in kb not pages... */ *procpages /= (*pagesize / 1024); - /* This is in bytes, make it pages */ - *availpages = util_getvirtualmemsize() / *pagesize; + + rlimsize = util_getvirtualmemsize(); + /* On a 64 bit system, this is uint64 max, but on 32 it's -1 */ + /* Either way, we should be ignoring it at this point if it's infinite */ + if (rlimsize != RLIM_INFINITY) { + /* This is in bytes, make it pages */ + rlimsize = rlimsize / *pagesize; + } /* Now we have vmsize, the availpages from getrlimit, our freesize */ vmsize /= (*pagesize / 1024); @@ -1655,19 +1662,25 @@ int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size * - getrlimit (availpages) * - freesize */ - LDAPDebug(LDAP_DEBUG_TRACE,"util_info_sys_pages pages=%lu, getrlim=%lu, freesize=%lu\n", - (unsigned long)*pages, (unsigned long)*availpages, (unsigned long)freesize); - if (*pages < *availpages && *pages < freesize) { + if (rlimsize == RLIM_INFINITY) { + LDAPDebug(LDAP_DEBUG_TRACE,"util_info_sys_pages pages=%lu, getrlim=RLIM_INFINITY, freesize=%lu\n", + (unsigned long)*pages, (unsigned long)freesize, 0); + } else { + LDAPDebug(LDAP_DEBUG_TRACE,"util_info_sys_pages pages=%lu, getrlim=%lu, freesize=%lu\n", + (unsigned long)*pages, (unsigned long)*availpages, (unsigned long)freesize); + } + + if (rlimsize != RLIM_INFINITY && rlimsize < freesize && rlimsize < *pages) { + LDAPDebug(LDAP_DEBUG_TRACE,"util_info_sys_pages using getrlim for availpages \n",0,0,0); + *availpages = rlimsize; + } else if (*pages < freesize) { LDAPDebug(LDAP_DEBUG_TRACE,"util_info_sys_pages using pages for availpages \n",0,0,0); *availpages = *pages; - } else if ( freesize < *pages && freesize < *availpages ) { + } else { LDAPDebug(LDAP_DEBUG_TRACE,"util_info_sys_pages using freesize for availpages \n",0,0,0); *availpages = freesize; - } else { - LDAPDebug(LDAP_DEBUG_TRACE,"util_info_sys_pages using getrlim for availpages \n",0,0,0); } - } #endif /* linux */ diff --git a/ldap/systools/idsktune.c b/ldap/systools/idsktune.c index 6a24b0ee1..c7e76e73c 100644 --- a/ldap/systools/idsktune.c +++ b/ldap/systools/idsktune.c @@ -2380,7 +2380,12 @@ static void disk_tests(void) if (flag_debug) { printf("DEBUG : RLIMIT_CORE is %ld, %ld\n", r.rlim_cur, r.rlim_max); } - if (r.rlim_cur == (unsigned long)-1 || r.rlim_cur >= 2147483647) { + +#if defined(RLIM_INFINITY) + if 
(r.rlim_cur == RLIM_INFINITY || r.rlim_cur >= 2147483647) { +#else + if (r.rlim_cur >= 2147483647) { +#endif if (swap_mb <2048) { max_core = swap_mb; } else { @@ -2482,7 +2487,7 @@ static void check_mem_size(int ro,char *rn) rprev = r.rlim_cur; r.rlim_cur = r.rlim_max; setrlimit(ro,&r); - getrlimit(ro,&r); + getrlimit(ro,&r); if (flag_debug) printf("DEBUG : %s (%d) max %d prev %d.\n", rn, ro, (int)r.rlim_cur, rprev); @@ -2490,11 +2495,17 @@ static void check_mem_size(int ro,char *rn) if (r.rlim_cur <= 0L) { return; } -#endif +#endif if (r.rlim_cur <= 0) { return; } +#if defined(RLIM_INFINITY) + if (r.rlim_cur == RLIM_INFINITY) { + return; + } +#endif + m_mb = r.rlim_cur / 1048576; if (m_mb < mem_min) { /* 64 MB */ @@ -2522,7 +2533,11 @@ static void limits_tests(void) #if defined(RLIMIT_NOFILE) getrlimit(RLIMIT_NOFILE,&r); +#if defined(RLIM_INFINITY) + if (r.rlim_max <= 1024 && r.rlim_max != RLIM_INFINITY) { +#else if (r.rlim_max <= 1024) { +#endif if (flag_html) printf("<P>\n"); if (flag_carrier) { @@ -2568,7 +2583,11 @@ static void limits_tests(void) } } +#if defined(RLIM_INFINITY) + if (r.rlim_cur <= 1024 && r.rlim_max != RLIM_INFINITY) { +#else if (r.rlim_cur <= 1024) { +#endif if (flag_html) { printf("<P>\n"); }
0
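Finally, a small standalone program showing the RLIM_INFINITY comparison the fix introduces; RLIMIT_AS is used here only as an example resource.

#include <stdio.h>
#include <sys/resource.h>

int
main(void)
{
    struct rlimit r;

    if (getrlimit(RLIMIT_AS, &r) != 0) {
        perror("getrlimit");
        return 1;
    }

    /* Compare against RLIM_INFINITY explicitly instead of treating the value
     * numerically, which shows up as -1 on 32-bit platforms. */
    if (r.rlim_cur == RLIM_INFINITY) {
        printf("address-space limit: unlimited\n");
    } else {
        printf("address-space limit: %llu bytes\n",
               (unsigned long long)r.rlim_cur);
    }
    return 0;
}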