commit_id: string
repo: string
commit_message: string
diff: string
label: int64
ac8ceb91e42c31672147018d9d654879970dd38d
389ds/389-ds-base
Ticket 628 - crash in aci evaluation Bug Description: When trying to get effective rights on a entry that does not exist, can lead to a crash from dereferencing a NULL pointer. Fix Description: Check for NULL entry pointers and return the appropriate error. https://fedorahosted.org/389/ticket/628 Reviewed by: nhosoi(Thanks Noriko!)
commit ac8ceb91e42c31672147018d9d654879970dd38d Author: Mark Reynolds <[email protected]> Date: Mon Mar 25 10:35:31 2013 -0400 Ticket 628 - crash in aci evaluation Bug Description: When trying to get effective rights on a entry that does not exist, can lead to a crash from dereferencing a NULL pointer. Fix Description: Check for NULL entry pointers and return the appropriate error. https://fedorahosted.org/389/ticket/628 Reviewed by: nhosoi(Thanks Noriko!) diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index 17af1bfc7..d7dbf61e3 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -2363,7 +2363,11 @@ slapi_entry_next_attr( const Slapi_Entry *e, Slapi_Attr *prevattr, Slapi_Attr ** int slapi_entry_attr_find( const Slapi_Entry *e, const char *type, Slapi_Attr **a ) { - int r= -1; + int r = -1; + + if(e == NULL){ + return r; + } *a = attrlist_find( e->e_attrs, type ); if (*a != NULL) { @@ -2374,11 +2378,11 @@ slapi_entry_attr_find( const Slapi_Entry *e, const char *type, Slapi_Attr **a ) * Our state information storage scheme can cause this, since * we have to hang onto the deleted value state information. */ - *a= NULL; + *a = NULL; } else { - r= 0; + r = 0; } } return r; @@ -3026,18 +3030,20 @@ slapi_entry_attr_has_syntax_value(const Slapi_Entry *e, const char *type, const Slapi_Value *value) { - int r= 0; - Slapi_Attr *attr; + int r = 0; + Slapi_Attr *attr; + if(e == NULL){ + return r; + } if(slapi_entry_attr_find(e, type, &attr)==0) { - const struct berval *bv = slapi_value_get_berval(value); + const struct berval *bv = slapi_value_get_berval(value); - if ( bv != NULL) { - r = (slapi_attr_value_find(attr, bv) == 0); - } - - } + if ( bv != NULL) { + r = (slapi_attr_value_find(attr, bv) == 0); + } + } return r; }
0
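The fix in the record above guards `slapi_entry_attr_find()` and `slapi_entry_attr_has_syntax_value()` against a NULL entry pointer. Purely as an illustration of that defensive pattern, here is a minimal, self-contained C sketch; `Entry`, `Attr`, and `entry_attr_find` are hypothetical stand-ins, not the real Slapi types or API.

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the directory-server types; not the real Slapi API. */
typedef struct attr { const char *type; const char *value; struct attr *next; } Attr;
typedef struct entry { Attr *attrs; } Entry;

/* Return 0 and set *out when the attribute exists, -1 otherwise.
 * The guard mirrors the fix above: a NULL entry is rejected up front
 * instead of being dereferenced. */
static int entry_attr_find(const Entry *e, const char *type, Attr **out)
{
    int r = -1;

    if (e == NULL || out == NULL) {
        return r;              /* nothing to search; report "not found" */
    }
    *out = NULL;
    for (Attr *a = e->attrs; a != NULL; a = a->next) {
        if (strcmp(a->type, type) == 0) {
            *out = a;
            r = 0;
            break;
        }
    }
    return r;
}

int main(void)
{
    Attr cn = { "cn", "example", NULL };
    Entry e = { &cn };
    Attr *found = NULL;

    printf("existing entry: %d\n", entry_attr_find(&e, "cn", &found));   /* 0 */
    printf("missing entry:  %d\n", entry_attr_find(NULL, "cn", &found)); /* -1, no crash */
    return 0;
}
```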
5d9b48948f980619901a3653d092c8dc655231bb
389ds/389-ds-base
610281 - fix coverity Defect Type: Control flow issues https://bugzilla.redhat.com/show_bug.cgi?id=610281 11809 DEADCODE Triaged Unassigned Bug Minor Ignore _replica_reap_tombstones() ds/ldap/servers/plugins/replication/repl5_replica.c Comment: If replica_name is NULL, _replica_reap_tombstones has returned at the line 2460. Thus there is no need to check "replica_name" is NULL or not at line 2555. 11810 DEADCODE Triaged Unassigned Bug Moderate Fix Required replica_check_for_data_reload() ds/ldap/servers/plugins/replication/repl5_replica.c Comment: At the line 1478, !cl_cover_be is always true. Therefore, there is no possibility that "<" is chosen in slapi_log_error.
commit 5d9b48948f980619901a3653d092c8dc655231bb Author: Noriko Hosoi <[email protected]> Date: Fri Jul 2 16:04:39 2010 -0700 610281 - fix coverity Defect Type: Control flow issues https://bugzilla.redhat.com/show_bug.cgi?id=610281 11809 DEADCODE Triaged Unassigned Bug Minor Ignore _replica_reap_tombstones() ds/ldap/servers/plugins/replication/repl5_replica.c Comment: If replica_name is NULL, _replica_reap_tombstones has returned at the line 2460. Thus there is no need to check "replica_name" is NULL or not at line 2555. 11810 DEADCODE Triaged Unassigned Bug Moderate Fix Required replica_check_for_data_reload() ds/ldap/servers/plugins/replication/repl5_replica.c Comment: At the line 1478, !cl_cover_be is always true. Therefore, there is no possibility that "<" is chosen in slapi_log_error. diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c index 0d2f19561..5042f322c 100644 --- a/ldap/servers/plugins/replication/repl5_replica.c +++ b/ldap/servers/plugins/replication/repl5_replica.c @@ -1476,11 +1476,11 @@ int replica_check_for_data_reload (Replica *r, void *arg) /* We can't use existing changelog - remove existing file */ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "replica_check_for_data_reload: " - "Warning: data for replica %s was reloaded and it no longer matches the data " + "Warning: data for replica %s was reloaded and it no longer matches the data " "in the changelog (replica data %s changelog). Recreating the changelog file. This could affect replication " "with replica's consumers in which case the consumers should be reinitialized.\n", escape_string(slapi_sdn_get_dn(r->repl_root),ebuf), - ((!be_cover_cl && !cl_cover_be) ? "<>" : (!be_cover_cl ? "<" : ">")) ); + ((!be_cover_cl) ? "<>" : ">") ); rc = cl5DeleteDBSync (r_obj); @@ -2554,7 +2554,7 @@ _replica_reap_tombstones(void *arg) { slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "Info: No purge CSN for tombstone reap for replica %s.\n", - replica_name ? replica_name : "(null)"); + replica_name); } done:
0
9f88491132cb2d1c5f11fd5475e1a6826e1dc7ee
389ds/389-ds-base
Bug 553027 - Support for nsUniqueId and alias in Retro Changelog. This patch adds support for storing the target's nsUniqueId in change log record and renaming any additional attribute using an alias.
commit 9f88491132cb2d1c5f11fd5475e1a6826e1dc7ee Author: Endi S. Dewata <[email protected]> Date: Tue Jan 12 14:43:08 2010 -0600 Bug 553027 - Support for nsUniqueId and alias in Retro Changelog. This patch adds support for storing the target's nsUniqueId in change log record and renaming any additional attribute using an alias. diff --git a/ldap/servers/plugins/retrocl/retrocl.c b/ldap/servers/plugins/retrocl/retrocl.c index 836361a0b..c586e378e 100644 --- a/ldap/servers/plugins/retrocl/retrocl.c +++ b/ldap/servers/plugins/retrocl/retrocl.c @@ -79,6 +79,7 @@ Slapi_Backend *retrocl_be_changelog = NULL; PRLock *retrocl_internal_lock = NULL; int retrocl_nattributes = 0; char **retrocl_attributes = NULL; +char **retrocl_aliases = NULL; /* ----------------------------- Retrocl Plugin */ @@ -302,6 +303,7 @@ static int retrocl_start (Slapi_PBlock *pb) static int retrocl_started = 0; int rc = 0; Slapi_Entry *e = NULL; + char **values = NULL; if (retrocl_started) { return rc; @@ -323,12 +325,49 @@ static int retrocl_start (Slapi_PBlock *pb) return -1; } - retrocl_attributes = slapi_entry_attr_get_charray(e, "nsslapd-attribute"); - if (retrocl_attributes != NULL) { + values = slapi_entry_attr_get_charray(e, "nsslapd-attribute"); + if (values != NULL) { + int n = 0; + int i = 0; + + slapi_log_error(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, "nsslapd-attribute:\n"); + + for (n=0; values && values[n]; n++) { + slapi_log_error(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, " - %s\n", values[n]); + } + + retrocl_nattributes = n; + + retrocl_attributes = (char **)slapi_ch_calloc(n, sizeof(char *)); + retrocl_aliases = (char **)slapi_ch_calloc(n, sizeof(char *)); + slapi_log_error(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, "Attributes:\n"); - for (retrocl_nattributes=0; retrocl_attributes && retrocl_attributes[retrocl_nattributes]; retrocl_nattributes++) { - slapi_log_error(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, " - %s\n", retrocl_attributes[retrocl_nattributes]); + + for (i=0; i<n; i++) { + char *value = values[i]; + size_t length = strlen(value); + + char *pos = strchr(value, ':'); + if (pos == NULL) { + retrocl_attributes[i] = slapi_ch_strdup(value); + retrocl_aliases[i] = NULL; + + slapi_log_error(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, " - %s\n", + retrocl_attributes[i]); + + } else { + retrocl_attributes[i] = slapi_ch_malloc(pos-value+1); + strncpy(retrocl_attributes[i], value, pos-value); + + retrocl_aliases[i] = slapi_ch_malloc(value+length-pos); + strcpy(retrocl_aliases[i], pos+1); + + slapi_log_error(SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, " - %s [%s]\n", + retrocl_attributes[i], retrocl_aliases[i]); + } } + + slapi_ch_array_free(values); } retrocl_started = 1; @@ -352,6 +391,7 @@ static int retrocl_stop (Slapi_PBlock *pb) int rc = 0; slapi_ch_array_free(retrocl_attributes); + slapi_ch_array_free(retrocl_aliases); retrocl_stop_trimming(); retrocl_be_changelog = NULL; diff --git a/ldap/servers/plugins/retrocl/retrocl.h b/ldap/servers/plugins/retrocl/retrocl.h index 5c0cad1c7..c4354d47e 100644 --- a/ldap/servers/plugins/retrocl/retrocl.h +++ b/ldap/servers/plugins/retrocl/retrocl.h @@ -115,6 +115,7 @@ extern void* g_plg_identity [PLUGIN_MAX]; extern Slapi_Backend *retrocl_be_changelog; extern int retrocl_nattributes; extern char** retrocl_attributes; +extern char** retrocl_aliases; extern const char *attr_changenumber; extern const char *attr_targetdn; @@ -125,6 +126,7 @@ extern const char *attr_deleteoldrdn; extern const char *attr_changes; extern const char *attr_changetime; extern const char *attr_objectclass; 
+extern const char *attr_nsuniqueid; extern const char *attr_isreplicated; extern PRLock *retrocl_internal_lock; diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c index 93c677690..0e537eee7 100644 --- a/ldap/servers/plugins/retrocl/retrocl_po.c +++ b/ldap/servers/plugins/retrocl/retrocl_po.c @@ -64,6 +64,7 @@ const char *attr_changes = "changes"; const char *attr_newsuperior = "newsuperior"; const char *attr_changetime = "changetime"; const char *attr_objectclass = "objectclass"; +const char *attr_nsuniqueid = "nsuniqueid"; const char *attr_isreplicated = "isreplicated"; /* @@ -176,6 +177,7 @@ write_replog_db( Slapi_PBlock *newPb = NULL; changeNumber changenum; int i; + int extensibleObject = 0; PR_Lock(retrocl_internal_lock); changenum = retrocl_assign_changenumber(); @@ -207,24 +209,51 @@ write_replog_db( val.bv_len = 14; slapi_entry_add_values( e, "objectclass", vals ); - val.bv_val = "extensibleObject"; - val.bv_len = 16; - slapi_entry_add_values( e, "objectclass", vals ); - for ( i=0; i<retrocl_nattributes; i++ ) { char* attributeName = retrocl_attributes[i]; + char* attributeAlias = retrocl_aliases[i]; + + if ( attributeAlias == NULL ) { + attributeAlias = attributeName; + } + + if ( strcasecmp( attributeName, attr_nsuniqueid ) == 0 ) { + Slapi_Entry *entry = NULL; + char *uniqueId = NULL; + + slapi_pblock_get( pb, SLAPI_ENTRY_POST_OP, &entry ); + if ( entry == NULL ) { + slapi_pblock_get( pb, SLAPI_ENTRY_PRE_OP, &entry ); + } + + uniqueId = slapi_entry_get_uniqueid( entry ); - if ( strcasecmp( attributeName, attr_isreplicated ) == 0 ) { + slapi_log_error( SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, + "write_replog_db: add %s: \"%s\"\n", attributeAlias, uniqueId ); + + val.bv_val = uniqueId; + val.bv_len = strlen( uniqueId ); + + slapi_entry_add_values( e, attributeAlias, vals ); + + extensibleObject = 1; + + } else if ( strcasecmp( attributeName, attr_isreplicated ) == 0 ) { int isReplicated = 0; char *attributeValue = NULL; slapi_pblock_get( pb, SLAPI_IS_REPLICATED_OPERATION, &isReplicated ); attributeValue = isReplicated ? "TRUE" : "FALSE"; + slapi_log_error( SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, + "write_replog_db: add %s: \"%s\"\n", attributeAlias, attributeValue ); + val.bv_val = attributeValue; val.bv_len = strlen( attributeValue ); - slapi_entry_add_values( e, attributeName, vals ); + slapi_entry_add_values( e, attributeAlias, vals ); + + extensibleObject = 1; } else { Slapi_Entry *entry = NULL; @@ -250,11 +279,22 @@ write_replog_db( if ( valueSet == NULL ) continue; - slapi_entry_add_valueset( e, attributeName, valueSet ); + slapi_log_error( SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, + "write_replog_db: add %s\n", attributeAlias ); + + slapi_entry_add_valueset( e, attributeAlias, valueSet ); slapi_vattr_values_free( &valueSet, &actual_type_name, buffer_flags ); + + extensibleObject = 1; } } + if ( extensibleObject ) { + val.bv_val = "extensibleObject"; + val.bv_len = 16; + slapi_entry_add_values( e, "objectclass", vals ); + } + /* Set the changeNumber attribute */ sprintf( chnobuf, "%lu", changenum ); val.bv_val = chnobuf;
0
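The retro changelog patch above splits each `nsslapd-attribute` value on the first ':' into an attribute name and an optional alias. The following self-contained C sketch shows that parsing idea in isolation; `split_attr_alias` and `dup_range` are made-up helper names, and the real code uses `slapi_ch_malloc`/`slapi_ch_strdup` rather than plain libc allocation.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy len bytes of s into a fresh NUL-terminated buffer. */
static char *dup_range(const char *s, size_t len)
{
    char *out = malloc(len + 1);
    if (out) {
        memcpy(out, s, len);
        out[len] = '\0';
    }
    return out;
}

/* Split "name" or "name:alias"; alias is NULL when no ':' is present. */
static void split_attr_alias(const char *value, char **name, char **alias)
{
    const char *pos = strchr(value, ':');

    if (pos == NULL) {
        *name = dup_range(value, strlen(value));
        *alias = NULL;
    } else {
        *name = dup_range(value, (size_t)(pos - value));
        *alias = dup_range(pos + 1, strlen(pos + 1));
    }
}

int main(void)
{
    const char *values[] = { "cn", "nsuniqueid:targetUniqueId" };

    for (size_t i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
        char *name = NULL, *alias = NULL;
        split_attr_alias(values[i], &name, &alias);
        printf("%s -> name=%s alias=%s\n", values[i], name, alias ? alias : "(none)");
        free(name);
        free(alias);
    }
    return 0;
}
```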
babae553ed4912bfca8171b6b3b6a605fa4f3e8b
389ds/389-ds-base
[172411] Use system SASL on RHEL
commit babae553ed4912bfca8171b6b3b6a605fa4f3e8b Author: Noriko Hosoi <[email protected]> Date: Fri Nov 4 02:45:20 2005 +0000 [172411] Use system SASL on RHEL diff --git a/components.mk b/components.mk index 2ec85975e..008445d05 100644 --- a/components.mk +++ b/components.mk @@ -354,14 +354,17 @@ LIBLDAP = $(addprefix $(LDAP_LIBPATH)/, $(LDAPOBJNAME)) ### SASL package ########################################## -ifdef SASL_SOURCE_ROOT - SASL_LIBPATH = $(SASL_SOURCE_ROOT)/lib - SASL_BINPATH = $(SASL_SOURCE_ROOT)/bin - SASL_INCDIR = $(SASL_SOURCE_ROOT)/include +ifeq ($(ARCH), Linux) + SASL_LIBPATH = /usr/lib + SASL_INCDIR = /usr/include/sasl else - SASL_LIBPATH = $(SASL_BUILD_DIR)/lib - SASL_BINPATH = $(SASL_BUILD_DIR)/bin - SASL_INCDIR = $(SASL_BUILD_DIR)/include + ifdef SASL_SOURCE_ROOT + SASL_LIBPATH = $(SASL_SOURCE_ROOT)/lib + SASL_INCDIR = $(SASL_SOURCE_ROOT)/include + else + SASL_LIBPATH = $(SASL_BUILD_DIR)/lib + SASL_INCDIR = $(SASL_BUILD_DIR)/include + endif endif SASL_INCLUDE = $(SASL_INCDIR) @@ -374,7 +377,7 @@ else SASL_LIB_ROOT_NAME = sasl2 SASL_LIBS = lib$(SASL_LIB_ROOT_NAME).a ifeq ($(ARCH), Linux) - GSSAPI_LIBS=-L/usr/kerberos/lib -lgssapi_krb5 + GSSAPI_LIBS=-lgssapi_krb5 endif ifeq ($(ARCH), SOLARIS) GSSAPI_LIBS=-lgss diff --git a/internal_comp_deps.mk b/internal_comp_deps.mk index ece5cda3e..847c26ce3 100644 --- a/internal_comp_deps.mk +++ b/internal_comp_deps.mk @@ -244,6 +244,7 @@ endif endif # LDAPSDK_SOURCE_ROOT ifndef SASL_SOURCE_ROOT +ifneq ($(ARCH), Linux) #SASL_RELEASE = $(COMPONENTS_DIR_DEV)/sasl/$(SASL_VERSDIR)/$(SASL_RELDATE)/$(NSOBJDIR_NAME) SASL_RELEASE = $(COMPONENTS_DIR)/sasl/$(SASL_VERSDIR)/$(SASL_RELDATE)/$(NSOBJDIR_NAME) SASL_DEP = $(SASL_INCLUDE)/sasl.h @@ -255,14 +256,12 @@ $(SASL_DEP): $(NSCP_DISTDIR_FULL_RTL) ifdef COMPONENT_DEPS $(FTP_PULL) -method $(SASL_PULL_METHOD) \ -objdir $(SASL_BUILD_DIR) -componentdir $(SASL_RELEASE) \ - -files include - $(FTP_PULL) -method $(SASL_PULL_METHOD) \ - -objdir $(SASL_BUILD_DIR)/lib -componentdir $(SASL_RELEASE)/lib \ - -files $(SASL_LIBS) + -files include,lib endif -@if [ ! -f $@ ] ; \ then echo "Error: could not get component SASL file $@" ; \ fi +endif # not Linux endif # SASL_SOURCE_ROOT ifndef ICU_SOURCE_ROOT
0
6eab2d20988e565b4b0308f23eb40473459c1a1c
389ds/389-ds-base
Issue 6713 - ns-slapd crash during mdb offline import (#6714) Bug description: A segmentation fault is triggered in dbmdb_import_prepare_worker_entry() during an mdb offline import. The import producer thread parses, validates and writes ldif entries to the worker queue, while the import worker threads simultaneously read, format and index entries before adding them to the DB. A race condition occurs when a worker thread reads an entry before the producer has fully written it, leading to a segmentation fault. Fix description: Ensure thread safe access by locking the worker queue before writing entries. Fixes: https://github.com/389ds/389-ds-base/issues/6713 Reviewed by: @progier389, @bordaz (Thank you)
commit 6eab2d20988e565b4b0308f23eb40473459c1a1c Author: James Chapman <[email protected]> Date: Sun Apr 6 21:23:08 2025 +0000 Issue 6713 - ns-slapd crash during mdb offline import (#6714) Bug description: A segmentation fault is triggered in dbmdb_import_prepare_worker_entry() during an mdb offline import. The import producer thread parses, validates and writes ldif entries to the worker queue, while the import worker threads simultaneously read, format and index entries before adding them to the DB. A race condition occurs when a worker thread reads an entry before the producer has fully written it, leading to a segmentation fault. Fix description: Ensure thread safe access by locking the worker queue before writing entries. Fixes: https://github.com/389ds/389-ds-base/issues/6713 Reviewed by: @progier389, @bordaz (Thank you) diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c index de8d337b7..aa10d3704 100644 --- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c +++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c @@ -417,13 +417,14 @@ dbmdb_import_workerq_push(ImportQueue_t *q, WorkerQueueData_t *data) safe_cond_wait(&q->cv, &q->mutex); } } - pthread_mutex_unlock(&q->mutex); if (q->job->flags & FLAG_ABORT) { /* in this case worker thread does not free the data so we should do it */ dbmdb_import_workerq_free_data(data); + pthread_mutex_unlock(&q->mutex); return -1; } dbmdb_dup_worker_slot(q, data, slot); + pthread_mutex_unlock(&q->mutex); return 0; }
0
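The crash fix above moves the worker-slot write inside the queue mutex so a consumer thread can never read a partially written entry. Below is a minimal pthread sketch of that idea, assuming a single-slot queue for brevity; `WorkQueue`, `workq_push`, and `consumer` are illustrative names, not the dbmdb import code.

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Simplified single-slot work queue; the real import queue is more involved. */
typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t  cv;
    int             ready;     /* slot holds a complete item */
    int             aborted;   /* job is being aborted */
    char            slot[64];
} WorkQueue;

/* The point of the fix: the slot is filled *before* the mutex is released,
 * so a consumer can never observe a half-written item. */
static int workq_push(WorkQueue *q, const char *data)
{
    pthread_mutex_lock(&q->mutex);
    while (q->ready && !q->aborted) {
        pthread_cond_wait(&q->cv, &q->mutex);   /* wait for a free slot */
    }
    if (q->aborted) {
        pthread_mutex_unlock(&q->mutex);
        return -1;
    }
    snprintf(q->slot, sizeof(q->slot), "%s", data); /* write while still locked */
    q->ready = 1;
    pthread_cond_broadcast(&q->cv);
    pthread_mutex_unlock(&q->mutex);
    return 0;
}

static void *consumer(void *arg)
{
    WorkQueue *q = arg;

    pthread_mutex_lock(&q->mutex);
    while (!q->ready) {
        pthread_cond_wait(&q->cv, &q->mutex);
    }
    printf("consumed: %s\n", q->slot);          /* always sees a complete item */
    q->ready = 0;
    pthread_cond_broadcast(&q->cv);
    pthread_mutex_unlock(&q->mutex);
    return NULL;
}

int main(void)
{
    WorkQueue q;
    pthread_t t;

    memset(&q, 0, sizeof(q));
    pthread_mutex_init(&q.mutex, NULL);
    pthread_cond_init(&q.cv, NULL);

    pthread_create(&t, NULL, consumer, &q);
    workq_push(&q, "entry #1");
    pthread_join(t, NULL);
    return 0;
}
```

Build with `cc queue_demo.c -lpthread` (the filename is an assumption).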
350142039530c439bd98f3af5ec858d98134ac25
389ds/389-ds-base
Bug 627738 - The cn=monitor statistics entries for the dnentry cache do not change or change very rarely https://bugzilla.redhat.com/show_bug.cgi?id=627738 Description: Change made in this commit: cc36301a7cae6737d9f8a0e53bed653a52130a1d for the following bug introduced a missing-cache-return bug: id2entry_add_ext replaces an dn instance in the dn cache even if the DN value is identical. Replace it only when they don't match. Thanks to [email protected] for finding it out: https://bugzilla.redhat.com/show_bug.cgi?id=627738#c5
commit 350142039530c439bd98f3af5ec858d98134ac25 Author: Noriko Hosoi <[email protected]> Date: Thu Sep 2 10:49:01 2010 -0700 Bug 627738 - The cn=monitor statistics entries for the dnentry cache do not change or change very rarely https://bugzilla.redhat.com/show_bug.cgi?id=627738 Description: Change made in this commit: cc36301a7cae6737d9f8a0e53bed653a52130a1d for the following bug introduced a missing-cache-return bug: id2entry_add_ext replaces an dn instance in the dn cache even if the DN value is identical. Replace it only when they don't match. Thanks to [email protected] for finding it out: https://bugzilla.redhat.com/show_bug.cgi?id=627738#c5 diff --git a/ldap/servers/slapd/back-ldbm/id2entry.c b/ldap/servers/slapd/back-ldbm/id2entry.c index 405121467..15d742c7a 100644 --- a/ldap/servers/slapd/back-ldbm/id2entry.c +++ b/ldap/servers/slapd/back-ldbm/id2entry.c @@ -102,9 +102,10 @@ id2entry_add_ext( backend *be, struct backentry *e, back_txn *txn, int encrypt /* If the ID already exists in the DN cache && the DNs do not match, * replace it. */ - if ((CACHE_ADD( &inst->inst_dncache, bdn, &oldbdn ) == 1) && - (slapi_sdn_compare(sdn, oldbdn->dn_sdn))) { - cache_replace( &inst->inst_dncache, oldbdn, bdn ); + if (CACHE_ADD( &inst->inst_dncache, bdn, &oldbdn ) == 1) { + if (slapi_sdn_compare(sdn, oldbdn->dn_sdn)) { + cache_replace( &inst->inst_dncache, oldbdn, bdn ); + } CACHE_RETURN(&inst->inst_dncache, &oldbdn); /* to free oldbdn */ }
0
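The dn-cache fix above restructures the control flow so the existing cache entry is always released via `CACHE_RETURN`, and is replaced only when the DNs actually differ. A toy single-slot refcount sketch of that ownership rule follows; all types and functions here are invented for illustration and do not mirror the real back-ldbm cache.

```c
#include <stdio.h>
#include <string.h>

/* Single-slot stand-in for the DN cache; refcnt counts cache + caller references. */
typedef struct { const char *dn; int refcnt; } CacheEntry;
typedef struct { CacheEntry *slot; } Cache;

/* Returns 0 when e is inserted; returns 1 when an entry already exists, in
 * which case *existing gets a new reference the caller must release. */
static int cache_add(Cache *c, CacheEntry *e, CacheEntry **existing)
{
    if (c->slot != NULL) {
        c->slot->refcnt++;
        *existing = c->slot;
        return 1;
    }
    e->refcnt++;
    c->slot = e;
    return 0;
}

static void cache_return(CacheEntry **e)       /* release a caller reference */
{
    (*e)->refcnt--;
    *e = NULL;
}

static void cache_replace(Cache *c, CacheEntry *old, CacheEntry *newe)
{
    old->refcnt--;                             /* cache drops the old entry */
    newe->refcnt++;
    c->slot = newe;
}

int main(void)
{
    Cache c = { NULL };
    CacheEntry a = { "uid=a,dc=example,dc=com", 0 };
    CacheEntry b = { "uid=a,dc=example,dc=com", 0 };  /* same DN as a */
    CacheEntry *old = NULL;

    cache_add(&c, &a, &old);
    if (cache_add(&c, &b, &old) == 1) {
        if (strcmp(old->dn, b.dn) != 0) {      /* replace only on a real mismatch */
            cache_replace(&c, old, &b);
        }
        cache_return(&old);                    /* always release, match or not */
    }
    printf("cached: %s (refcnt=%d)\n", c.slot->dn, c.slot->refcnt);
    return 0;
}
```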
f325f378c3e2e2f883689fb84f2ef1a3b5bce56b
389ds/389-ds-base
Simplify program flow: eliminate unnecessary continue Reviewed by: rmeggins
commit f325f378c3e2e2f883689fb84f2ef1a3b5bce56b Author: Ken Rossato <[email protected]> Date: Mon Aug 27 17:14:03 2012 -0400 Simplify program flow: eliminate unnecessary continue Reviewed by: rmeggins diff --git a/ldap/servers/plugins/posix-winsync/posix-group-func.c b/ldap/servers/plugins/posix-winsync/posix-group-func.c index d6b68c514..2f9478e6d 100644 --- a/ldap/servers/plugins/posix-winsync/posix-group-func.c +++ b/ldap/servers/plugins/posix-winsync/posix-group-func.c @@ -479,11 +479,9 @@ addGroupMembership(Slapi_Entry *entry, Slapi_Entry *ad_entry) } else { v = slapi_value_new_string(uid); slapi_ch_free_string(&uid); - if (slapi_attr_value_find(muid_attr, slapi_value_get_berval(v)) == 0) { - slapi_value_free(&v); - continue; + if (slapi_attr_value_find(muid_attr, slapi_value_get_berval(v)) != 0) { + slapi_valueset_add_value(newvs, v); } - slapi_valueset_add_value(newvs, v); slapi_value_free(&v); } }
0
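The posix-winsync cleanup above inverts a condition so the loop no longer needs `continue` and the per-iteration cleanup runs on every path. A trivial standalone C sketch of the same restructuring, using made-up data instead of Slapi values:

```c
#include <stdio.h>
#include <string.h>

/* Toy "value set": add a uid only if it is not already present. */
#define MAX 8
static const char *set[MAX];
static int set_len = 0;

static int set_contains(const char *v)
{
    for (int i = 0; i < set_len; i++) {
        if (strcmp(set[i], v) == 0) {
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    const char *uids[] = { "alice", "bob", "alice", "carol" };

    for (int i = 0; i < 4; i++) {
        /* Inverted test: the success path sits inside the if-block, so the
         * shared per-iteration cleanup below is reached on every path and
         * no `continue` is needed. */
        if (!set_contains(uids[i])) {
            set[set_len++] = uids[i];
        }
        /* per-iteration cleanup (the patch frees a Slapi_Value here) */
    }

    for (int i = 0; i < set_len; i++) {
        printf("%s\n", set[i]);
    }
    return 0;
}
```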
1f48f639b5ff5417a49e2ce1aa73d3860eb4da28
389ds/389-ds-base
Issue 6429 - UI - clicking on a database suffix under the Monitor tab crashes UI (#6610) Bug description: Clicking on a db suffix under the Monitor tab causes the UI to crash when the instance is configured with the mdb db engine. Fix description: Introduced separate database and suffix monitor classes tailored for mdb. Parent class detects the configured db engine and calls the appropriate monitor class. Fixes: https://github.com/389ds/389-ds-base/issues/6429 Reviewed by: @mreynolds389, @droideck (Thank you)
commit 1f48f639b5ff5417a49e2ce1aa73d3860eb4da28 Author: James Chapman <[email protected]> Date: Thu Mar 6 13:26:37 2025 +0000 Issue 6429 - UI - clicking on a database suffix under the Monitor tab crashes UI (#6610) Bug description: Clicking on a db suffix under the Monitor tab causes the UI to crash when the instance is configured with the mdb db engine. Fix description: Introduced separate database and suffix monitor classes tailored for mdb. Parent class detects the configured db engine and calls the appropriate monitor class. Fixes: https://github.com/389ds/389-ds-base/issues/6429 Reviewed by: @mreynolds389, @droideck (Thank you) diff --git a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx index a5a60c0fe..4c7fce706 100644 --- a/src/cockpit/389-console/src/lib/database/databaseConfig.jsx +++ b/src/cockpit/389-console/src/lib/database/databaseConfig.jsx @@ -1253,7 +1253,6 @@ export class GlobalDatabaseConfigMDB extends React.Component { // Check if a setting was changed, if so enable the save button for (const config_attr of check_attrs) { if (this.state[config_attr] !== this.state['_' + config_attr]) { - // jc console.log(config_attr); saveBtnDisabled = false; break; } diff --git a/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx b/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx index f3f51733b..08aa1aaea 100644 --- a/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx +++ b/src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx @@ -326,7 +326,6 @@ export class DatabaseMonitor extends React.Component { </GridItem> </Grid> </Tab> - <Tab eventKey={1} title={<TabTitleText>{_("Normalized DN Cache")}</TabTitleText>}> <div className="ds-margin-top-lg"> <Grid hasGutter> @@ -511,12 +510,394 @@ export class DatabaseMonitor extends React.Component { // Prop types and defaults DatabaseMonitor.propTypes = { + data: PropTypes.object, serverId: PropTypes.string, enableTree: PropTypes.func, }; DatabaseMonitor.defaultProps = { + data: {}, serverId: "", }; -export default DatabaseMonitor; +export class DatabaseMonitorMDB extends React.Component { + constructor (props) { + super(props); + this.state = { + activeTabKey: 0, + data: {}, + loading: true, + // refresh chart + cache_refresh: "", + count: 10, + ndnCount: 5, + dbCacheList: [], + ndnCacheList: [], + ndnCacheUtilList: [] + }; + + // Toggle currently active tab + this.handleNavSelect = (event, tabIndex) => { + this.setState({ + activeTabKey: tabIndex + }); + }; + + this.startCacheRefresh = this.startCacheRefresh.bind(this); + this.refreshCache = this.refreshCache.bind(this); + } + + componentDidMount() { + this.resetChartData(); + this.refreshCache(); + this.startCacheRefresh(); + this.props.enableTree(); + } + + componentWillUnmount() { + this.stopCacheRefresh(); + } + + resetChartData() { + this.setState({ + data: { + normalizeddncachehitratio: [0], + maxnormalizeddncachesize: [0], + currentnormalizeddncachesize: [0], + normalizeddncachetries: [0], + normalizeddncachehits: [0], + normalizeddncacheevictions: [0], + currentnormalizeddncachecount: [0], + normalizeddncachethreadsize: [0], + normalizeddncachethreadslots: [0], + }, + ndnCacheList: [ + { name: "", x: "1", y: 0 }, + { name: "", x: "2", y: 0 }, + { name: "", x: "3", y: 0 }, + { name: "", x: "4", y: 0 }, + { name: "", x: "5", y: 0 }, + ], + ndnCacheUtilList: [ + { name: "", x: "1", y: 0 }, + { name: "", x: "2", y: 0 }, + { name: "", x: "3", y: 0 }, + { name: "", x: "4", y: 0 }, + { name: "", x: "5", y: 0 
}, + ], + }); + } + + refreshCache() { + // Search for db cache stat and update state + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "monitor", "ldbm" + ]; + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(content => { + const config = JSON.parse(content); + let count = this.state.count + 1; + const ndnCount = this.state.ndnCount + 1; + if (count > 100) { + // Keep progress count in check + count = 1; + } + + // Build up the DB Cache chart data + const dbratio = config.attrs.dbcachehitratio[0]; + const chart_data = this.state.dbCacheList; + chart_data.shift(); + chart_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(dbratio) }); + + // Build up the NDN Cache chart data + const ndnratio = config.attrs.normalizeddncachehitratio[0]; + const ndn_chart_data = this.state.ndnCacheList; + ndn_chart_data.shift(); + ndn_chart_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(ndnratio) }); + + // Build up the DB Cache Util chart data + const ndn_util_chart_data = this.state.ndnCacheUtilList; + const currNDNSize = parseInt(config.attrs.currentnormalizeddncachesize[0]); + const maxNDNSize = parseInt(config.attrs.maxnormalizeddncachesize[0]); + const ndn_utilization = (currNDNSize / maxNDNSize) * 100; + ndn_util_chart_data.shift(); + ndn_util_chart_data.push({ name: _("Cache Utilization"), x: ndnCount.toString(), y: parseInt(ndn_utilization) }); + + this.setState({ + data: config.attrs, + loading: false, + dbCacheList: chart_data, + ndnCacheList: ndn_chart_data, + ndnCacheUtilList: ndn_util_chart_data, + count, + ndnCount + }); + }) + .fail(() => { + this.resetChartData(); + }); + } + + startCacheRefresh() { + this.setState({ + cache_refresh: setInterval(this.refreshCache, 2000), + }); + } + + stopCacheRefresh() { + clearInterval(this.state.cache_refresh); + } + + render() { + let chartColor = ChartThemeColor.green; + let ndnChartColor = ChartThemeColor.green; + let ndnUtilColor = ChartThemeColor.green; + let dbcachehit = 0; + let ndncachehit = 0; + let ndncachemax = 0; + let ndncachecurr = 0; + let utilratio = 0; + let content = ( + <div className="ds-margin-top-xlg ds-center"> + <TextContent> + <Text component={TextVariants.h3}> + {_("Loading database monitor information ...")} + </Text> + </TextContent> + <Spinner className="ds-margin-top-lg" size="xl" /> + </div> + ); + + if (!this.state.loading) { + dbcachehit = parseInt(this.state.data.dbcachehitratio[0]); + ndncachehit = parseInt(this.state.data.normalizeddncachehitratio[0]); + ndncachemax = parseInt(this.state.data.maxnormalizeddncachesize[0]); + ndncachecurr = parseInt(this.state.data.currentnormalizeddncachesize[0]); + utilratio = Math.round((ndncachecurr / ndncachemax) * 100); + if (utilratio === 0) { + // Just round up to 1 + utilratio = 1; + } + + // Database cache + if (dbcachehit > 89) { + chartColor = ChartThemeColor.green; + } else if (dbcachehit > 74) { + chartColor = ChartThemeColor.orange; + } else { + chartColor = ChartThemeColor.purple; + } + // NDN cache ratio + if (ndncachehit > 89) { + ndnChartColor = ChartThemeColor.green; + } else if (ndncachehit > 74) { + ndnChartColor = ChartThemeColor.orange; + } else { + ndnChartColor = ChartThemeColor.purple; + } + // NDN cache utilization + if (utilratio > 95) { + ndnUtilColor = ChartThemeColor.purple; + } else if (utilratio > 90) { + ndnUtilColor = ChartThemeColor.orange; + } else { + ndnUtilColor = ChartThemeColor.green; + } + + content = ( + <Tabs 
activeKey={this.state.activeTabKey} onSelect={this.handleNavSelect}> + <Tab eventKey={0} title={<TabTitleText>{_("Normalized DN Cache")}</TabTitleText>}> + <div className="ds-margin-top-lg"> + <Grid hasGutter> + <GridItem span={6}> + <Card isSelectable> + <CardBody> + <div className="ds-container"> + <div className="ds-center"> + <TextContent className="ds-margin-top-xlg" title={_("The normalized DN cache hit ratio (normalizeddncachehitratio).")}> + <Text component={TextVariants.h3}> + {_("Cache Hit Ratio")} + </Text> + </TextContent> + <TextContent> + <Text component={TextVariants.h2}> + <b>{ndncachehit}%</b> + </Text> + </TextContent> + </div> + <div className="ds-margin-left" style={{ height: '200px', width: '350px' }}> + <Chart + ariaDesc="NDN Cache" + ariaTitle={_("Live Normalized DN Cache Statistics")} + containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />} + height={200} + maxDomain={{ y: 100 }} + minDomain={{ y: 0 }} + padding={{ + bottom: 40, + left: 60, + top: 10, + right: 15, + }} + width={350} + themeColor={ndnChartColor} + > + <ChartAxis /> + <ChartAxis dependentAxis showGrid tickValues={[25, 50, 75, 100]} /> + <ChartGroup> + <ChartArea + data={this.state.ndnCacheList} + /> + </ChartGroup> + </Chart> + </div> + </div> + </CardBody> + </Card> + </GridItem> + <GridItem span={6}> + <Card isSelectable> + <CardBody> + <div className="ds-container"> + <div className="ds-center"> + <TextContent className="ds-margin-top-lg" title={_("The amount of the cache that is being used: max size (maxnormalizeddncachesize) vs current size (currentnormalizeddncachesize)")}> + <Text component={TextVariants.h2}> + {_("Cache Utilization")} + </Text> + </TextContent> + <TextContent> + <Text component={TextVariants.h3}> + <b>{utilratio}%</b> + </Text> + </TextContent> + <TextContent className="ds-margin-top-xlg"> + <Text component={TextVariants.h5}> + {_("Cached DN's")} + </Text> + </TextContent> + <b>{numToCommas(this.state.data.currentnormalizeddncachecount[0])}</b> + </div> + <div className="ds-margin-left" style={{ height: '200px', width: '350px' }}> + <Chart + ariaDesc="NDN Cache Utilization" + ariaTitle={_("Live Normalized DN Cache Utilization Statistics")} + containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />} + height={200} + maxDomain={{ y: 100 }} + minDomain={{ y: 0 }} + padding={{ + bottom: 40, + left: 60, + top: 10, + right: 15, + }} + width={350} + themeColor={ndnUtilColor} + > + <ChartAxis /> + <ChartAxis dependentAxis showGrid tickValues={[25, 50, 75, 100]} /> + <ChartGroup> + <ChartArea + data={this.state.ndnCacheUtilList} + /> + </ChartGroup> + </Chart> + </div> + </div> + </CardBody> + </Card> + </GridItem> + </Grid> + + <Grid hasGutter className="ds-margin-top-xlg"> + <GridItem span={3}> + {_("NDN Cache Hit Ratio:")} + </GridItem> + <GridItem span={2}> + <b>{this.state.data.normalizeddncachehitratio}%</b> + </GridItem> + <GridItem span={3}> + {_("NDN Cache Max Size:")} + </GridItem> + <GridItem span={2}> + <b>{displayBytes(this.state.data.maxnormalizeddncachesize)}</b> + </GridItem> + <GridItem span={3}> + {_("NDN Cache Tries:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.normalizeddncachetries)}</b> + </GridItem> + <GridItem span={3}> + {_("NDN Current Cache Size:")} + </GridItem> + <GridItem span={2}> + <b>{displayBytes(this.state.data.currentnormalizeddncachesize)}</b> + </GridItem> + <GridItem span={3}> + {_("NDN 
Cache Hits:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.normalizeddncachehits)}</b> + </GridItem> + <GridItem span={3}> + {_("NDN Cache DN Count:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.currentnormalizeddncachecount)}</b> + </GridItem> + <GridItem span={3}> + {_("NDN Cache Evictions:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.normalizeddncacheevictions)}</b> + </GridItem> + <GridItem span={3}> + {_("NDN Cache Thread Size:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.normalizeddncachethreadsize)}</b> + </GridItem> + <GridItem span={3}> + {_("NDN Cache Thread Slots:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.normalizeddncachethreadslots)}</b> + </GridItem> + </Grid> + </div> + </Tab> + </Tabs> + ); + } + + return ( + <div id="db-content"> + <TextContent> + <Text className="ds-sub-header" component={TextVariants.h2}> + {_("Database Performance Statistics")} + </Text> + </TextContent> + <div className="ds-margin-top-lg"> + {content} + </div> + + </div> + ); + } +} + +// Prop types and defaults + +DatabaseMonitorMDB.propTypes = { + data: PropTypes.object, + serverId: PropTypes.string, + enableTree: PropTypes.func, +}; + +DatabaseMonitorMDB.defaultProps = { + data: {}, + serverId: "", +}; diff --git a/src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx b/src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx index 464137731..ec78dbdc2 100644 --- a/src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx +++ b/src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx @@ -626,4 +626,365 @@ SuffixMonitor.defaultProps = { bename: "", }; -export default SuffixMonitor; +export class SuffixMonitorMDB extends React.Component { + constructor (props) { + super(props); + this.state = { + activeTabKey: 0, + data: {}, + loading: true, + // refresh charts + cache_refresh: "", + count: 10, + utilCount: 5, + entryCacheList: [], + entryUtilCacheList: [], + }; + + // Toggle currently active tab + this.handleNavSelect = (event, tabIndex) => { + this.setState({ + activeTabKey: tabIndex + }); + }; + + this.startCacheRefresh = this.startCacheRefresh.bind(this); + this.refreshSuffixCache = this.refreshSuffixCache.bind(this); + } + + componentDidMount() { + this.resetChartData(); + this.refreshSuffixCache(); + this.startCacheRefresh(); + this.props.enableTree(); + } + + componentWillUnmount() { + this.stopCacheRefresh(); + } + + resetChartData() { + this.setState({ + data: { + // Entry cache + entrycachehitratio: [0], + entrycachetries: [0], + entrycachehits: [0], + maxentrycachesize: [0], + currententrycachesize: [0], + maxentrycachecount: [0], + currententrycachecount: [0], + }, + entryCacheList: [ + { name: "", x: "1", y: 0 }, + { name: "", x: "2", y: 0 }, + { name: "", x: "3", y: 0 }, + { name: "", x: "4", y: 0 }, + { name: "", x: "5", y: 0 }, + { name: "", x: "6", y: 0 }, + { name: "", x: "7", y: 0 }, + { name: "", x: "8", y: 0 }, + { name: "", x: "9", y: 0 }, + { name: "", x: "10", y: 0 }, + ], + }); + } + + refreshSuffixCache() { + // Search for db cache stat and update state + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "monitor", "backend", this.props.suffix + ]; + log_cmd("refreshSuffixCache", "Get suffix monitor", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(content => { + const config = JSON.parse(content); + let count = this.state.count + 1; + const utilCount = 
this.state.utilCount + 1; + if (count > 100) { + // Keep progress count in check + count = 1; + } + + // Build up the Entry Cache chart data + const entryRatio = config.attrs.entrycachehitratio[0]; + const entry_data = this.state.entryCacheList; + entry_data.shift(); + entry_data.push({ name: _("Cache Hit Ratio"), x: count.toString(), y: parseInt(entryRatio) }); + + // Build up the Entry Util chart data + const entry_util_data = this.state.entryUtilCacheList; + let maxsize = config.attrs.maxentrycachesize[0]; + let currsize = config.attrs.currententrycachesize[0]; + let utilratio = Math.round((currsize / maxsize) * 100); + if (utilratio === 0) { + utilratio = 1; + } + entry_util_data.shift(); + entry_util_data.push({ name: _("Cache Utilization"), x: utilCount.toString(), y: parseInt(utilratio) }); + + this.setState({ + data: config.attrs, + loading: false, + entryCacheList: entry_data, + entryUtilCacheList: entry_util_data, + count, + utilCount + }); + }) + .fail(() => { + this.resetChartData(); + }); + } + + startCacheRefresh() { + this.setState({ + cache_refresh: setInterval(this.refreshSuffixCache, 2000) + }); + } + + stopCacheRefresh() { + clearInterval(this.state.cache_refresh); + } + + render() { + let entryChartColor = ChartThemeColor.green; + let entryUtilChartColor = ChartThemeColor.green; + let cachehit = 1; + let cachemax = 0; + let cachecurr = 0; + let cachecount = 0; + let utilratio = 1; + let SuffixIcon = TreeIcon; + + if (this.props.dbtype === "subsuffix") { + SuffixIcon = LeafIcon; + } + + let content = ( + <div className="ds-margin-top-xlg ds-center"> + <TextContent> + <Text component={TextVariants.h3}> + {_("Loading Suffix Monitor Information ...")} + </Text> + </TextContent> + <Spinner className="ds-margin-top-lg" size="xl" /> + </div> + ); + + if (!this.state.loading) { + // Entry cache + cachehit = parseInt(this.state.data.entrycachehitratio[0]); + cachemax = parseInt(this.state.data.maxentrycachesize[0]); + cachecurr = parseInt(this.state.data.currententrycachesize[0]); + cachecount = parseInt(this.state.data.currententrycachecount[0]); + utilratio = Math.round((cachecurr / cachemax) * 100); + + // Adjust ratios if needed + if (utilratio === 0) { + utilratio = 1; + } + + // Entry cache chart color + if (cachehit > 89) { + entryChartColor = ChartThemeColor.green; + } else if (cachehit > 74) { + entryChartColor = ChartThemeColor.orange; + } else { + entryChartColor = ChartThemeColor.purple; + } + // Entry cache utilization + if (utilratio > 95) { + entryUtilChartColor = ChartThemeColor.purple; + } else if (utilratio > 90) { + entryUtilChartColor = ChartThemeColor.orange; + } else { + entryUtilChartColor = ChartThemeColor.green; + } + + content = ( + <div id="monitor-suffix-page"> + <Tabs activeKey={this.state.activeTabKey} onSelect={this.handleNavSelect}> + <Tab eventKey={0} title={<TabTitleText>{_("Entry Cache")}</TabTitleText>}> + <div className="ds-margin-top"> + <Grid hasGutter> + <GridItem span={6}> + <Card isSelectable> + <CardBody> + <div className="ds-container"> + <div className="ds-center"> + <TextContent title={_("The entry cache hit ratio (entrycachehitratio)")}> + <Text className="ds-margin-top" component={TextVariants.h3}> + {_("Cache Hit Ratio")} + </Text> + </TextContent> + <TextContent> + <Text className="ds-margin-top" component={TextVariants.h2}> + <b>{cachehit}%</b> + </Text> + </TextContent> + </div> + <div className="ds-margin-left" style={{ height: '200px', width: '350px' }}> + <Chart + ariaDesc="Entry Cache" + ariaTitle={_("Live Entry Cache 
Statistics")} + containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />} + height={200} + maxDomain={{ y: 100 }} + minDomain={{ y: 0 }} + padding={{ + bottom: 40, + left: 60, + top: 10, + right: 15, + }} + width={350} + themeColor={entryChartColor} + > + <ChartAxis /> + <ChartAxis dependentAxis showGrid tickValues={[25, 50, 75, 100]} /> + <ChartGroup> + <ChartArea + data={this.state.entryCacheList} + /> + </ChartGroup> + </Chart> + </div> + </div> + </CardBody> + </Card> + </GridItem> + <GridItem span={6}> + <Card isSelectable> + <CardBody> + <div className="ds-container"> + <div className="ds-center"> + <TextContent title={_("The amount of the cache that is being used: max size (maxentrycachesize) vs current size (currententrycachesize)")}> + <Text className="ds-margin-top" component={TextVariants.h3}> + {_("Cache Utilization")} + </Text> + </TextContent> + <TextContent> + <Text component={TextVariants.h2}> + <b>{utilratio}%</b> + </Text> + </TextContent> + <TextContent> + <Text className="ds-margin-top-lg" component={TextVariants.h5}> + {_("Cached Entries")} + </Text> + </TextContent> + <b>{cachecount}</b> + </div> + <div className="ds-margin-left" style={{ height: '200px', width: '350px' }}> + <Chart + ariaDesc="Entry Cache Utilization" + ariaTitle={_("Live Entry Cache Utilization Statistics")} + containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />} + height={200} + maxDomain={{ y: 100 }} + minDomain={{ y: 0 }} + padding={{ + bottom: 40, + left: 60, + top: 10, + right: 15, + }} + width={350} + themeColor={entryUtilChartColor} + > + <ChartAxis /> + <ChartAxis dependentAxis showGrid tickValues={[25, 50, 75, 100]} /> + <ChartGroup> + <ChartArea + data={this.state.entryUtilCacheList} + /> + </ChartGroup> + </Chart> + </div> + </div> + </CardBody> + </Card> + </GridItem> + </Grid> + </div> + <Grid hasGutter className="ds-margin-top-xlg"> + <GridItem span={3}> + {_("Entry Cache Hit Ratio:")} + </GridItem> + <GridItem span={2}> + <b>{this.state.data.entrycachehitratio[0]}%</b> + </GridItem> + <GridItem span={3}> + {_("Entry Cache Max Size:")} + </GridItem> + <GridItem span={2}> + <b>{displayBytes(cachemax)} </b> + </GridItem> + + <GridItem span={3}> + {_("Entry Cache Hits:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.entrycachehits[0])}</b> + </GridItem> + <GridItem span={3}> + {_("Entry Cache Current Size:")} + </GridItem> + <GridItem span={2}> + <b>{displayBytes(cachecurr)}</b> + </GridItem> + <GridItem span={3}> + {_("Entry Cache Tries:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.entrycachetries[0])}</b> + </GridItem> + <GridItem span={3}> + {_("Entry Cache Max Entries:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.maxentrycachecount[0])}</b> + </GridItem> + <GridItem span={3}> + {_("Entry Cache Count:")} + </GridItem> + <GridItem span={2}> + <b>{numToCommas(this.state.data.currententrycachecount[0])}</b> + </GridItem> + </Grid> + </Tab> + </Tabs> + </div> + ); + } + + return ( + <div> + <TextContent> + <Text component={TextVariants.h2}> + <SuffixIcon /> {this.props.suffix} (<b>{this.props.bename}</b>) + </Text> + </TextContent> + <div className="ds-margin-top-lg"> + {content} + </div> + </div> + ); + } +} + +SuffixMonitorMDB.propTypes = { + serverId: PropTypes.string, + suffix: PropTypes.string, + bename: PropTypes.string, + enableTree: PropTypes.func, +}; + 
+SuffixMonitorMDB.defaultProps = { + serverId: "", + suffix: "", + bename: "", +}; diff --git a/src/cockpit/389-console/src/monitor.jsx b/src/cockpit/389-console/src/monitor.jsx index da959be07..7e0e0c5d4 100644 --- a/src/cockpit/389-console/src/monitor.jsx +++ b/src/cockpit/389-console/src/monitor.jsx @@ -3,8 +3,8 @@ import React from "react"; import { log_cmd } from "./lib/tools.jsx"; import PropTypes from "prop-types"; import ServerMonitor from "./lib/monitor/serverMonitor.jsx"; -import DatabaseMonitor from "./lib/monitor/dbMonitor.jsx"; -import SuffixMonitor from "./lib/monitor/suffixMonitor.jsx"; +import { DatabaseMonitor, DatabaseMonitorMDB } from "./lib/monitor/dbMonitor.jsx"; +import { SuffixMonitor, SuffixMonitorMDB } from "./lib/monitor/suffixMonitor.jsx"; import ChainingMonitor from "./lib/monitor/chainingMonitor.jsx"; import AccessLogMonitor from "./lib/monitor/accesslog.jsx"; import AuditLogMonitor from "./lib/monitor/auditlog.jsx"; @@ -35,6 +35,8 @@ import { const _ = cockpit.gettext; +const BE_IMPL_MDB = "mdb"; + export class Monitor extends React.Component { constructor(props) { super(props); @@ -82,6 +84,8 @@ export class Monitor extends React.Component { auditlogLocation: "", auditfaillogLocation: "", securitylogLocation: "", + // DB engine, bdb or mdb (default) + dbEngine: BE_IMPL_MDB, }; // Bindings @@ -98,6 +102,7 @@ export class Monitor extends React.Component { this.loadMonitorChaining = this.loadMonitorChaining.bind(this); this.loadDiskSpace = this.loadDiskSpace.bind(this); this.reloadDisks = this.reloadDisks.bind(this); + this.getDBEngine = this.getDBEngine.bind(this); // Replication this.onHandleLoadMonitorReplication = this.onHandleLoadMonitorReplication.bind(this); this.loadCleanTasks = this.loadCleanTasks.bind(this); @@ -114,6 +119,10 @@ export class Monitor extends React.Component { this.loadMonitor = this.loadMonitor.bind(this); } + componentDidMount() { + this.getDBEngine(); + } + componentDidUpdate(prevProps) { if (this.props.wasActiveList.includes(6)) { if (this.state.firstLoad) { @@ -580,6 +589,32 @@ export class Monitor extends React.Component { }); } + getDBEngine () { + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "backend", "config", "get" + ]; + log_cmd("getDBEngine", "Get DB Implementation", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(content => { + const config = JSON.parse(content); + const attrs = config.attrs; + if ('nsslapd-backend-implement' in attrs) { + this.setState({ + dbEngine: attrs['nsslapd-backend-implement'][0], + }); + } + }) + .fail(err => { + const errMsg = JSON.parse(err); + this.props.addNotification( + "error", + cockpit.format("Error detecting DB implementation type - $0", errMsg.desc) + ); + }); + } + reloadSNMP() { const cmd = [ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", @@ -955,13 +990,24 @@ export class Monitor extends React.Component { </div> ); } else { - monitor_element = ( - <DatabaseMonitor - data={this.state.ldbmData} - enableTree={this.enableTree} - serverId={this.props.serverId} - /> - ); + if (this.state.dbEngine === BE_IMPL_MDB) { + monitor_element = ( + <DatabaseMonitorMDB + data={this.state.ldbmData} + enableTree={this.enableTree} + serverId={this.props.serverId} + /> + ); + } else { + monitor_element = ( + <DatabaseMonitor + data={this.state.ldbmData} + enableTree={this.enableTree} + serverId={this.props.serverId} + /> + ); + } + } } else if (this.state.node_name === "server-monitor") { 
if (this.state.serverLoading) { @@ -1142,16 +1188,29 @@ export class Monitor extends React.Component { ); } else { // Suffix - monitor_element = ( - <SuffixMonitor - serverId={this.props.serverId} - suffix={this.state.node_text} - bename={this.state.bename} - enableTree={this.enableTree} - key={this.state.node_text} - addNotification={this.props.addNotification} - /> - ); + if (this.state.dbEngine === BE_IMPL_MDB) { + monitor_element = ( + <SuffixMonitorMDB + serverId={this.props.serverId} + suffix={this.state.node_text} + bename={this.state.bename} + enableTree={this.enableTree} + key={this.state.node_text} + addNotification={this.props.addNotification} + /> + ); + } else { + monitor_element = ( + <SuffixMonitor + serverId={this.props.serverId} + suffix={this.state.node_text} + bename={this.state.bename} + enableTree={this.enableTree} + key={this.state.node_text} + addNotification={this.props.addNotification} + /> + ); + } } } }
0
804684254516c0c04d6d00080d38e3cbd0dd06b3
389ds/389-ds-base
Ticket 50344 - tidy rpm vs build systemd flag handling Bug Description: In rpm builds we would read with_systemd from defaults.inf, which has a diffeent value to hand-building. AS a result this caused as issue in dscontainer on opensuse where it believed systemd was present. Fix Description: Simplify the systemd handling to a single flag which is possible to override in a container env. https://pagure.io/389-ds-base/issue/50344 Author: William Brown <[email protected]> Review by: ???
commit 804684254516c0c04d6d00080d38e3cbd0dd06b3 Author: William Brown <[email protected]> Date: Wed May 1 11:38:11 2019 +1000 Ticket 50344 - tidy rpm vs build systemd flag handling Bug Description: In rpm builds we would read with_systemd from defaults.inf, which has a diffeent value to hand-building. AS a result this caused as issue in dscontainer on opensuse where it believed systemd was present. Fix Description: Simplify the systemd handling to a single flag which is possible to override in a container env. https://pagure.io/389-ds-base/issue/50344 Author: William Brown <[email protected]> Review by: ??? diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 0c5852acc..df1add7e6 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -415,6 +415,9 @@ class DirSrv(SimpleLDAPObject, object): self.confdir = None self.ds_paths = Paths(instance=self) + # Set the default systemd status. This MAY be overidden in the setup utils + # as required. + self.systemd = self.ds_paths.with_systemd # Reset the args (py.test reuses the args_instance for each test case) # We allocate a "default" prefix here which allows an un-allocate or @@ -423,7 +426,6 @@ class DirSrv(SimpleLDAPObject, object): # ds = lib389.DirSrv() # ds.list(all=True) # self.ds_paths.prefix = args_instance[SER_DEPLOYED_DIR] - self.containerised = False self.__wrapmethods() self.__add_brookers__() @@ -1126,12 +1128,14 @@ class DirSrv(SimpleLDAPObject, object): if self.status() is True: return - if self.with_systemd() and not self.containerised: + if self.with_systemd(): + self.log.debug("systemd status -> True") # Do systemd things here ... subprocess.check_call(["systemctl", "start", "dirsrv@%s" % self.serverid]) else: + self.log.debug("systemd status -> False") # Start the process. # Wait for it to terminate # This means the server is probably ready to go .... @@ -1190,12 +1194,14 @@ class DirSrv(SimpleLDAPObject, object): if self.status() is False: return - if self.with_systemd() and not self.containerised: + if self.with_systemd(): + self.log.debug("systemd status -> True") # Do systemd things here ... subprocess.check_call(["systemctl", "stop", "dirsrv@%s" % self.serverid]) else: + self.log.debug("systemd status -> False") # TODO: Make the pid path in the files things # TODO: use the status call instead!!!! count = timeout @@ -1217,7 +1223,8 @@ class DirSrv(SimpleLDAPObject, object): Will update the self.state parameter. """ - if self.with_systemd() and not self.containerised: + if self.with_systemd(): + self.log.debug("systemd status -> True") # Do systemd things here ... rc = subprocess.call(["systemctl", "is-active", "--quiet", @@ -1229,6 +1236,7 @@ class DirSrv(SimpleLDAPObject, object): self.state = DIRSRV_STATE_OFFLINE return False else: + self.log.debug("systemd status -> False") # TODO: Make the pid path in the files things # TODO: use the status call instead!!!! pid = pid_from_file(self.ds_paths.pid_file) @@ -1706,7 +1714,7 @@ class DirSrv(SimpleLDAPObject, object): return self.ds_paths.asan_enabled def with_systemd(self): - return self.ds_paths.with_systemd + return self.systemd def get_server_tls_subject(self): """ Get the servers TLS subject line for enrollment purposes. diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py index 757450d39..c6ca83c72 100644 --- a/src/lib389/lib389/instance/setup.py +++ b/src/lib389/lib389/instance/setup.py @@ -804,7 +804,8 @@ class SetupDs(object): # Should I move this import? 
I think this prevents some recursion from lib389 import DirSrv ds_instance = DirSrv(self.verbose) - ds_instance.containerised = self.containerised + if self.containerised: + ds_instance.systemd = general['systemd'] args = { SER_PORT: slapd['port'], SER_SERVERID_PROP: slapd['instance_name'],
0
7adcd8af03afa7bd8934abc2bcc10e19f36ded7b
389ds/389-ds-base
Issue 6481 - UI - When ports that are in use are used to update a DS instance the error message is not helpful (#6482) Bug description: When updating port values on a DS instance, if the port value is already in use the error message displayed by the UI is not helpful. Fix description: Add a UI method that checks if the updated port value is already in use. If it is, disable the save button. Fixes: https://github.com/389ds/389-ds-base/issues/6481 Reviewed by: @mreynolds389, @droideck (Thank you)
commit 7adcd8af03afa7bd8934abc2bcc10e19f36ded7b Author: James Chapman <[email protected]> Date: Tue Jan 14 21:17:00 2025 +0000 Issue 6481 - UI - When ports that are in use are used to update a DS instance the error message is not helpful (#6482) Bug description: When updating port values on a DS instance, if the port value is already in use the error message displayed by the UI is not helpful. Fix description: Add a UI method that checks if the updated port value is already in use. If it is, disable the save button. Fixes: https://github.com/389ds/389-ds-base/issues/6481 Reviewed by: @mreynolds389, @droideck (Thank you) diff --git a/src/cockpit/389-console/src/lib/server/settings.jsx b/src/cockpit/389-console/src/lib/server/settings.jsx index 8aba81202..ccc1debc5 100644 --- a/src/cockpit/389-console/src/lib/server/settings.jsx +++ b/src/cockpit/389-console/src/lib/server/settings.jsx @@ -1,6 +1,6 @@ import cockpit from "cockpit"; import React from "react"; -import { log_cmd, valid_dn, isValidIpAddress } from "../tools.jsx"; +import { log_cmd, valid_dn, isValidIpAddress, is_port_in_use } from "../tools.jsx"; import { Button, Checkbox, @@ -189,11 +189,14 @@ export class ServerSettings extends React.Component { this.reloadDiskMonitoring = this.reloadDiskMonitoring.bind(this); this.handleSaveAdvanced = this.handleSaveAdvanced.bind(this); this.reloadAdvanced = this.reloadAdvanced.bind(this); + this.validateSaveBtn = this.validateSaveBtn.bind(this); + this.onMinusConfig = (id, nav_tab) => { this.setState({ [id]: Number(this.state[id]) - 1 }, () => { this.validateSaveBtn(nav_tab, id, Number(this.state[id])) }); - }; + } + this.onConfigChange = (event, id, min, max, nav_tab) => { let maxValue = this.maxValue; if (max !== 0) { @@ -203,14 +206,14 @@ export class ServerSettings extends React.Component { newValue = newValue > maxValue ? maxValue : newValue < min ? min : newValue; this.setState({ [id]: newValue - }, () => { this.validateSaveBtn(nav_tab, id, newValue) }); - }; + }, () => { this.validateSaveBtn(nav_tab, id, Number(this.state[id])) }); + } + this.onPlusConfig = (id, nav_tab) => { this.setState({ [id]: Number(this.state[id]) + 1 }, () => { this.validateSaveBtn(nav_tab, id, Number(this.state[id])) }); - }; - this.validateSaveBtn = this.validateSaveBtn.bind(this); + } } componentDidMount() { @@ -253,7 +256,7 @@ export class ServerSettings extends React.Component { } } - validateSaveBtn(nav_tab, attr, value) { +async validateSaveBtn(nav_tab, attr, value) { let disableSaveBtn = true; let disableBtnName = ""; let config_attrs = []; @@ -300,6 +303,25 @@ export class ServerSettings extends React.Component { valueErr = true; disableSaveBtn = true; } + if (attr === 'nsslapd-port' || attr === 'nsslapd-secureport') { + const portValue = Number(value) + if (!isNaN(portValue)) { + try { + // Check port value is not already in use. 
+ const portInUse = await is_port_in_use(portValue); + if (portInUse) { + disableSaveBtn = true; + if (portValue !== Number(this.state['_' + attr])) { + valueErr = true; + } + } + } catch (error) { + console.error("Error checking port:", error); + disableSaveBtn = true; + valueErr = true; + } + } + } } else if (nav_tab === "rootdn") { // Handle validating passwords are in sync if (attr === 'nsslapd-rootpw') { diff --git a/src/cockpit/389-console/src/lib/tools.jsx b/src/cockpit/389-console/src/lib/tools.jsx index b4415687a..c7460f88a 100644 --- a/src/cockpit/389-console/src/lib/tools.jsx +++ b/src/cockpit/389-console/src/lib/tools.jsx @@ -212,6 +212,32 @@ export function valid_port(val) { return result; } +export function is_port_in_use(port) { + // Check if a port number is being used + return new Promise((resolve, reject) => { + // First check port number is within range + if (!valid_port(port)) { + reject('Invalid port number'); + return; + } + + let cmd = ['bash', '-c', `sudo lsof -i :${port} || echo "free"`]; + log_cmd("is_port_in_use", cmd); + + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done((result) => { + const isPortInUse = result.trim() !== "free"; + // Resolve the promise with a result + resolve(isPortInUse); + }) + .fail((error) => { + // Reject the promise on error + reject('Error checking port'); + }); + }); +} + export function valid_dn(dn) { // Validate value is a valid DN (sanity validation) if (dn === "" || dn.endsWith(",")) {
0
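The UI change above checks whether a port is already in use before enabling the save button; the console does this by spawning `lsof` through cockpit. Purely as a language-consistent illustration of the underlying idea, here is a POSIX C sketch that probes a port with `bind()` on the loopback address. This is a different mechanism than the patch uses, and the port number is arbitrary.

```c
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Probe whether a TCP port on localhost is taken by trying to bind it.
 * Returns 1 if the port is in use, 0 if free, -1 on error. */
static int port_in_use(unsigned short port)
{
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        return -1;
    }

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
    addr.sin_port = htons(port);

    int rc = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
    int in_use = (rc < 0 && errno == EADDRINUSE) ? 1 : (rc < 0 ? -1 : 0);
    close(fd);
    return in_use;
}

int main(void)
{
    int port = 38901;   /* arbitrary test port */
    int rc = port_in_use((unsigned short)port);

    printf("port %d: %s\n", port,
           rc == 1 ? "in use" : rc == 0 ? "free" : "error");
    return 0;
}
```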
659b777ac2d2835bba5de0979a9a48ccc7ea0665
389ds/389-ds-base
Ticket 47629 - random crashes related to sync repl Bug Description: if there is no cookie, the persist thread starts before the initial refresh is complete, both threads use the same operation structure and there is a race condition on setting and freeing the search entry. Fix Description: ensure that the persist thread only starts sending updates once the refresh is complete https://fedorahosted.org/389/ticket/47629 Reviewed by: Rich, thanks
commit 659b777ac2d2835bba5de0979a9a48ccc7ea0665 Author: Ludwig Krispenz <[email protected]> Date: Fri Dec 20 14:52:44 2013 +0100 Ticket 47629 - random crashes related to sync repl Bug Description: if there is no cookie, the persist thread starts before the initial refresh is complete, both threads use the same operation structure and there is a race condition on setting and freeing the search entry. Fix Description: ensure that the persist thread only starts sending updates once the refresh is complete https://fedorahosted.org/389/ticket/47629 Reviewed by: Rich, thanks diff --git a/ldap/servers/plugins/sync/sync.h b/ldap/servers/plugins/sync/sync.h index cf73ce941..8cdc7d028 100644 --- a/ldap/servers/plugins/sync/sync.h +++ b/ldap/servers/plugins/sync/sync.h @@ -115,7 +115,7 @@ int sync_is_active (Slapi_Entry *e, Slapi_PBlock *pb); int sync_is_active_scope (const Slapi_DN *dn, Slapi_PBlock *pb); int sync_refresh_update_content(Slapi_PBlock *pb, Sync_Cookie *client_cookie, Sync_Cookie *session_cookie); -int sync_refresh_initial_content(Slapi_PBlock *pb, int persist, Sync_Cookie *session_cookie); +int sync_refresh_initial_content(Slapi_PBlock *pb, int persist, PRThread *tid, Sync_Cookie *session_cookie); int sync_read_entry_from_changelog( Slapi_Entry *cl_entry, void *cb_data); int sync_send_entry_from_changelog( Slapi_PBlock *pb, int chg_req, char *uniqueid); void sync_send_deleted_entries (Slapi_PBlock *pb, Sync_UpdateNode *upd, int chg_count, Sync_Cookie *session_cookie); @@ -190,7 +190,8 @@ typedef struct sync_request_list { #define SYNC_FLAG_SEND_INTERMEDIATE 0x08 typedef struct sync_op_info { - int send_flag; /* hint for preop plugins what to send */ - Sync_Cookie *cookie; /* cookie to add in control */ + int send_flag; /* hint for preop plugins what to send */ + Sync_Cookie *cookie;/* cookie to add in control */ + PRThread *tid; /* thread for persistent phase */ } SyncOpInfo; diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c index 7cd65c85b..680bcf689 100644 --- a/ldap/servers/plugins/sync/sync_persist.c +++ b/ldap/servers/plugins/sync/sync_persist.c @@ -351,7 +351,8 @@ sync_persist_terminate (PRThread *tid) cur = sync_request_list->sync_req_head; while ( NULL != cur ) { if ( cur->req_tid == tid ) { - cur->req_active = PR_TRUE; + cur->req_active = PR_FALSE; + cur->req_complete = PR_TRUE; rc = 0; break; } diff --git a/ldap/servers/plugins/sync/sync_refresh.c b/ldap/servers/plugins/sync/sync_refresh.c index 71a0edd35..18f788467 100644 --- a/ldap/servers/plugins/sync/sync_refresh.c +++ b/ldap/servers/plugins/sync/sync_refresh.c @@ -37,7 +37,7 @@ #include "sync.h" -static SyncOpInfo *new_SyncOpInfo(int flag, Sync_Cookie *cookie); +static SyncOpInfo *new_SyncOpInfo(int flag, PRThread *tid, Sync_Cookie *cookie); static int sync_extension_type; static int sync_extension_handle; @@ -157,7 +157,7 @@ int sync_srch_refresh_pre_search(Slapi_PBlock *pb) sync_result_err(pb,rc, "Invalid session cookie"); } } else { - rc = sync_refresh_initial_content (pb, sync_persist, session_cookie); + rc = sync_refresh_initial_content (pb, sync_persist, tid, session_cookie); if (rc == 0 && !sync_persist) /* maintained in postop code */ session_cookie = NULL; @@ -172,8 +172,9 @@ int sync_srch_refresh_pre_search(Slapi_PBlock *pb) Slapi_Operation *operation; slapi_pblock_get(pb, SLAPI_OPERATION, &operation); - rc = sync_persist_startup(tid, session_cookie); - + if (client_cookie) { + rc = sync_persist_startup(tid, session_cookie); + } if (rc == 0) { session_cookie = 
NULL; /* maintained in persist code */ slapi_operation_set_flag(operation, OP_FLAG_SYNC_PERSIST); @@ -216,6 +217,8 @@ int sync_srch_refresh_post_search(Slapi_PBlock *pb) * depending on the operation type, reset flag */ info->send_flag &= ~SYNC_FLAG_ADD_STATE_CTRL; + /* activate the persistent phase thread*/ + sync_persist_startup(info->tid, info->cookie); } if (info->send_flag & SYNC_FLAG_ADD_DONE_CTRL) { LDAPControl **ctrl = (LDAPControl **)slapi_ch_calloc(2, sizeof (LDAPControl *)); @@ -320,7 +323,7 @@ sync_refresh_update_content(Slapi_PBlock *pb, Sync_Cookie *client_cookie, Sync_C } int -sync_refresh_initial_content(Slapi_PBlock *pb, int sync_persist, Sync_Cookie *sc) +sync_refresh_initial_content(Slapi_PBlock *pb, int sync_persist, PRThread *tid, Sync_Cookie *sc) { /* the entries will be sent in the normal search process, but * - a control has to be sent with each entry @@ -341,11 +344,13 @@ sync_refresh_initial_content(Slapi_PBlock *pb, int sync_persist, Sync_Cookie *sc (SYNC_FLAG_ADD_STATE_CTRL | SYNC_FLAG_SEND_INTERMEDIATE | SYNC_FLAG_NO_RESULT, + tid, sc); } else { info = new_SyncOpInfo (SYNC_FLAG_ADD_STATE_CTRL | SYNC_FLAG_ADD_DONE_CTRL, + tid, sc); } sync_set_operation_extension(pb, info); @@ -672,10 +677,11 @@ sync_send_entry_from_changelog(Slapi_PBlock *pb,int chg_req, char *uniqueid) } static SyncOpInfo* -new_SyncOpInfo(int flag, Sync_Cookie *cookie) { +new_SyncOpInfo(int flag, PRThread *tid, Sync_Cookie *cookie) { SyncOpInfo *spec = (SyncOpInfo *)slapi_ch_calloc(1, sizeof(SyncOpInfo)); spec->send_flag = flag; spec->cookie = cookie; + spec->tid = tid; return spec; }
0
ed8987efe721a5bd27bbe6a398bb996a013c9875
389ds/389-ds-base
Issue 6695 - UI - fix more minor issues Description: fix the following items: MemberOf page - subtree scopes allow blank values to be created and do not validate suffixes. Local password policy - a copy/paste error added a duplicate label for a checkbox (send expiring warning). Create instance modal - did not validate the database name. Update the connection monitor chart to include detailed information for connection status (established, close wait, and time wait). relates: https://github.com/389ds/389-ds-base/issues/6695 Reviewed by: spichugi(Thanks!)
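The UI changes above largely come down to rejecting blank or malformed values before they reach the configuration. The console code is JSX, but the guard is the same idea; the C helper below is a hypothetical illustration only, not the project's validation code.

```c
/* Hypothetical "accept only non-blank values" guard, shown in C for
 * illustration; the real checks (valid_dn, valid_db_name) live in the
 * console's JSX code in the diff below. */
#include <ctype.h>
#include <stdio.h>

/* Accept a value only if it is non-blank after trimming leading whitespace.
 * A real DN or database-name check would go further (syntax, allowed chars). */
static int value_is_usable(const char *value)
{
    if (value == NULL)
        return 0;
    while (*value && isspace((unsigned char)*value))
        value++;
    return *value != '\0';
}

int main(void)
{
    printf("%d\n", value_is_usable("   "));                  /* 0: blank, reject */
    printf("%d\n", value_is_usable("ou=people,dc=example"));  /* 1: keep          */
    return 0;
}
```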
commit ed8987efe721a5bd27bbe6a398bb996a013c9875 Author: Mark Reynolds <[email protected]> Date: Wed Mar 26 16:33:22 2025 -0400 Issue 6695 - UI - fix more minor issues Description: fix the following items: MemberOf page - subtree scopes - allows you to create blank values, and does not validate suffixes Local password policy - copy/paste error added a duplicate label for a checkbox (send expiring warning) Create instance modal - did not validate database name Update connection monitor chart to include detailed information for connection status (established, close wait, and time wait) relates: https://github.com/389ds/389-ds-base/issues/6695 Reviewed by: spichugi(Thanks!) diff --git a/src/cockpit/389-console/src/dsModals.jsx b/src/cockpit/389-console/src/dsModals.jsx index 59e711db3..367cf4759 100644 --- a/src/cockpit/389-console/src/dsModals.jsx +++ b/src/cockpit/389-console/src/dsModals.jsx @@ -4,7 +4,13 @@ import PropTypes from "prop-types"; import { DoubleConfirmModal } from "./lib/notifications.jsx"; import { BackupTable } from "./lib/database/databaseTables.jsx"; import { BackupModal } from "./lib/database/backups.jsx"; -import { log_cmd, bad_file_name, valid_dn, callCmdStreamPassword } from "./lib/tools.jsx"; +import { + log_cmd, + bad_file_name, + valid_dn, + valid_db_name, + callCmdStreamPassword +} from "./lib/tools.jsx"; import { Button, Checkbox, @@ -103,10 +109,6 @@ export class CreateInstanceModal extends React.Component { 'createDM' ]; - const optionalAttrs = [ - 'createDBName' - ]; - // Handle server ID if (this.state.createServerId !== "") { if (this.state.createServerId.length > 80) { @@ -142,11 +144,9 @@ export class CreateInstanceModal extends React.Component { } if (this.state.createDBCheckbox) { - for (const attr of optionalAttrs) { - if (this.state[attr] === "") { - all_good = false; - errObj[attr] = true; - } + if (!valid_db_name(this.state.createDBName)) { + all_good = false; + errObj["createDBName"] = true; } if (!valid_dn(this.state.createDBSuffix)) { all_good = false; @@ -636,7 +636,7 @@ export class CreateInstanceModal extends React.Component { <FormHelperText > <HelperText> <HelperTextItem variant="error"> - {_("Name is required")} + {createDBName === "" ? 
_("Name is required") : "Invalid database name"} </HelperTextItem> </HelperText> </FormHelperText> diff --git a/src/cockpit/389-console/src/lib/database/localPwp.jsx b/src/cockpit/389-console/src/lib/database/localPwp.jsx index 9db8343b7..cb84be906 100644 --- a/src/cockpit/389-console/src/lib/database/localPwp.jsx +++ b/src/cockpit/389-console/src/lib/database/localPwp.jsx @@ -398,9 +398,6 @@ class CreatePolicy extends React.Component { </GridItem> </Grid> <Grid className="ds-margin-top" title={_("Always return a password expiring control when requested (passwordSendExpiringTime).")}> - <GridItem className="ds-label" span={4}> - {_("Send Password Expiring Warning")} - </GridItem> <GridItem span={4}> <Checkbox id="create_passwordsendexpiringtime" diff --git a/src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx b/src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx index 513e1d792..a736ff532 100644 --- a/src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx +++ b/src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx @@ -37,6 +37,7 @@ import { import { SyncAltIcon } from "@patternfly/react-icons"; const _ = cockpit.gettext; +const refresh_interval = 10000; // 10 seconds export class ServerMonitor extends React.Component { constructor (props) { @@ -47,12 +48,20 @@ export class ServerMonitor extends React.Component { const initResChart = []; const initSwapChart = []; const initConnChart = []; - for (let idx = 1; idx <= 6; idx++) { - initCPUChart.push({ name: 'CPU', x: "0:00:00", y: 0 }); - initResChart.push({ name: 'Resident', x: "0:00:00", y: 0 }); - initVirtChart.push({ name: 'Virtual', x: "0:00:00", y: 0 }); - initSwapChart.push({ name: 'Swap', x: "0:00:00", y: 0 }); - initConnChart.push({ name: 'Connection', x: "0:00:00", y: 0 }); + const initConnEstablishedChart = []; + const initConnTimeWaitChart = []; + const initConnCloseWaitChart = []; + for (let idx = 0; idx <= 5; idx++) { + const value = refresh_interval / 1000; + const x_value = "0:00:" + (idx === 0 ? 
"00" : value * idx).toString(); + initCPUChart.push({ name: 'CPU', x: x_value, y: 0 }); + initResChart.push({ name: 'Resident', x: x_value, y: 0 }); + initVirtChart.push({ name: 'Virtual', x: x_value, y: 0 }); + initSwapChart.push({ name: 'Swap', x: x_value, y: 0 }); + initConnChart.push({ name: 'Connections', x: x_value, y: 0 }); + initConnTimeWaitChart.push({ name: 'Connections time wait', x: x_value, y: 0 }); + initConnCloseWaitChart.push({ name: 'Connections close wait', x: x_value, y: 0 }); + initConnEstablishedChart.push({ name: 'Connections established', x: x_value, y: 0 }); } this.state = { @@ -71,11 +80,17 @@ export class ServerMonitor extends React.Component { initResChart, initSwapChart, initConnChart, + initConnEstablishedChart, + initConnTimeWaitChart, + initConnCloseWaitChart, cpuChart: [...initCPUChart], memVirtChart: [...initVirtChart], memResChart: [...initResChart], swapChart: [...initSwapChart], connChart: [...initConnChart], + connEstablishedChart: [...initConnEstablishedChart], + connTimeWaitChart: [...initConnTimeWaitChart], + connCloseWaitChart: [...initConnCloseWaitChart], }; this.handleNavSelect = (event, tabIndex) => { @@ -110,6 +125,9 @@ export class ServerMonitor extends React.Component { memResChart: [...this.state.initResChart], swapChart: [...this.state.initSwapChart], connChart: [...this.state.initConnChart], + connCloseWaitChart: [...this.state.initConnCloseWaitChart], + connTimeWaitChart: [...this.state.initConnTimeWaitChart], + connEstablishedChart: [...this.state.initConnEstablishedChart], }); } @@ -123,6 +141,9 @@ export class ServerMonitor extends React.Component { let res_mem = 0; let swap_mem = 0; let current_conns = 0; + let conn_established = 0; + let conn_close_wait = 0; + let conn_time_wait = 0; let total_threads = 0; let conn_highmark = this.state.conn_highmark; let cpu_tick_values = this.state.cpu_tick_values; @@ -147,6 +168,9 @@ export class ServerMonitor extends React.Component { res_mem = attrs['rss'][0]; swap_mem = attrs['swap'][0]; current_conns = attrs['connection_count'][0]; + conn_established = attrs['connection_established_count'][0]; + conn_close_wait = attrs['connection_close_wait_count'][0]; + conn_time_wait = attrs['connection_time_wait_count'][0]; total_threads = attrs['total_threads'][0]; mem_total = attrs['total_mem'][0]; @@ -196,6 +220,18 @@ export class ServerMonitor extends React.Component { connChart.shift(); connChart.push({ name: _("Connections"), x: interval, y: parseInt(current_conns) }); + const connEstablishedChart = this.state.connEstablishedChart; + connEstablishedChart.shift(); + connEstablishedChart.push({ name: _("Connections established"), x: interval, y: parseInt(conn_established) }); + + const connTimeWaitChart = this.state.connTimeWaitChart; + connTimeWaitChart.shift(); + connTimeWaitChart.push({ name: _("Connections time wait"), x: interval, y: parseInt(conn_time_wait) }); + + const connCloseWaitChart = this.state.connCloseWaitChart; + connCloseWaitChart.shift(); + connCloseWaitChart.push({ name: _("Connections close wait"), x: interval, y: parseInt(conn_close_wait) }); + this.setState({ cpu_tick_values, conn_tick_values, @@ -204,8 +240,14 @@ export class ServerMonitor extends React.Component { memResChart, swapChart, connChart, + connTimeWaitChart, + connCloseWaitChart, + connEstablishedChart, conn_highmark, current_conns, + conn_close_wait, + conn_time_wait, + conn_established, mem_virt_size: virt_mem, mem_res_size: res_mem, mem_swap_size: swap_mem, @@ -224,7 +266,7 @@ export class ServerMonitor extends 
React.Component { startRefresh() { this.setState({ - chart_refresh: setInterval(this.refreshCharts, 10000), + chart_refresh: setInterval(this.refreshCharts, refresh_interval), }); } @@ -236,8 +278,14 @@ export class ServerMonitor extends React.Component { const { cpu, connChart, + connTimeWaitChart, + connCloseWaitChart, + connEstablishedChart, cpuChart, current_conns, + conn_established, + conn_close_wait, + conn_time_wait, memResChart, memVirtChart, swapChart, @@ -312,15 +360,33 @@ export class ServerMonitor extends React.Component { <Card className="ds-margin-top-lg"> <CardBody> <Grid> - <GridItem span="4" className="ds-center" title={_("Established client connections to the server")}> - <TextContent> - <Text className="ds-margin-top-xlg" component={TextVariants.h3}> - {_("Connections")} + <GridItem span="4" title={_("Established client connections to the server")}> + <div className="ds-center" > + <TextContent> + <Text component={TextVariants.h2}> + {_("Connections")} + </Text> + </TextContent> + <TextContent> + <Text component={TextVariants.h6}> + <b>{numToCommas(current_conns)}</b> + </Text> + </TextContent> + <Divider className="ds-margin-top ds-margin-bottom"/> + </div> + <TextContent className="ds-margin-top-lg" title="Connections that are in an ESTABLISHED state"> + <Text component={TextVariants.p}> + Established: &nbsp;&nbsp;<b>{numToCommas(conn_established)}</b> </Text> </TextContent> - <TextContent> - <Text component={TextVariants.h6}> - <b>{numToCommas(current_conns)}</b> + <TextContent className="ds-margin-top-lg" title="Connections that are in a CLOSE_WAIT state"> + <Text component={TextVariants.p}> + Close wait: &nbsp;&nbsp;<b>{numToCommas(conn_close_wait)}</b> + </Text> + </TextContent> + <TextContent className="ds-margin-top-lg" title="Connections that are in a TIME_WAIT state"> + <Text component={TextVariants.p}> + Time wait: &nbsp;&nbsp;<b>{numToCommas(conn_time_wait)}</b> </Text> </TextContent> </GridItem> @@ -329,13 +395,13 @@ export class ServerMonitor extends React.Component { ariaDesc="connection stats" ariaTitle={_("Live Connection Statistics")} containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}`} constrainToVisibleArea />} - height={200} + height={220} minDomain={{ y: 0 }} padding={{ bottom: 30, - left: 55, + left: 60, top: 10, - right: 25, + right: 30, }} > <ChartAxis /> @@ -344,6 +410,18 @@ export class ServerMonitor extends React.Component { <ChartArea data={connChart} /> + <ChartArea + data={connEstablishedChart} + interpolation="monotoneX" + /> + <ChartArea + data={connTimeWaitChart} + interpolation="monotoneX" + /> + <ChartArea + data={connCloseWaitChart} + interpolation="monotoneX" + /> </ChartGroup> </Chart> </GridItem> @@ -372,7 +450,7 @@ export class ServerMonitor extends React.Component { ariaDesc="cpu" ariaTitle={_("Server CPU Usage")} containerComponent={<ChartVoronoiContainer labels={({ datum }) => `${datum.name}: ${datum.y}%`} constrainToVisibleArea />} - height={200} + height={220} minDomain={{ y: 0 }} padding={{ bottom: 30, diff --git a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx index dc078fbc8..00a334621 100644 --- a/src/cockpit/389-console/src/lib/plugins/memberOf.jsx +++ b/src/cockpit/389-console/src/lib/plugins/memberOf.jsx @@ -225,7 +225,7 @@ class MemberOf extends React.Component { // Handle scope subtree this.handleSubtreeScopeSelect = (event, selection) => { - if (selection === "") { + if (!selection.trim() || !valid_dn(selection)) 
{ this.setState({isSubtreeScopeOpen: false}); return; } @@ -257,7 +257,7 @@ class MemberOf extends React.Component { }, () => { this.validateConfig() }); }; this.handleSubtreeScopeCreateOption = newValue => { - if (newValue && !this.state.memberOfEntryScopeOptions.includes(newValue)) { + if (newValue.trim() && valid_dn(newValue) && !this.state.memberOfEntryScopeOptions.includes(newValue)) { this.setState({ memberOfEntryScopeOptions: [...this.state.memberOfEntryScopeOptions, newValue], isSubtreeScopeOpen: false @@ -267,7 +267,7 @@ class MemberOf extends React.Component { // Handle Exclude Scope subtree this.handleExcludeScopeSelect = (event, selection) => { - if (selection === "") { + if (!selection.trim() || !valid_dn(selection)) { this.setState({isExcludeScopeOpen: false}); return; } @@ -299,7 +299,7 @@ class MemberOf extends React.Component { }, () => { this.validateConfig() }); }; this.handleExcludeCreateOption = newValue => { - if (newValue && !this.state.memberOfEntryScopeOptions.includes(newValue)) { + if (newValue.trim() && valid_dn(newValue) && !this.state.memberOfEntryScopeOptions.includes(newValue)) { this.setState({ memberOfEntryScopeExcludeOptions: [...this.state.memberOfEntryScopeExcludeOptions, newValue], isExcludeScopeOpen: false @@ -310,7 +310,7 @@ class MemberOf extends React.Component { // Modal scope and exclude Scope // Handle scope subtree this.handleConfigScopeSelect = (event, selection) => { - if (selection === "") { + if (selection.trim() === "" || !valid_dn(selection)) { this.setState({isConfigSubtreeScopeOpen: false}); return; } @@ -342,7 +342,7 @@ class MemberOf extends React.Component { }, () => { this.validateModal() }); }; this.handleConfigCreateOption = newValue => { - if (newValue && !this.state.configEntryScopeOptions.includes(newValue)) { + if (newValue.trim() && valid_dn(newValue) && !this.state.configEntryScopeOptions.includes(newValue)) { this.setState({ configEntryScopeOptions: [...this.state.configEntryScopeOptions, newValue], isConfigSubtreeScopeOpen: false @@ -352,7 +352,7 @@ class MemberOf extends React.Component { // Handle Exclude Scope subtree this.handleConfigExcludeScopeSelect = (event, selection) => { - if (selection === "") { + if (selection.trim() === "" || !valid_dn(selection)) { this.setState({isConfigExcludeScopeOpen: false}); return; } @@ -384,7 +384,7 @@ class MemberOf extends React.Component { }, () => { this.validateModal() }); }; this.handleConfigExcludeCreateOption = newValue => { - if (newValue && !this.state.configEntryScopeExcludeOptions.includes(newValue)) { + if (newValue.trim() && valid_dn(newValue) && !this.state.configEntryScopeExcludeOptions.includes(newValue)) { this.setState({ configEntryScopeExcludeOptions: [...this.state.configEntryScopeExcludeOptions, newValue], isConfigExcludeScopeOpen: false @@ -1563,7 +1563,7 @@ class MemberOf extends React.Component { ))} </Select> <FormHelperText > - {_("A subtree is required, and values must be valid DN's")} + {"values must be valid DN's"} </FormHelperText> </GridItem> <GridItem className="ds-left-margin" span={3}> diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py index 196577ed5..ec82b0346 100644 --- a/src/lib389/lib389/monitor.py +++ b/src/lib389/lib389/monitor.py @@ -119,14 +119,26 @@ class Monitor(DSLdapObject): sslport = str(self._instance.sslport) conn_count = 0 + conn_established_count = 0 + conn_close_wait_count = 0 + conn_time_wait_count = 0 conns = psutil.net_connections() for conn in conns: if len(conn[4]) > 0: conn_port = str(conn[4][1]) if 
conn_port in (port, sslport): + if conn[5] == 'TIME_WAIT': + conn_time_wait_count += 1 + if conn[5] == 'CLOSE_WAIT': + conn_close_wait_count += 1 + if conn[5] == 'ESTABLISHED': + conn_established_count += 1 conn_count += 1 stats['connection_count'] = [str(conn_count)] + stats['connection_established_count'] = [str(conn_established_count)] + stats['connection_close_wait_count'] = [str(conn_close_wait_count)] + stats['connection_time_wait_count'] = [str(conn_time_wait_count)] return stats
0
93b6cefb66a5dea5af0d7753f6d26d8d6046d618
389ds/389-ds-base
Ticket 532 - RUV is not getting updated for both Master and consumer Bug Description: If you change the replica type, the RUV does not reflect the change. This is partly due to the nsState attribute not being updated - specifically the csngen. Fix Description: When we change the replica type, update the csn gen, add the new RUV element, delete the old RUV element, clean the changelog RUV, update the state, and notify the agmts. Then you must reinitialize the agmts after changing the replica type. https://fedorahosted.org/389/ticket/532 Reviewed by: richm(Thanks!)
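One piece of the fix described above is rewriting the CSN generator's replica id under a write lock when the replica type changes. The sketch below models just that step with a plain pthread rwlock and hypothetical type names; the real csngen_rewrite_rid() and the surrounding RUV updates are in the diff record that follows.

```c
/* Self-contained model (hypothetical types, pthread rwlock in place of
 * Slapi_RWLock) of rewriting the CSN generator's replica id under a
 * write lock, as the fix does in csngen_rewrite_rid(). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t replica_id_t;

typedef struct csn_gen {
    pthread_rwlock_t lock;
    replica_id_t rid;        /* replica id baked into every generated CSN */
} csn_gen_t;

static void csn_gen_rewrite_rid(csn_gen_t *gen, replica_id_t rid)
{
    if (gen == NULL)
        return;
    pthread_rwlock_wrlock(&gen->lock);
    gen->rid = rid;          /* new CSNs now carry the new replica id */
    pthread_rwlock_unlock(&gen->lock);
}

int main(void)
{
    csn_gen_t gen;
    pthread_rwlock_init(&gen.lock, NULL);
    gen.rid = 65535;                     /* consumer rid (read-only replica) */
    csn_gen_rewrite_rid(&gen, 7);        /* promote: now a master with rid 7 */
    printf("rid is now %u\n", (unsigned)gen.rid);
    pthread_rwlock_destroy(&gen.lock);
    return 0;
}
```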
commit 93b6cefb66a5dea5af0d7753f6d26d8d6046d618 Author: Mark Reynolds <[email protected]> Date: Fri Dec 21 14:01:55 2012 -0500 Ticket 532 - RUV is not getting updated for both Master and consumer Bug Description: If you change the replica type, the RUV does not reflect the change. This is partly due to the nsState attribute not being updated - specifically the csngen. Fix Description: When we change the replica type, update the csn gen, add the new RUV element, delete the old RUV element, clean the changelog RUV, update the state, and notify the agmts. Then you must reinitialize the agmts after changing the replica type. https://fedorahosted.org/389/ticket/532 Reviewed by: richm(Thanks!) diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h index f1d6ef432..7b0d6e4ad 100644 --- a/ldap/servers/plugins/replication/repl5.h +++ b/ldap/servers/plugins/replication/repl5.h @@ -61,6 +61,7 @@ #include "repl5_ruv.h" #include "cl4.h" +#define START_UPDATE_DELAY 2 /* 2 second */ #define REPLICA_TYPE_WINDOWS 1 #define REPLICA_TYPE_MULTIMASTER 0 #define REPL_DIRSYNC_CONTROL_OID "1.2.840.113556.1.4.841" @@ -558,6 +559,7 @@ int replica_add_by_name (const char *name, Object *replica); int replica_delete_by_name (const char *name); Object* replica_get_by_name (const char *name); void replica_flush(Replica *r); +void replica_set_csn_assigned(Replica *r); void replica_get_referrals(const Replica *r, char ***referrals); void replica_set_referrals(Replica *r,const Slapi_ValueSet *vs); int replica_update_csngen_state (Replica *r, const RUV *ruv); @@ -584,6 +586,8 @@ void replica_set_ruv_dirty (Replica *r); void replica_write_ruv (Replica *r); char *replica_get_dn(Replica *r); void replica_check_for_tasks(Replica*r, Slapi_Entry *e); +void replica_update_state (time_t when, void *arg); +void replica_reset_csn_pl(Replica *r); /* The functions below handles the state flag */ /* Current internal state flags */ diff --git a/ldap/servers/plugins/replication/repl5_plugins.c b/ldap/servers/plugins/replication/repl5_plugins.c index efd7e180d..43c1596b3 100644 --- a/ldap/servers/plugins/replication/repl5_plugins.c +++ b/ldap/servers/plugins/replication/repl5_plugins.c @@ -1213,7 +1213,7 @@ write_changelog_and_ruv (Slapi_PBlock *pb) op_params->target_address.uniqueid = slapi_ch_strdup (uniqueid); } - if( is_cleaned_rid(csn_get_replicaid(op_params->csn))){ + if( op_params->csn && is_cleaned_rid(csn_get_replicaid(op_params->csn))){ /* this RID has been cleaned */ object_release (repl_obj); return 0; diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c index 3d7677b86..82362d604 100644 --- a/ldap/servers/plugins/replication/repl5_replica.c +++ b/ldap/servers/plugins/replication/repl5_replica.c @@ -51,7 +51,6 @@ #include "cl5_api.h" #define RUV_SAVE_INTERVAL (30 * 1000) /* 30 seconds */ -#define START_UPDATE_DELAY 2 /* 2 second */ #define REPLICA_RDN "cn=replica" #define CHANGELOG_RDN "cn=legacy changelog" @@ -107,7 +106,6 @@ static int _replica_check_validity (const Replica *r); static int _replica_init_from_config (Replica *r, Slapi_Entry *e, char *errortext); static int _replica_update_entry (Replica *r, Slapi_Entry *e, char *errortext); static int _replica_configure_ruv (Replica *r, PRBool isLocked); -static void _replica_update_state (time_t when, void *arg); static char * _replica_get_config_dn (const Slapi_DN *root); static char * _replica_type_as_string (const Replica *r); /* DBDB, I think this is probably bogus : */ 
@@ -248,7 +246,7 @@ replica_new_from_entry (Slapi_Entry *e, char *errortext, PRBool is_add_operation In that case the updated would fail but nothing bad would happen. The next scheduled update would save the state */ repl_name = slapi_ch_strdup (r->repl_name); - r->repl_eqcxt_rs = slapi_eq_repeat(_replica_update_state, repl_name, + r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, repl_name, current_time () + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); if (r->tombstone_reap_interval > 0) @@ -295,10 +293,17 @@ replica_flush(Replica *r) PR_Unlock(r->repl_lock); /* This function take the Lock Inside */ /* And also write the RUV */ - _replica_update_state((time_t)0, r->repl_name); + replica_update_state((time_t)0, r->repl_name); } } +void +replica_set_csn_assigned(Replica *r) +{ + PR_Lock(r->repl_lock); + r->repl_csn_assigned = PR_TRUE; + PR_Unlock(r->repl_lock); +} /* * Deallocate a replica. arg should point to the address of a @@ -928,6 +933,19 @@ replica_set_updatedn (Replica *r, const Slapi_ValueSet *vs, int mod_op) PR_Unlock(r->repl_lock); } +void +replica_reset_csn_pl(Replica *r) +{ + PR_Lock(r->repl_lock); + + if (NULL != r->min_csn_pl){ + csnplFree (&r->min_csn_pl); + } + r->min_csn_pl = csnplNew(); + + PR_Unlock(r->repl_lock); +} + /* gets current replica generation for this replica */ char *replica_get_generation (const Replica *r) { @@ -1251,8 +1269,8 @@ replica_set_enabled (Replica *r, PRBool enable) { if (r->repl_eqcxt_rs == NULL) /* event is not already registered */ { - repl_name = slapi_ch_strdup (r->repl_name); - r->repl_eqcxt_rs = slapi_eq_repeat(_replica_update_state, repl_name, + repl_name = slapi_ch_strdup (r->repl_name); + r->repl_eqcxt_rs = slapi_eq_repeat(replica_update_state, repl_name, current_time() + START_UPDATE_DELAY, RUV_SAVE_INTERVAL); } } @@ -2349,8 +2367,8 @@ done: /* NOTE - this is the only non-api function that performs locking because it is called by the event queue */ -static void -_replica_update_state (time_t when, void *arg) +void +replica_update_state (time_t when, void *arg) { int rc; const char *replica_name = (const char *)arg; @@ -2420,7 +2438,7 @@ _replica_update_state (time_t when, void *arg) dn = _replica_get_config_dn (r->repl_root); if (NULL == dn) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, - "_replica_update_state: failed to get the config dn for %s\n", + "replica_update_state: failed to get the config dn for %s\n", slapi_sdn_get_dn (r->repl_root)); PR_Unlock(r->repl_lock); goto done; @@ -2461,7 +2479,7 @@ _replica_update_state (time_t when, void *arg) slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); if (rc != LDAP_SUCCESS) { - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "_replica_update_state: " + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "replica_update_state: " "failed to update state of csn generator for replica %s: LDAP " "error - %d\n", slapi_sdn_get_dn(r->repl_root), rc); } diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c index aef9c1341..dd3e65e34 100644 --- a/ldap/servers/plugins/replication/repl5_replica_config.c +++ b/ldap/servers/plugins/replication/repl5_replica_config.c @@ -500,8 +500,12 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* if (new_repl_id || new_repl_type) { *returncode = replica_config_change_type_and_id(r, new_repl_type, new_repl_id, errortext, apply_mods); + PR_Unlock (s_configLock); + replica_update_state(0, (void *)replica_get_name(r)); + PR_Lock (s_configLock); 
slapi_ch_free_string(&new_repl_id); slapi_ch_free_string(&new_repl_type); + agmtlist_notify_all(pb); } } @@ -808,10 +812,35 @@ replica_config_change_type_and_id (Replica *r, const char *new_type, if (apply_mods) { + Object *ruv_obj, *gen_obj; + RUV *ruv; + CSNGen *gen; + + ruv_obj = replica_get_ruv(r); + if(ruv_obj){ + /* we need to rewrite the repl_csngen with the new rid */ + ruv = object_get_data (ruv_obj); + gen_obj = replica_get_csngen (r); + if(gen_obj){ + const char *purl = multimaster_get_local_purl(); + + gen = (CSNGen*) object_get_data (gen_obj); + csngen_rewrite_rid(gen, rid); + if(purl && type == REPLICA_TYPE_UPDATABLE){ + ruv_add_replica(ruv, rid, purl); + replica_reset_csn_pl(r); + } + ruv_delete_replica(ruv, oldrid); + replica_set_ruv_dirty(r); + cl5CleanRUV(oldrid); + replica_set_csn_assigned(r); + } + object_release(ruv_obj); + } replica_set_type (r, type); - replica_set_rid(r, rid); + replica_set_rid(r, rid); - /* Set the mapping tree node, and the list of referrals */ + /* Set the mapping tree node, and the list of referrals */ /* if this server is a 4.0 consumer the referrals are set by legacy plugin */ if (!replica_is_legacy_consumer(r)) consumer5_set_mapping_tree_state_for_replica(r, NULL); diff --git a/ldap/servers/plugins/replication/repl5_ruv.c b/ldap/servers/plugins/replication/repl5_ruv.c index b52dd4912..972b81278 100644 --- a/ldap/servers/plugins/replication/repl5_ruv.c +++ b/ldap/servers/plugins/replication/repl5_ruv.c @@ -466,7 +466,7 @@ ruv_add_replica (RUV *ruv, ReplicaId rid, const char *replica_purl) { replica = ruvAddReplicaNoCSN (ruv, rid, replica_purl); } - + slapi_rwlock_unlock (ruv->lock); if (replica) @@ -514,7 +514,7 @@ ruv_add_index_replica (RUV *ruv, ReplicaId rid, const char *replica_purl, int in { replica = ruvAddIndexReplicaNoCSN (ruv, rid, replica_purl, index); } - + slapi_rwlock_unlock (ruv->lock); if (replica) @@ -1983,14 +1983,14 @@ ruv_move_local_supplier_to_first(RUV *ruv, ReplicaId aRid) PR_ASSERT(ruv); - slapi_rwlock_wrlock (ruv->lock); - + slapi_rwlock_wrlock (ruv->lock); + elem = (RUVElement *)dl_delete(ruv->elements,(const void*)&aRid, ruvReplicaCompare, 0); if (elem) { dl_add_index(ruv->elements, elem, 1); rc = RUV_SUCCESS; } - + slapi_rwlock_unlock (ruv->lock); return rc; diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c index 7e0bdb630..464a59e07 100644 --- a/ldap/servers/slapd/csngen.c +++ b/ldap/servers/slapd/csngen.c @@ -306,11 +306,22 @@ void csngen_abort_csn (CSNGen *gen, const CSN *csn) _csngen_call_callbacks (gen, csn, 1); } +void csngen_rewrite_rid(CSNGen *gen, ReplicaId rid) +{ + if (gen == NULL){ + return; + } + slapi_rwlock_wrlock (gen->lock); + gen->state.rid = rid; + slapi_rwlock_unlock (gen->lock); + +} + /* this function should be called when a remote CSN for the same part of the dit becomes known to the server (for instance, as part of RUV during replication session. 
In response, the generator would adjust its notion of time so that it does not generate smaller csns */ -int csngen_adjust_time (CSNGen *gen, const CSN* csn) +int csngen_adjust_time(CSNGen *gen, const CSN* csn) { time_t remote_time, remote_offset, cur_time; PRUint16 remote_seqnum; diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h index b3888ef45..225c7aa14 100644 --- a/ldap/servers/slapd/slapi-private.h +++ b/ldap/servers/slapd/slapi-private.h @@ -264,6 +264,8 @@ void csngen_abort_csn (CSNGen *gen, const CSN *csn); int csngen_adjust_time (CSNGen *gen, const CSN* csn); /* returns PR_TRUE if the csn was generated by this generator and PR_FALSE otherwise. */ +void csngen_rewrite_rid(CSNGen *gen, ReplicaId rid); + PRBool csngen_is_local_csn(const CSNGen *gen, const CSN *csn); /* returns current state of the generator so that it can be saved in the DIT */
0
26129080c871eb49d4db9540d88d24de84fa4426
389ds/389-ds-base
Resolves: #466702 Summary: Memory usage research: checking in the experimental code Comment: added a missing line
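The missing line referred to above is the classic free-list push: point the returned object's next pointer at the current head before making the object the new head. Below is a self-contained model with hypothetical names, not the server's mempool code; the one-line change itself is in the diff that follows.

```c
/* Model of returning an object to a pooled free list. The line that was
 * missing is the one that preserves the existing chain before the object
 * becomes the new head. */
#include <stdio.h>
#include <stdlib.h>

struct pool_object {
    struct pool_object *next;
};

struct pool {
    struct pool_object *head;
    int count;
};

static void pool_return(struct pool *p, struct pool_object *obj)
{
    obj->next = p->head;   /* the missing step: keep the old chain reachable */
    p->head = obj;         /* object becomes the new head of the free list   */
    p->count++;
}

int main(void)
{
    struct pool p = { NULL, 0 };
    struct pool_object *a = calloc(1, sizeof(*a));
    struct pool_object *b = calloc(1, sizeof(*b));
    pool_return(&p, a);
    pool_return(&p, b);
    printf("free list length: %d (head=%p)\n", p.count, (void *)p.head);
    free(a);
    free(b);
    return 0;
}
```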
commit 26129080c871eb49d4db9540d88d24de84fa4426 Author: Noriko Hosoi <[email protected]> Date: Thu Nov 6 01:02:21 2008 +0000 Resolves: #466702 Summpary: Memory usage research: checking in the experimental code Comment: added a missing line diff --git a/ldap/servers/slapd/mempool.c b/ldap/servers/slapd/mempool.c index 56fd783db..f990363a6 100644 --- a/ldap/servers/slapd/mempool.c +++ b/ldap/servers/slapd/mempool.c @@ -234,6 +234,7 @@ mempool_return(int type, void *object, mempool_cleanup_callback cleanup) if ((maxfreelist > 0) && (my_mempool[type].mempool_count > maxfreelist)) { return LDAP_UNWILLING_TO_PERFORM; } else { + ((struct mempool_object *)object)->mempool_next = mempool[type].mempool_head; my_mempool[type].mempool_head = (struct mempool_object *)object; my_mempool[type].mempool_cleanup_fn = cleanup; my_mempool[type].mempool_count++;
0
286559dac7a968faa0102f0165ba56892795b269
389ds/389-ds-base
Ticket #47750 - Creating a glue fails if one above level is a conflict or missing Bug description: https://fedorahosted.org/389/ticket/47750#comment:16 Fix description: The logic in slapi_is_special_rdn that checks whether the RDN is a tombstone had a problem: it returned true even when the first part of the RDN was not nsuniqueid=<UNIQUE_ID>. This patch adds a check for that case so the function returns false if the first part of the RDN is not nsuniqueid=<UNIQUE_ID>. Reviewed by [email protected] and [email protected] (Thank you, Mark and Rich!!)
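A simplified model of the check the patch adds: a tombstone RDN must begin with nsuniqueid=<UNIQUE_ID>, and anything else is immediately reported as not a tombstone. The helper below is hypothetical and far smaller than the real slapi_is_special_rdn(); it only illustrates the early-return guard, and the actual change is in the diff that follows.

```c
/* Hypothetical, simplified tombstone-RDN check: bail out early when the
 * first RDN component does not start with "nsuniqueid=". */
#include <stdio.h>
#include <string.h>
#include <strings.h>

static int rdn_looks_like_tombstone(const char *rdn)
{
    static const char prefix[] = "nsuniqueid=";
    size_t plen = sizeof(prefix) - 1;

    if (rdn == NULL)
        return 0;
    /* The bug: the old code could fall through and report "tombstone"
     * even when the first component was not nsuniqueid=...            */
    if (strncasecmp(rdn, prefix, plen) != 0 || rdn[plen] == '\0')
        return 0;
    return 1;   /* a real check would also validate the unique id itself */
}

int main(void)
{
    printf("%d\n", rdn_looks_like_tombstone("nsuniqueid=1234-abcd,cn=users"));
    printf("%d\n", rdn_looks_like_tombstone("cn=glue entry,dc=example"));
    return 0;
}
```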
commit 286559dac7a968faa0102f0165ba56892795b269 Author: Noriko Hosoi <[email protected]> Date: Fri Jun 27 12:51:30 2014 -0700 Ticket #47750 - Creating a glue fails if one above level is a conflict or missing Bug description: https://fedorahosted.org/389/ticket/47750#comment:16 Fix description: The logic in slapi_is_special_rdn to check if the RDN is a tombstone or not had a problem. Even if the first part of RDN is not nsuniqueid=<UNIQUE_ID>, it was returning true. In this patch, code to check the case is added and it returns false if the first part of RDN is not nsuniqueid=<UNIQUE_ID>. Reviewed by [email protected] and [email protected] (Thank you, Mark and Rich!!) diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index 4c23af2d2..3690bf754 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -1475,7 +1475,7 @@ slapi_is_special_rdn(const char *rdn, int flag) } rp = (char *)rdn; while (rp) { - char *comma = NULL; + char *endp = NULL; if (!PL_strncasecmp(rp, SLAPI_ATTR_UNIQUEID, SLAPI_ATTR_UNIQUEID_LENGTH) && (*(rp + SLAPI_ATTR_UNIQUEID_LENGTH) == '=')) { if (RDN_IS_TOMBSTONE == flag) { @@ -1492,10 +1492,17 @@ slapi_is_special_rdn(const char *rdn, int flag) return 1; } } + } else if (RDN_IS_TOMBSTONE == flag) { + /* If the first part of rdn does not start with SLAPI_ATTR_UNIQUEID, + * it's not a tombstone RDN. */ + return 0; + } + endp = PL_strchr(rp, ','); + if (!endp) { + endp = rp + strlen(rp); } - comma = PL_strchr(rp, ','); rp = PL_strchr(rp, '+'); - if (rp && (rp < comma)) { + if (rp && (rp < endp)) { plus = 1; rp++; }
0
1a9ceaaec49b207f13a499584f637c420af75499
389ds/389-ds-base
Fix NTDS maven location.
commit 1a9ceaaec49b207f13a499584f637c420af75499 Author: Thomas Lackey <[email protected]> Date: Fri May 13 18:46:27 2005 +0000 Fix NTDS maven location. diff --git a/components.mk b/components.mk index 354be636c..49cd30949 100644 --- a/components.mk +++ b/components.mk @@ -601,9 +601,9 @@ MAVEN_DIR = $(MAVEN_RELEASE)/$(MAVEN_VERSION) MAVEN_DEP = $(MAVEN_FILE) MAVEN_REL_DIR=$(subst -bin,,$(subst .zip,,$(MAVEN))) -#MAVEN_EXE=$(NSCP_DISTDIR_FULL_RTL)/maven/$(MAVEN_REL_DIR)/bin/maven -MAVEN_EXE=..\\..\\..\\..\\..\\dist\\WINNT5.0_DBG.OBJ\\maven\\maven-1.0.2\\bin\\maven.bat -#MAVEN_HOME=..\\..\\..\\..\\..\\dist\\WINNT5.0_DBG.OBJ\\maven\\maven-1.0.2 +# CMD does not like the '/' in NSCP_DISTDIR_FULL_RTL, therefore the +# slashes need swapped to '\\' +MAVEN_EXE=cmd /c `echo $(NSCP_DISTDIR_FULL_RTL) | sed 's/\//\\\\/g'`\\maven\\$(MAVEN_REL_DIR)\\bin\\maven.bat MAVEN_HOME=$(NSCP_DISTDIR_FULL_RTL)/maven/$(MAVEN_REL_DIR) ifndef MAVEN_PULL_METHOD
0
9357bf1c46a1cbf38ae694109504ce8786a97590
389ds/389-ds-base
Ticket 47603 - Allow RI plugin to use alternate config area Description: RI plugin can now use an alternate config area. Also, the pluginarg plugin configuration has been deprecated in favor of unique config attributes: old: nsslapd-pluginarg0: 0 nsslapd-pluginarg1: /var/log/dirsrv/slapd-localhost/referint nsslapd-pluginarg2: 0 nsslapd-pluginarg3: member nsslapd-pluginarg4: uniquemember new: referint-update-delay: 0 referint-logfile: /var/log/dirsrv/slapd-localhost/referint referint-logchanges: 0 referint-membership-attr: member referint-membership-attr: uniquemember The plugin still accepts the plugin arg configuration in the plugin entry, but not in the new shared config entry. Config changes are now dynamic and do not require a server restart. Jenkins & Coverity tests: passed https://fedorahosted.org/389/ticket/47603 Reviewed by: richm(Thanks!)
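The dynamic-configuration part of the fix follows a common pattern: keep one config struct behind a reader/writer lock, read it through small getters, and swap in new values under the write lock when the config entry is modified. The sketch below models that pattern with plain pthreads and hypothetical names; the real code uses Slapi_RWLock and the referint-* attributes listed above, as shown in the diff that follows.

```c
/* Hypothetical model of reloadable plugin configuration guarded by a
 * reader/writer lock, so worker threads never see half-updated values. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ri_config {
    int delay;
    int logchanges;
    char *logfile;
};

static struct ri_config config = { 0, 0, NULL };
static pthread_rwlock_t config_rwlock = PTHREAD_RWLOCK_INITIALIZER;

static int ri_get_delay(void)
{
    int delay;
    pthread_rwlock_rdlock(&config_rwlock);
    delay = config.delay;
    pthread_rwlock_unlock(&config_rwlock);
    return delay;
}

/* A modify on the config entry would re-read the named attributes
 * (referint-update-delay, referint-logfile, ...) and swap them in
 * under the write lock -- no restart required. */
static void ri_apply_config(int delay, int logchanges, const char *logfile)
{
    pthread_rwlock_wrlock(&config_rwlock);
    config.delay = delay;
    config.logchanges = logchanges;
    free(config.logfile);
    config.logfile = strdup(logfile);
    pthread_rwlock_unlock(&config_rwlock);
}

int main(void)
{
    ri_apply_config(0, 0, "/var/log/dirsrv/slapd-localhost/referint");
    printf("delay=%d\n", ri_get_delay());
    ri_apply_config(5, 1, "/var/log/dirsrv/slapd-localhost/referint");
    printf("delay=%d\n", ri_get_delay());
    free(config.logfile);
    return 0;
}
```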
commit 9357bf1c46a1cbf38ae694109504ce8786a97590 Author: Mark Reynolds <[email protected]> Date: Mon Dec 2 17:50:45 2013 -0500 Ticket 47603 - Allow RI plugin to use alternate config area Description: RI plugin can now use an alternate config area. Also, the pluginarg plugin configuration has been deprecated to use unique config attributes instead: old: nsslapd-pluginarg0: 0 nsslapd-pluginarg1: /var/log/dirsrv/slapd-localhost/referint nsslapd-pluginarg2: 0 nsslapd-pluginarg3: member nsslapd-pluginarg4: uniquemember new: referint-update-delay: 0 referint-logfile: /var/log/dirsrv/slapd-localhost/referint referint-logchanges: 0 referint-membership-attr: member referint-membership-attr: uniquemember The plugin still accepts the plugin arg configuration in the plugin entry, but not in the new shared config entry. Making config changes is dynamnic now, and does not require a server restart. Jenkins & Coverity tests: passed https://fedorahosted.org/389/ticket/47603 Reviewed by: richm(Thanks!) diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in index 9a52bc55e..af176e97c 100644 --- a/ldap/ldif/template-dse.ldif.in +++ b/ldap/ldif/template-dse.ldif.in @@ -606,13 +606,13 @@ nsslapd-plugininitfunc: referint_postop_init nsslapd-plugintype: betxnpostoperation nsslapd-pluginenabled: off nsslapd-pluginprecedence: 40 -nsslapd-pluginArg0: 0 -nsslapd-pluginArg1: %log_dir%/referint -nsslapd-pluginArg2: 0 -nsslapd-pluginArg3: member -nsslapd-pluginArg4: uniquemember -nsslapd-pluginArg5: owner -nsslapd-pluginArg6: seeAlso +referint-update-delay: 0 +referint-logfile: %log_dir%/referint +referint-logchanges: 0 +referint-membership-attr: member +referint-membership-attr: uniquemember +referint-membership-attr: owner +referint-membership-attr: seeAlso nsslapd-plugin-depends-on-type: database dn: cn=attribute uniqueness,cn=plugins,cn=config diff --git a/ldap/servers/plugins/referint/referint.c b/ldap/servers/plugins/referint/referint.c index 252ff160b..013ffa33f 100644 --- a/ldap/servers/plugins/referint/referint.c +++ b/ldap/servers/plugins/referint/referint.c @@ -65,9 +65,23 @@ #endif #define REFERINT_PLUGIN_SUBSYSTEM "referint-plugin" /* used for logging */ +#define REFERINT_PREOP_DESC "referint preop plugin" +#define REFERINT_ATTR_DELAY "referint-update-delay" +#define REFERINT_ATTR_LOGCHANGES "referint-logchanges" +#define REFERINT_ATTR_LOGFILE "referint-logfile" +#define REFERINT_ATTR_MEMBERSHIP "referint-membership-attr" #define MAX_LINE 2048 #define READ_BUFSIZE 4096 #define MY_EOF 0 +#define STARTUP 2 + +typedef struct referint_config { + int delay; + char *logfile; + int logchanges; + char **attrs; +} referint_config; +Slapi_RWLock *config_rwlock = NULL; /* function prototypes */ int referint_postop_init( Slapi_PBlock *pb ); @@ -75,11 +89,25 @@ int referint_postop_del( Slapi_PBlock *pb ); int referint_postop_modrdn( Slapi_PBlock *pb ); int referint_postop_start( Slapi_PBlock *pb); int referint_postop_close( Slapi_PBlock *pb); -int update_integrity(char **argv, Slapi_DN *sDN, char *newrDN, Slapi_DN *newsuperior, int logChanges); +int update_integrity(Slapi_DN *sDN, char *newrDN, Slapi_DN *newsuperior, int logChanges); int GetNextLine(char *dest, int size_dest, PRFileDesc *stream); int my_fgetc(PRFileDesc *stream); void referint_thread_func(void *arg); void writeintegritylog(Slapi_PBlock *pb, char *logfilename, Slapi_DN *sdn, char *newrdn, Slapi_DN *newsuperior, Slapi_DN *requestorsdn); +int load_config(Slapi_PBlock *pb, Slapi_Entry *config_entry, int apply); +int 
referint_get_delay(); +int referint_get_logchanges(); +char *referint_get_logfile(); +char **referint_get_attrs(); +int referint_postop_modify(Slapi_PBlock *pb); +int referint_validate_config(Slapi_PBlock *pb); +static int referint_preop_init(Slapi_PBlock *pb); +void referint_set_config_area(Slapi_DN *dn); +Slapi_DN *referint_get_config_area(); +void referint_set_plugin_area(Slapi_DN *sdn); +Slapi_DN *referint_get_plugin_area(); +int referint_sdn_config_cmp(Slapi_DN *sdn); +void referint_get_config(int *delay, int *logchanges, char **logfile); /* global thread control stuff */ static PRLock *referint_mutex = NULL; @@ -88,14 +116,17 @@ static PRLock *keeprunning_mutex = NULL; static PRCondVar *keeprunning_cv = NULL; static int keeprunning = 0; static int refint_started = 0; +static referint_config *config = NULL; +static Slapi_DN* _ConfigAreaDN = NULL; +static Slapi_DN* _pluginDN = NULL; static Slapi_PluginDesc pdesc = { "referint", VENDOR, DS_PACKAGE_VERSION, "referential integrity plugin" }; static int allow_repl = 0; static Slapi_DN *plugin_EntryScope = NULL; static Slapi_DN *plugin_ContainerScope = NULL; static void* referint_plugin_identity = NULL; - static int use_txn = 0; +static int premodfn = SLAPI_PLUGIN_PRE_MODIFY_FN; #ifdef _WIN32 int *module_ldap_debug = 0; @@ -131,6 +162,39 @@ referint_unlock() } } +void +referint_set_config_area(Slapi_DN *dn) +{ + slapi_rwlock_wrlock(config_rwlock); + slapi_sdn_free(&_ConfigAreaDN); + _ConfigAreaDN = slapi_sdn_dup(dn); + slapi_rwlock_unlock(config_rwlock); +} + +/* + * No need to lock here, because this only called from referint_sdn_config_cmp() + * which does take the lock. + */ +Slapi_DN * +referint_get_config_area() +{ + return _ConfigAreaDN; +} + +/* no locking needed for the plugin DN because it is set at initialization */ +void +referint_set_plugin_area(Slapi_DN *sdn) +{ + slapi_sdn_free(&_pluginDN); + _pluginDN = slapi_sdn_dup(sdn); +} + +Slapi_DN * +referint_get_plugin_area() +{ + return _pluginDN; +} + int referint_postop_init( Slapi_PBlock *pb ) { @@ -138,6 +202,8 @@ referint_postop_init( Slapi_PBlock *pb ) char *plugin_type = NULL; int delfn = SLAPI_PLUGIN_POST_DELETE_FN; int mdnfn = SLAPI_PLUGIN_POST_MODRDN_FN; + int modfn = SLAPI_PLUGIN_POST_MODIFY_FN; /* for config changes */ + char *preop_plugin_type = NULL; /* * Get plugin identity and stored it for later use. 
@@ -146,6 +212,12 @@ referint_postop_init( Slapi_PBlock *pb ) slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &referint_plugin_identity); PR_ASSERT (referint_plugin_identity); + if((config = (referint_config *)slapi_ch_calloc (1, sizeof (referint_config))) == NULL){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "referint_postop_init failed to " + "allocate configuration\n" ); + return ( -1 ); + } + /* get the args */ if ((slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_ENTRY, &plugin_entry) == 0) && plugin_entry && @@ -154,8 +226,13 @@ referint_postop_init( Slapi_PBlock *pb ) { delfn = SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN; mdnfn = SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN; + modfn = SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN; + preop_plugin_type = "betxnpreoperation"; + premodfn = SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN; use_txn = 1; } + slapi_ch_free_string(&plugin_type); + if(plugin_entry){ char *plugin_attr_value; @@ -164,33 +241,37 @@ referint_postop_init( Slapi_PBlock *pb ) allow_repl = 1; } slapi_ch_free_string(&plugin_attr_value); + plugin_attr_value = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-pluginEntryScope"); if(plugin_attr_value) { - if (slapi_dn_syntax_check(NULL, plugin_attr_value, 1) == 1) { - slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "Error: Ignoring invalid DN used as plugin entry scope: [%s]\n", - plugin_attr_value); - } else { - plugin_EntryScope = slapi_sdn_new_dn_byref(plugin_attr_value); - } - } + if (slapi_dn_syntax_check(NULL, plugin_attr_value, 1) == 1) { + slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "Error: Ignoring invalid DN used as plugin entry scope: [%s]\n", + plugin_attr_value); + } else { + plugin_EntryScope = slapi_sdn_new_dn_byref(plugin_attr_value); + } + } + plugin_attr_value = slapi_entry_attr_get_charptr(plugin_entry, "nsslapd-pluginContainerScope"); if(plugin_attr_value) { - if (slapi_dn_syntax_check(NULL, plugin_attr_value, 1) == 1) { - slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "Error: Ignoring invalid DN used as plugin container scope: [%s]\n", - plugin_attr_value); - } else { - plugin_ContainerScope = slapi_sdn_new_dn_byref(plugin_attr_value); - } - } + if (slapi_dn_syntax_check(NULL, plugin_attr_value, 1) == 1) { + slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "Error: Ignoring invalid DN used as plugin container scope: [%s]\n", + plugin_attr_value); + } else { + plugin_ContainerScope = slapi_sdn_new_dn_byref(plugin_attr_value); + } + } + + referint_set_plugin_area(slapi_entry_get_sdn(plugin_entry)); } - slapi_ch_free_string(&plugin_type); if ( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01 ) != 0 || slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&pdesc ) != 0 || slapi_pblock_set( pb, delfn, (void *) referint_postop_del ) != 0 || slapi_pblock_set( pb, mdnfn, (void *) referint_postop_modrdn ) != 0 || + slapi_pblock_set( pb, modfn, (void *) (void *)referint_postop_modify ) != 0 || slapi_pblock_set( pb, SLAPI_PLUGIN_START_FN, (void *) referint_postop_start ) != 0 || slapi_pblock_set( pb, SLAPI_PLUGIN_CLOSE_FN, (void *) referint_postop_close ) != 0) { @@ -198,16 +279,321 @@ referint_postop_init( Slapi_PBlock *pb ) return( -1 ); } + /* + * Setup the preop plugin for config validation + */ + if (slapi_register_plugin(preop_plugin_type, /* op type */ + 1, /* Enabled */ + "referint_preop_init", /* this function desc */ + referint_preop_init, /* init func */ + REFERINT_PREOP_DESC, /* plugin desc */ + NULL, /* ? 
*/ + referint_plugin_identity /* access control */)) + { + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "referint_preop_init failed\n" ); + return ( -1 ); + } + return( 0 ); } +/* + * referint-update-delay: 0 + * referint-logfile: /var/log/dirsrv/slapd-localhost/referint + * referint-logchanges: 0 + * referint-membership-attr: member + * referint-membership-attr: uniquemember + * referint-membership-attr: owner + * referint-membership-attr: seeAlso + * + * + * Need to lock this! + */ +int +load_config(Slapi_PBlock *pb, Slapi_Entry *config_entry, int apply) +{ + referint_config *tmp_config = NULL; + char *value = NULL; + char **attrs = NULL; + char **argv = NULL; + int new_config_present = 0; + int argc = 0; + int rc = SLAPI_PLUGIN_SUCCESS; + + slapi_rwlock_wrlock(config_rwlock); + + if(config == NULL){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "load_config: config is NULL\n" ); + rc = SLAPI_PLUGIN_FAILURE; + goto done; + } + if((tmp_config = (referint_config *)slapi_ch_calloc (1, sizeof (referint_config))) == NULL){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "load_config failed to " + "allocate configuration\n" ); + rc = SLAPI_PLUGIN_FAILURE; + goto done; + } else { + /* set these to -1 for config validation */ + tmp_config->delay = -1; + tmp_config->logchanges = -1; + } + + if((value = slapi_entry_attr_get_charptr(config_entry, REFERINT_ATTR_DELAY))){ + tmp_config->delay = atoi(value); + slapi_ch_free_string(&value); + new_config_present = 1; + } + if((value = slapi_entry_attr_get_charptr(config_entry, REFERINT_ATTR_LOGFILE))){ + tmp_config->logfile = value; + new_config_present = 1; + } + if((value = slapi_entry_attr_get_charptr(config_entry, REFERINT_ATTR_LOGCHANGES))){ + tmp_config->logchanges = atoi(value); + slapi_ch_free_string(&value); + new_config_present = 1; + } + if((attrs = slapi_entry_attr_get_charray(config_entry, REFERINT_ATTR_MEMBERSHIP))){ + tmp_config->attrs = attrs; + new_config_present = 1; + } + + if(new_config_present){ + /* Verify we have everything we need */ + if(tmp_config->delay == -1){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "Plugin configuration is missing %s\n", + REFERINT_ATTR_DELAY); + rc = SLAPI_PLUGIN_FAILURE; + } else if (!tmp_config->logfile){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "Plugin configuration is missing %s\n", + REFERINT_ATTR_LOGFILE); + rc = SLAPI_PLUGIN_FAILURE; + } else if (tmp_config->logchanges == -1){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "Plugin configuration is missing %s\n", + REFERINT_ATTR_LOGCHANGES); + rc = SLAPI_PLUGIN_FAILURE; + } else if (!tmp_config->attrs){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "Plugin configuration is missing %s\n", + REFERINT_ATTR_MEMBERSHIP); + rc = SLAPI_PLUGIN_FAILURE; + } + } else{ + /* + * We are using the old plugin arg configuration, get the args + */ + if ( slapi_pblock_get( pb, SLAPI_PLUGIN_ARGC, &argc ) != 0) { + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop failed to get argc\n" ); + rc = SLAPI_PLUGIN_FAILURE; + goto done; + } + if ( slapi_pblock_get( pb, SLAPI_PLUGIN_ARGV, &argv ) != 0) { + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop failed to get argv\n" ); + rc = SLAPI_PLUGIN_FAILURE; + goto done; + } + if(argv == NULL){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop_del, args are NULL\n" ); + rc = SLAPI_PLUGIN_FAILURE; + goto done; + } 
+ /* + * Load the args and set the config struct + */ + if (argc >= 3) { + int i; + + tmp_config->delay = atoi(argv[0]); + tmp_config->logfile = slapi_ch_strdup(argv[1]); + tmp_config->logchanges = atoi(argv[2]); + for(i = 3; argv[i] != NULL; i++){ + slapi_ch_array_add(&tmp_config->attrs, slapi_ch_strdup(argv[i])); + } + } else { + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop insufficient arguments supplied\n" ); + rc = SLAPI_PLUGIN_FAILURE; + goto done; + } + } + +done: + if(apply && rc == SLAPI_PLUGIN_SUCCESS){ + slapi_ch_free_string(&config->logfile); + slapi_ch_array_free(config->attrs); + slapi_ch_free((void **)&config); + config = tmp_config; + } else if(tmp_config){ + slapi_ch_free_string(&tmp_config->logfile); + slapi_ch_array_free(tmp_config->attrs); + slapi_ch_free((void **)&tmp_config); + } + + slapi_rwlock_unlock(config_rwlock); + + return rc; +} + +int +referint_postop_modify(Slapi_PBlock *pb) +{ + Slapi_Entry *entry = NULL, *e = NULL; + Slapi_Entry *config_e = NULL; + Slapi_DN *config_sdn = NULL; + Slapi_DN *sdn = NULL; + char *config_area = NULL; + int result = 0; + int rc = SLAPI_PLUGIN_SUCCESS; + + /* check if we are updating the shared config entry */ + slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn); + slapi_pblock_get(pb, SLAPI_ENTRY_POST_OP, &entry); + + if (referint_sdn_config_cmp(sdn) == 0 && slapi_sdn_compare(sdn, referint_get_plugin_area())) + { + if( SLAPI_PLUGIN_FAILURE == load_config(pb, entry, 1)){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "Failed to update configuration.\n"); + return SLAPI_PLUGIN_FAILURE; + } + } else if (slapi_sdn_compare(sdn, referint_get_plugin_area()) == 0){ + /* + * Check if the plugin config area is set(verify it and load its config), + * otherwise reload the plugin entry config + */ + if((config_area = slapi_entry_attr_get_charptr(entry, SLAPI_PLUGIN_SHARED_CONFIG_AREA))){ + rc = slapi_dn_syntax_check(pb, config_area, 1); + if (rc) { /* syntax check failed */ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "referint_postop_modify: " + "%s does not contain a valid DN (%s)\n", + SLAPI_PLUGIN_SHARED_CONFIG_AREA, config_area); + rc = LDAP_INVALID_DN_SYNTAX; + goto bail; + } + config_sdn = slapi_sdn_new_dn_byval(config_area); + result = slapi_search_internal_get_entry(config_sdn, NULL, &e, referint_plugin_identity); + if (LDAP_SUCCESS != result) { + if (result == LDAP_NO_SUCH_OBJECT) { + /* log an error and use the plugin entry for the config */ + slapi_log_error(SLAPI_LOG_PLUGIN, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop_modify: Config entry \"%s\" does " + "not exist.\n", config_area); + rc = LDAP_OPERATIONS_ERROR; + goto bail; + } + } else { + if(e){ + config_e = e; + } else { + slapi_log_error(SLAPI_LOG_PLUGIN, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop_modify: Config entry \"%s\" was not located.\n", config_area); + rc = LDAP_OPERATIONS_ERROR; + goto bail; + } + } + } else { + config_e = entry; + } + if(load_config(pb, config_e, 1) != LDAP_SUCCESS){ + rc = LDAP_UNWILLING_TO_PERFORM; + goto bail; + } + referint_set_config_area(slapi_entry_get_sdn(config_e)); + } + +bail: + slapi_ch_free_string(&config_area); + slapi_sdn_free(&config_sdn); + slapi_entry_free(e); + + return rc; +} + +int +referint_get_delay() +{ + int delay; + + slapi_rwlock_rdlock(config_rwlock); + delay = config->delay; + slapi_rwlock_unlock(config_rwlock); + + return delay; +} + +int +referint_get_logchanges() +{ + int log_changes; + + slapi_rwlock_rdlock(config_rwlock); + log_changes = 
config->logchanges; + slapi_rwlock_unlock(config_rwlock); + + return log_changes; +} + +char * +referint_get_logfile() +{ + char *log_file; + + slapi_rwlock_rdlock(config_rwlock); + log_file = slapi_ch_strdup(config->logfile); + slapi_rwlock_unlock(config_rwlock); + + return log_file; +} + +void +referint_get_config(int *delay, int *logchanges, char **logfile) +{ + slapi_rwlock_rdlock(config_rwlock); + if(delay){ + *delay = config->delay; + } + if(logchanges){ + *logchanges = config->logchanges; + } + if(logfile){ + *logfile = slapi_ch_strdup(config->logfile); + } + slapi_rwlock_unlock(config_rwlock); +} + +/* + * might need to find an alternate option instead of copying + */ +char ** +referint_get_attrs() +{ + char **attrs = NULL; + + slapi_rwlock_rdlock(config_rwlock); + attrs = slapi_ch_array_dup(config->attrs); + slapi_rwlock_unlock(config_rwlock); + + return attrs; +} + +int +referint_sdn_config_cmp(Slapi_DN *sdn) +{ + int rc = 0; + + slapi_rwlock_rdlock(config_rwlock); + rc = slapi_sdn_compare(sdn, referint_get_config_area()); + slapi_rwlock_unlock(config_rwlock); + + return rc; +} int referint_postop_del( Slapi_PBlock *pb ) { Slapi_DN *sdn = NULL; - char **argv; - int argc; + char *logfile = NULL; int delay; int logChanges=0; int isrepop = 0; @@ -234,50 +620,26 @@ referint_postop_del( Slapi_PBlock *pb ) if(oprc != 0 || (isrepop && !allow_repl)){ return SLAPI_PLUGIN_SUCCESS; } - /* get the args */ - if ( slapi_pblock_get( pb, SLAPI_PLUGIN_ARGC, &argc ) != 0) { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop failed to get argc\n" ); - return SLAPI_PLUGIN_FAILURE; - } - if ( slapi_pblock_get( pb, SLAPI_PLUGIN_ARGV, &argv ) != 0) { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop failed to get argv\n" ); - return SLAPI_PLUGIN_FAILURE; - } - if(argv == NULL){ - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop_del, args are NULL\n" ); - return SLAPI_PLUGIN_FAILURE; - } + referint_get_config(&delay, &logChanges, NULL); - if (argc >= 3) { - /* argv[0] will be the delay */ - delay = atoi(argv[0]); - - /* argv[2] will be wether or not to log changes */ - logChanges = atoi(argv[2]); - - if(delay == -1){ - /* integrity updating is off */ - rc = SLAPI_PLUGIN_SUCCESS; - } else if(delay == 0){ /* no delay */ - /* call function to update references to entry */ - if (plugin_EntryScope && slapi_sdn_issuffix(sdn, plugin_EntryScope)) { - rc = update_integrity(argv, sdn, NULL, NULL, logChanges); - } - } else { - /* write the entry to integrity log */ - writeintegritylog(pb, argv[1], sdn, NULL, NULL, NULL /* slapi_get_requestor_sdn(pb) */); - rc = SLAPI_PLUGIN_SUCCESS; + if(delay == -1){ + /* integrity updating is off */ + rc = SLAPI_PLUGIN_SUCCESS; + } else if(delay == 0){ /* no delay */ + /* call function to update references to entry */ + if (plugin_EntryScope && slapi_sdn_issuffix(sdn, plugin_EntryScope)) { + rc = update_integrity(sdn, NULL, NULL, logChanges); } } else { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop insufficient arguments supplied\n" ); - return SLAPI_PLUGIN_FAILURE; + /* write the entry to integrity log */ + logfile = referint_get_logfile(); + writeintegritylog(pb, logfile, sdn, NULL, NULL, NULL /* slapi_get_requestor_sdn(pb) */); + rc = SLAPI_PLUGIN_SUCCESS; } + slapi_ch_free_string(&logfile); + return( rc ); } @@ -286,11 +648,10 @@ referint_postop_modrdn( Slapi_PBlock *pb ) { Slapi_DN *sdn = NULL; Slapi_DN *newsuperior; + char *logfile = NULL; char 
*newrdn; - char **argv; int oprc; int rc = SLAPI_PLUGIN_SUCCESS; - int argc = 0; int delay; int logChanges=0; int isrepop = 0; @@ -312,61 +673,39 @@ referint_postop_modrdn( Slapi_PBlock *pb ) if(oprc != 0 || (isrepop && !allow_repl)){ return SLAPI_PLUGIN_SUCCESS; } - /* get the args */ - if ( slapi_pblock_get( pb, SLAPI_PLUGIN_ARGC, &argc ) != 0) { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop failed to get argv\n" ); - return SLAPI_PLUGIN_FAILURE; - } - if ( slapi_pblock_get( pb, SLAPI_PLUGIN_ARGV, &argv ) != 0) { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop failed to get argv\n" ); - return SLAPI_PLUGIN_FAILURE; - } - if(argv == NULL){ - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop_modrdn, args are NULL\n" ); - return SLAPI_PLUGIN_FAILURE; - } - if (argc >= 3) { - /* argv[0] will always be the delay */ - delay = atoi(argv[0]); - - /* argv[2] will be wether or not to log changes */ - logChanges = atoi(argv[2]); - } else { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop_modrdn insufficient arguments supplied\n" ); - return SLAPI_PLUGIN_FAILURE; - } + referint_get_config(&delay, &logChanges, NULL); if(delay == -1){ /* integrity updating is off */ rc = SLAPI_PLUGIN_SUCCESS; } else if(delay == 0){ /* no delay */ /* call function to update references to entry */ - if (!plugin_EntryScope) { - /* no scope definde, default always process refint */ - rc = update_integrity(argv, sdn, newrdn, newsuperior, logChanges); - } else { - const char *newsuperiordn = slapi_sdn_get_dn(newsuperior); - if ( (newsuperiordn == NULL && slapi_sdn_issuffix(sdn, plugin_EntryScope)) || - ( newsuperiordn && slapi_sdn_issuffix(newsuperior, plugin_EntryScope))) { - /* it is a modrdn inside the scope or into the scope, + if (!plugin_EntryScope) { + /* no scope defined, default always process referint */ + rc = update_integrity(sdn, newrdn, newsuperior, logChanges); + } else { + const char *newsuperiordn = slapi_sdn_get_dn(newsuperior); + if ( (newsuperiordn == NULL && slapi_sdn_issuffix(sdn, plugin_EntryScope)) || + ( newsuperiordn && slapi_sdn_issuffix(newsuperior, plugin_EntryScope))) + { + /* + * It is a modrdn inside the scope or into the scope, * process normal modrdn */ - rc = update_integrity(argv, sdn, newrdn, newsuperior, logChanges); - } else if (slapi_sdn_issuffix(sdn, plugin_EntryScope)) { - /* the entry is moved out of scope, treat as delete */ - rc = update_integrity(argv, sdn, NULL, NULL, logChanges); + rc = update_integrity(sdn, newrdn, newsuperior, logChanges); + } else if (slapi_sdn_issuffix(sdn, plugin_EntryScope)) { + /* the entry is moved out of scope, treat as delete */ + rc = update_integrity(sdn, NULL, NULL, logChanges); + } } - } } else { /* write the entry to integrity log */ - writeintegritylog(pb, argv[1], sdn, newrdn, newsuperior, NULL /* slapi_get_requestor_sdn(pb) */); + logfile = referint_get_logfile(); + writeintegritylog(pb, logfile, sdn, newrdn, newsuperior, NULL /* slapi_get_requestor_sdn(pb) */); rc = SLAPI_PLUGIN_SUCCESS; } + slapi_ch_free_string(&logfile); return( rc ); } @@ -741,7 +1080,7 @@ bail: } int -update_integrity(char **argv, Slapi_DN *origSDN, +update_integrity(Slapi_DN *origSDN, char *newrDN, Slapi_DN *newsuperior, int logChanges) { @@ -756,17 +1095,13 @@ update_integrity(char **argv, Slapi_DN *origSDN, char *attrName = NULL; char *filter = NULL; char *attrs[2]; + char **membership_attrs = NULL; int search_result; int nval = 0; int 
i, j; int rc = SLAPI_PLUGIN_SUCCESS; - if ( argv == NULL ){ - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop required config file arguments missing\n" ); - rc = SLAPI_PLUGIN_FAILURE; - goto free_and_return; - } + membership_attrs = referint_get_attrs(); /* * For now, just putting attributes to keep integrity on in conf file, * until resolve the other timing mode issue @@ -786,16 +1121,16 @@ update_integrity(char **argv, Slapi_DN *origSDN, Slapi_Backend *be = slapi_be_select(sdn); search_base = slapi_sdn_get_dn( sdn ); - for(i = 3; argv[i] != NULL; i++){ + for(i = 0; membership_attrs[i] != NULL; i++){ if(newrDN){ /* we need to check the children of the old dn, so use a wildcard */ - filter = slapi_filter_sprintf("(%s=*%s%s)", argv[i], ESC_NEXT_VAL, origDN); + filter = slapi_filter_sprintf("(%s=*%s%s)", membership_attrs[i], ESC_NEXT_VAL, origDN); } else { - filter = slapi_filter_sprintf("(%s=%s%s)", argv[i], ESC_NEXT_VAL, origDN); + filter = slapi_filter_sprintf("(%s=%s%s)", membership_attrs[i], ESC_NEXT_VAL, origDN); } if ( filter ) { /* Need only the current attribute and its subtypes */ - attrs[0] = argv[i]; + attrs[0] = membership_attrs[i]; attrs[1] = NULL; /* Use new search API */ @@ -829,7 +1164,7 @@ update_integrity(char **argv, Slapi_DN *origSDN, * in argv[i] having the necessary value - origDN */ slapi_attr_get_type(attr, &attrName); - if (slapi_attr_type_cmp(argv[i], attrName, + if (slapi_attr_type_cmp(membership_attrs[i], attrName, SLAPI_TYPE_CMP_SUBTYPE) == 0) { nval = 0; @@ -891,6 +1226,7 @@ update_integrity(char **argv, Slapi_DN *origSDN, free_and_return: /* free filter and search_results_pb */ slapi_ch_free_string(&filter); + slapi_ch_array_free(membership_attrs); slapi_pblock_destroy(mod_pb); if (search_result_pb) { @@ -903,61 +1239,102 @@ free_and_return: int referint_postop_start( Slapi_PBlock *pb) { - char **argv; - int argc = 0; - - /* get args */ - if ( slapi_pblock_get( pb, SLAPI_PLUGIN_ARGC, &argc ) != 0 ) { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop failed to get argv\n" ); - return( -1 ); - } - if ( slapi_pblock_get( pb, SLAPI_PLUGIN_ARGV, &argv ) != 0 ) { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop failed to get argv\n" ); - return( -1 ); - } - if(argv == NULL){ - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "args were null in referint_postop_start\n" ); - return( -1 ); + Slapi_Entry *plugin_entry = NULL; + Slapi_Entry *config_e = NULL; + Slapi_PBlock *search_pb = NULL; + Slapi_Entry *e = NULL; + Slapi_DN *config_sdn = NULL; + char *config_area = NULL; + int result = 0; + int rc = 0; + + if((config_rwlock = slapi_new_rwlock()) == NULL){ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "referint_postop_init failed to " + "create rwlock.\n" ); + return ( -1 ); } + + slapi_pblock_get( pb, SLAPI_ADD_ENTRY, &plugin_entry ); + + /* Set the alternate config area if one is defined. 
*/ + slapi_pblock_get(pb, SLAPI_PLUGIN_CONFIG_AREA, &config_area); + if (config_area) + { + rc = slapi_dn_syntax_check(pb, config_area, 1); + if (rc) { /* syntax check failed */ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "referint_postop_start: " + "%s does not contain a valid DN (%s)\n", + SLAPI_PLUGIN_SHARED_CONFIG_AREA, config_area); + rc = LDAP_INVALID_DN_SYNTAX; + goto bail; + } + config_sdn = slapi_sdn_new_dn_byval(config_area); + result = slapi_search_internal_get_entry(config_sdn, NULL, &e, referint_plugin_identity); + if (LDAP_SUCCESS != result) { + if (result == LDAP_NO_SUCH_OBJECT) { + /* log an error and use the plugin entry for the config */ + slapi_log_error(SLAPI_LOG_PLUGIN, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop_start: Config entry \"%s\" does " + "not exist.\n", config_area); + rc = -1; + goto bail; + } + } else { + if(e){ + config_e = e; + } else { + slapi_log_error(SLAPI_LOG_PLUGIN, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop_start: Config entry \"%s\" was not located.\n", config_area); + rc = -1; + goto bail; + } + } + } else { + config_e = plugin_entry; + } + if(load_config(pb, config_e, STARTUP) != LDAP_SUCCESS){ + rc = -1; + goto bail; + } + referint_set_config_area(slapi_entry_get_sdn(config_e)); + /* * Only bother to start the thread if you are in delay mode. * 0 = no delay, * -1 = integrity off */ - if (argc >= 1) { - if(atoi(argv[0]) > 0){ - /* initialize the cv and lock */ - if (!use_txn && (NULL == referint_mutex)) { - referint_mutex = PR_NewLock(); - } - keeprunning_mutex = PR_NewLock(); - keeprunning_cv = PR_NewCondVar(keeprunning_mutex); - keeprunning =1; - - referint_tid = PR_CreateThread (PR_USER_THREAD, - referint_thread_func, - (void *)argv, - PR_PRIORITY_NORMAL, - PR_GLOBAL_THREAD, - PR_UNJOINABLE_THREAD, - SLAPD_DEFAULT_THREAD_STACKSIZE); - if ( referint_tid == NULL ) { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop_start PR_CreateThread failed\n" ); - exit( 1 ); - } + if(referint_get_delay() > 0){ + /* initialize the cv and lock */ + if (!use_txn && (NULL == referint_mutex)) { + referint_mutex = PR_NewLock(); + } + keeprunning_mutex = PR_NewLock(); + keeprunning_cv = PR_NewCondVar(keeprunning_mutex); + keeprunning =1; + + referint_tid = PR_CreateThread (PR_USER_THREAD, + referint_thread_func, + NULL, + PR_PRIORITY_NORMAL, + PR_GLOBAL_THREAD, + PR_UNJOINABLE_THREAD, + SLAPD_DEFAULT_THREAD_STACKSIZE); + if ( referint_tid == NULL ) { + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "referint_postop_start PR_CreateThread failed\n" ); + exit( 1 ); } - } else { - slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_postop_start insufficient arguments supplied\n" ); - return( -1 ); } refint_started = 1; - return(0); + +bail: + slapi_free_search_results_internal(search_pb); + slapi_pblock_destroy(search_pb); + slapi_sdn_free(&config_sdn); + slapi_entry_free(e); + + return rc; } int referint_postop_close( Slapi_PBlock *pb) @@ -972,6 +1349,8 @@ int referint_postop_close( Slapi_PBlock *pb) PR_Unlock(keeprunning_mutex); } + slapi_destroy_rwlock(config_rwlock); + refint_started = 0; return(0); } @@ -980,8 +1359,7 @@ void referint_thread_func(void *arg) { PRFileDesc *prfd = NULL; - char **plugin_argv = (char **)arg; - char *logfilename; + char *logfilename = NULL; char thisline[MAX_LINE]; char delimiter[]="\t\n"; char *ptoken; @@ -993,21 +1371,17 @@ referint_thread_func(void *arg) int delay; int no_changes; - if(plugin_argv == NULL){ - slapi_log_error( 
SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, - "referint_thread_func not get args \n" ); - return; - } - - delay = atoi(plugin_argv[0]); - logfilename = plugin_argv[1]; - logChanges = atoi(plugin_argv[2]); /* * keep running this thread until plugin is signaled to close */ while(1){ + /* refresh the config */ + slapi_ch_free_string(&logfilename); + referint_get_config(&delay, &logChanges, &logfilename); + no_changes=1; while(no_changes){ + PR_Lock(keeprunning_mutex); if(keeprunning == 0){ PR_Unlock(keeprunning_mutex); @@ -1064,7 +1438,7 @@ referint_thread_func(void *arg) } } - update_integrity(plugin_argv, sdn, tmprdn, tmpsuperior, logChanges); + update_integrity(sdn, tmprdn, tmpsuperior, logChanges); slapi_sdn_free(&sdn); slapi_ch_free_string(&tmprdn); @@ -1098,6 +1472,7 @@ referint_thread_func(void *arg) if (NULL != keeprunning_cv) { PR_DestroyCondVar(keeprunning_cv); } + slapi_ch_free_string(&logfilename); } int my_fgetc(PRFileDesc *stream) @@ -1267,3 +1642,122 @@ writeintegritylog(Slapi_PBlock *pb, char *logfilename, Slapi_DN *sdn, } referint_unlock(); } + +static int +referint_preop_init(Slapi_PBlock *pb) +{ + int status = 0; + + if (slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01) != 0 || + slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *) &pdesc) != 0 || + slapi_pblock_set(pb, premodfn, (void *)referint_validate_config) != 0) + { + slapi_log_error(SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, + "referint_preop_init: failed to register plugin\n"); + status = -1; + } + + return status; +} + +/* + * This is our preop function to validate a config update, postop modify + * will apply the config change. + */ +int +referint_validate_config(Slapi_PBlock *pb) +{ + Slapi_Entry *config_e = NULL, *e = NULL; + Slapi_Entry *pre_entry = NULL; + Slapi_DN *config_sdn = NULL; + Slapi_DN *sdn = NULL; + Slapi_Mods *smods = NULL; + LDAPMod **mods = NULL; + char *config_area = NULL; + int rc = SLAPI_PLUGIN_SUCCESS; + + slapi_pblock_get(pb, SLAPI_TARGET_SDN, &sdn); + slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &pre_entry); + + if (referint_sdn_config_cmp(sdn) == 0 && slapi_sdn_compare(sdn, referint_get_plugin_area()) ){ + /* + * This is the shared config entry. Apply the mods and set/validate + * the config + */ + slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods); + smods = slapi_mods_new(); + slapi_mods_init_byref(smods, mods); + + /* Apply the mods to create the resulting entry. */ + if (mods && (slapi_entry_apply_mods(pre_entry, mods) != LDAP_SUCCESS)) { + /* we don't care about this, the update is invalid and will be caught later */ + goto bail; + } + + if ( SLAPI_PLUGIN_FAILURE == load_config(pb, pre_entry, 0)) { + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "referint_validate_config: " + "configuration validation failed.\n"); + rc = LDAP_UNWILLING_TO_PERFORM; + } + } else if (slapi_sdn_compare(sdn, referint_get_plugin_area()) == 0){ + /* + * Check if the plugin config area is set(verify it and load its config), + * otherwise reload the plugin entry config + */ + slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods); + smods = slapi_mods_new(); + slapi_mods_init_byref(smods, mods); + + /* Apply the mods to create the resulting entry. 
*/ + if (mods && (slapi_entry_apply_mods(pre_entry, mods) != LDAP_SUCCESS)) { + /* we don't care about this, the update is invalid and will be caught later */ + goto bail; + } + + if((config_area = slapi_entry_attr_get_charptr(pre_entry, SLAPI_PLUGIN_SHARED_CONFIG_AREA))){ + rc = slapi_dn_syntax_check(pb, config_area, 1); + if (rc) { /* syntax check failed */ + slapi_log_error( SLAPI_LOG_FATAL, REFERINT_PLUGIN_SUBSYSTEM, "referint_validate_config: " + "%s does not contain a valid DN (%s)\n", + SLAPI_PLUGIN_SHARED_CONFIG_AREA, config_area); + rc = LDAP_INVALID_DN_SYNTAX; + goto bail; + } + config_sdn = slapi_sdn_new_dn_byval(config_area); + rc = slapi_search_internal_get_entry(config_sdn, NULL, &e, referint_plugin_identity); + if (LDAP_SUCCESS != rc) { + /* log an error and use the plugin entry for the config */ + slapi_log_error(SLAPI_LOG_PLUGIN, REFERINT_PLUGIN_SUBSYSTEM, + "referint_validate_config: Config entry \"%s\" couild not be found, error %d\n", + config_area, rc); + rc = LDAP_OPERATIONS_ERROR; + goto bail; + } else { + if(e){ + config_e = e; + } else { + slapi_log_error(SLAPI_LOG_PLUGIN, REFERINT_PLUGIN_SUBSYSTEM, + "referint_validate_config: Config entry \"%s\" was not located.\n", config_area); + rc = LDAP_OPERATIONS_ERROR; + goto bail; + } + } + } else { + config_e = pre_entry; + } + if(load_config(pb, config_e, 0) != LDAP_SUCCESS){ + rc = LDAP_UNWILLING_TO_PERFORM; + goto bail; + } + referint_set_config_area(slapi_entry_get_sdn(config_e)); + } + +bail: + slapi_entry_free(e); + slapi_sdn_free(&config_sdn); + slapi_ch_free_string(&config_area); + slapi_mods_free(&smods); + + return rc; +} +
0
3deb6ad7d19774feb15c46f925059efe19462098
389ds/389-ds-base
Issue 50954 - Port buildnum.pl to python Description: Replace buildnum.pl with a python version so we can continue to remove perl dependencies. relates: https://pagure.io/389-ds-base/issue/50954 Reviewed by: firstyear(Thanks!)
commit 3deb6ad7d19774feb15c46f925059efe19462098 Author: Mark Reynolds <[email protected]> Date: Fri Mar 13 19:19:33 2020 -0400 Issue 50954 - Port buildnum.pl to python Description: Replace buildnum.pl with a python version so we can continue to remove perl dependencies. relates: https://pagure.io/389-ds-base/issue/50954 Reviewed by: firstyear(Thanks!) diff --git a/Makefile.am b/Makefile.am index 9180d170c..1e88a38a9 100644 --- a/Makefile.am +++ b/Makefile.am @@ -13,7 +13,7 @@ QUOTE := $(NULLSTRING)"# a double quote" # PYTHON := python3 -BUILDNUM := $(shell perl $(srcdir)/buildnum.pl) +BUILDNUM := $(shell $(srcdir)/buildnum.py) NQBUILDNUM := $(subst \,,$(subst $(QUOTE),,$(BUILDNUM))) DEBUG_DEFINES = @debug_defs@ DEBUG_CFLAGS = @debug_cflags@ @@ -627,7 +627,7 @@ dist_noinst_HEADERS += \ endif dist_noinst_DATA = \ - $(srcdir)/buildnum.pl \ + $(srcdir)/buildnum.py \ $(srcdir)/ldap/admin/src/*.in \ $(srcdir)/ldap/admin/src/scripts/*.in \ $(srcdir)/ldap/admin/src/scripts/*.ldif \ diff --git a/buildnum.pl b/buildnum.pl deleted file mode 100755 index 8db83da53..000000000 --- a/buildnum.pl +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/perl -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. -# Copyright (C) 2005 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- - -#-------------------------------------------- -# buildnum.pl -# -# Generates a dated build number and writes -# out a buildnum.dat file in a user specified -# subdirectory. -# -# Usage: buildnum.pl -p <platform dir> -#-------------------------------------------- - -use Getopt::Std; -use FileHandle; - -autoflush STDERR 1; - -getopts('p:H'); - -if ($opt_H) {exitHelp();} - -# Load arguments -$platdir = $opt_p; - -# Get current time -@now = gmtime($ENV{SOURCE_DATE_EPOCH} || time); - -# Format buildnum as YYYY.DDD.HHMM -$year = $now[5] + 1900; -$doy = $now[7] + 1; -if ($doy < 100) { $doy = 0 . $doy; } -$tod = $now[2] . $now[1]; -$buildnum = "$year.$doy.$tod"; - -if ($platdir) { - # Write buildnum.dat - $buildnum_file = "./$platdir/buildnum.dat"; - open(BUILDNUM,">$buildnum_file") || die "Error: Can't create $buildnum_file: $!\n"; - print BUILDNUM "\\\"$buildnum\\\""; - close(BUILDNUM); -} else { - print "\\\"$buildnum\\\""; -} - -#---------- exitHelp subroutine ---------- -sub exitHelp { - print(STDERR "$0: Generates a dated build number. - - \tUsage: $0 -p <platform> - - \t-p <platform> Platform subdirectory. - \t-H Print this help message\n"); - exit(0); -} diff --git a/buildnum.py b/buildnum.py new file mode 100755 index 000000000..8b7226c59 --- /dev/null +++ b/buildnum.py @@ -0,0 +1,28 @@ +#!/usr/bin/python3 +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +# Generate a build number in the format YYYY.DDD.HHMM + +import os +import time + +SDE = os.getenv('SOURCE_DATE_EPOCH') +if SDE is not None: + obj = time.gmtime(SDE) +else: + obj = time.gmtime() + +year = obj[0] +doy = obj[7] +if doy < 100: + doy = "0" + str(doy) +tod = str(obj[3]) + str(obj[4]) +buildnum = f"{year}.{doy}.{tod}" + +print(f'\\"{buildnum}\\"', end = '')
0
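As a quick illustration of the YYYY.DDD.HHMM stamp that the new buildnum.py above produces, here is a minimal standalone sketch; converting SOURCE_DATE_EPOCH with int() and zero-padding the day/hour/minute fields are assumptions made for readability here, not behaviour taken verbatim from the committed script.

#!/usr/bin/python3
# Minimal sketch of a YYYY.DDD.HHMM build stamp honouring SOURCE_DATE_EPOCH
# for reproducible builds (the int() conversion and zero-padding are assumptions).
import os
import time

sde = os.getenv('SOURCE_DATE_EPOCH')
t = time.gmtime(int(sde)) if sde is not None else time.gmtime()
buildnum = '{:04d}.{:03d}.{:02d}{:02d}'.format(t.tm_year, t.tm_yday, t.tm_hour, t.tm_min)
print('\\"{}\\"'.format(buildnum), end='')

Run at build time this prints something like \"2020.073.1919\", which the Makefile rule shown above captures into BUILDNUM just as it previously captured the output of buildnum.pl.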
48046e504ee12d5349cc3d5865d24196cc427fe6
389ds/389-ds-base
[164596] LDCLT distributed with Directory Server Fixed broken build on Solaris. 1) removed redeclared getsubopt in port.h 2) defined _XOPEN_SOURCE 500 for Linux
commit 48046e504ee12d5349cc3d5865d24196cc427fe6 Author: Noriko Hosoi <[email protected]> Date: Fri Jan 27 03:05:12 2006 +0000 [164596] LDCLT distributed with Directory Server Fixed broken build on Solaris. 1) removed redeclared getsubopt in port.h 2) defined _XOPEN_SOURCE 500 for Linux diff --git a/ldap/servers/slapd/tools/ldclt/Makefile b/ldap/servers/slapd/tools/ldclt/Makefile index 995e865a5..687950f0c 100644 --- a/ldap/servers/slapd/tools/ldclt/Makefile +++ b/ldap/servers/slapd/tools/ldclt/Makefile @@ -102,7 +102,11 @@ INC_PLUGINS = \ ADDLIBS = $(LDAPLIBS) $(SPEC_LIBS) $(SYSTEM_LIBS) $(END) -CFLAGS+=-DLDAP_DONT_USE_SMARTHEAP +CFLAGS+=-DLDAP_DONT_USE_SMARTHEAP + +ifeq ($(ARCH), Linux) +CFLAGS+=-D_XOPEN_SOURCE=500 +endif EXTRA_LIBS_DEP = $(LDAPSDK_DEP) $(DB_LIB_DEP) $(LDAP_COMMON_LIBS_DEP) diff --git a/ldap/servers/slapd/tools/ldclt/port.h b/ldap/servers/slapd/tools/ldclt/port.h index de3c4e034..e247936c4 100644 --- a/ldap/servers/slapd/tools/ldclt/port.h +++ b/ldap/servers/slapd/tools/ldclt/port.h @@ -104,8 +104,6 @@ extern int optind; #else /* _WIN32 */ -extern int getsubopt(char **optionp, char **tokens, char **valuep); - /************************************************************************/ /************************************************************************/ /**************** Unix section ***********************/
0
3cdc7d8234ee8c6ae7fd3187e53abc3978a31faf
389ds/389-ds-base
Issue 5798 - CLI - Add multi-valued support to dsconf config (#5799) Description: Currently, we have two editable multi-valued attributes in cn=config: nsslapd-haproxy-trusted-ip and nsslapd-referral. Our current cn=config implementation doesn't support batched ADD operations. Make our CLI tools more robust so they can handle multi-valued attributes correctly. Add an add_many method to DSLdapObject. Fixes: https://github.com/389ds/389-ds-base/issues/5798 Reviewed by: @mreynolds389 (Thanks!)
commit 3cdc7d8234ee8c6ae7fd3187e53abc3978a31faf Author: Simon Pichugin <[email protected]> Date: Fri Jun 16 10:05:37 2023 -0700 Issue 5798 - CLI - Add multi-valued support to dsconf config (#5799) Description: Currently, we have two editable multi-valued attributes in cn=config: nsslapd-haproxy-trusted-ip and nsslapd-referral. Our current cn=config implementation doesn't support bunch ADD operations. Make our CLI tools more robust so they can handle multi-valued attributes correctly. Add add_many method to DSLdapObject. Fixes: https://github.com/389ds/389-ds-base/issues/5798 Reviewed by: @mreynolds389 (Thanks!) diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py index 34052a6e3..490e47c0a 100644 --- a/src/lib389/lib389/_mapped_object.py +++ b/src/lib389/lib389/_mapped_object.py @@ -313,6 +313,29 @@ class DSLdapObject(DSLogging, DSLint): self.set(key, value, action=ldap.MOD_ADD) + def add_many(self, *args): + """Add many key, value pairs in a single operation. + This is useful for configuration changes that require + atomic operation, and ease of use. + + An example of usage is add_many((key, value), (key, value)) + + No wrapping list is needed for the arguments. + + :param *args: tuples of key,value to replace. + :type *args: (str, str) + """ + + mods = [] + for arg in args: + if isinstance(arg[1], list) or isinstance(arg[1], tuple): + value = ensure_list_bytes(arg[1]) + else: + value = [ensure_bytes(arg[1])] + mods.append((ldap.MOD_ADD, ensure_str(arg[0]), value)) + return _modify_ext_s(self._instance,self._dn, mods, serverctrls=self._server_controls, + clientctrls=self._client_controls, escapehatch='i am sure') + # Basically what it means; def replace(self, key, value): """Replace an attribute with a value @@ -748,7 +771,7 @@ class DSLdapObject(DSLogging, DSLint): :param key: An attribute name :type key: str - :returns: A single bytes value + :returns: A list of bytes values :raises: ValueError - if instance is offline """ @@ -759,7 +782,7 @@ class DSLdapObject(DSLogging, DSLint): :param key: An attribute name :type key: str - :returns: A single bytes value + :returns: A single UTF8 value :raises: ValueError - if instance is offline """ @@ -770,7 +793,7 @@ class DSLdapObject(DSLogging, DSLint): :param key: An attribute name :type key: str - :returns: A single bytes value + :returns: A single lowered UTF8 value :raises: ValueError - if instance is offline """ @@ -785,7 +808,7 @@ class DSLdapObject(DSLogging, DSLint): :param key: An attribute name :type key: str - :returns: A single bytes value + :returns: A list of UTF8 values :raises: ValueError - if instance is offline """ @@ -796,7 +819,7 @@ class DSLdapObject(DSLogging, DSLint): :param key: An attribute name :type key: str - :returns: A single bytes value + :returns: A list of lowered UTF8 values :raises: ValueError - if instance is offline """ @@ -807,7 +830,7 @@ class DSLdapObject(DSLogging, DSLint): :param key: An attribute name :type key: str - :returns: A single bytes value + :returns: A single int value :raises: ValueError - if instance is offline """ @@ -818,7 +841,7 @@ class DSLdapObject(DSLogging, DSLint): :param key: An attribute name :type key: str - :returns: A single bytes value + :returns: A list of int values :raises: ValueError - if instance is offline """ diff --git a/src/lib389/lib389/cli_conf/config.py b/src/lib389/lib389/cli_conf/config.py index 293851514..97e22cdbc 100644 --- a/src/lib389/lib389/cli_conf/config.py +++ b/src/lib389/lib389/cli_conf/config.py @@ -1,20 +1,22 @@ # --- 
BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2023 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). # See LICENSE for details. # --- END COPYRIGHT BLOCK --- +import ldap +from enum import Enum from lib389.config import Config from lib389.cli_base import ( _generic_get_entry, _generic_get_attr, - _generic_add_attr, _generic_replace_attr, - _generic_del_attr, ) +OpType = Enum("OpType", "add delete") + def _config_display_ldapimaprootdn_warning(log, args): """If we update the rootdn we need to update the ldapi settings too""" @@ -25,6 +27,46 @@ def _config_display_ldapimaprootdn_warning(log, args): "For LDAPI configuration, \"nsslapd-rootdn\" is used instead.") +def _config_get_existing_attrs(conf, args, op_type): + """Get the existing attribute from the server and return them in a dict + so we can add them back after the operation is done. + + For op_type == OpType.delete, we delete them from the server so we can add + back only those that are not specified in the command line. + (i.e delete nsslapd-haproxy-trusted-ip="192.168.0.1", but + nsslapd-haproxy-trusted-ip has 192.168.0.1 and 192.168.0.2 values. + So we want only 192.168.0.1 to be deleted in the end) + """ + + existing_attrs = {} + if args and args.attr: + for attr in args.attr: + if "=" in attr: + [attr_name, val] = attr.split("=", 1) + # We should process only multi-valued attributes this way + if attr_name.lower() == "nsslapd-haproxy-trusted-ip" or \ + attr_name.lower() == "nsslapd-referral": + if attr_name not in existing_attrs.keys(): + existing_attrs[attr_name] = conf.get_attr_vals_utf8(attr_name) + existing_attrs[attr_name] = [x for x in existing_attrs[attr_name] if x != val] + + if op_type == OpType.add: + if existing_attrs[attr_name] == []: + del existing_attrs[attr_name] + + if op_type == OpType.delete: + conf.remove_all(attr_name) + else: + if op_type == OpType.delete: + conf.remove_all(attr) + else: + raise ValueError(f"You must specify a value to add for the attribute ({attr_name})") + return existing_attrs + else: + # Missing value + raise ValueError(f"Missing attribute to {op_type.name}") + + def config_get(inst, basedn, log, args): if args and args.attrs: _generic_get_attr(inst, basedn, log.getChild('config_get'), Config, args) @@ -33,20 +75,50 @@ def config_get(inst, basedn, log, args): _generic_get_entry(inst, basedn, log.getChild('config_get'), Config, args) -def config_add_attr(inst, basedn, log, args): - _generic_add_attr(inst, basedn, log.getChild('config_add_attr'), Config, args) +def config_replace_attr(inst, basedn, log, args): + _generic_replace_attr(inst, basedn, log.getChild('config_replace_attr'), Config, args) _config_display_ldapimaprootdn_warning(log, args) -def config_replace_attr(inst, basedn, log, args): - _generic_replace_attr(inst, basedn, log.getChild('config_replace_attr'), Config, args) +def config_add_attr(inst, basedn, log, args): + conf = Config(inst, basedn) + final_mods = [] + + existing_attrs = _config_get_existing_attrs(conf, args, OpType.add) + + if args and args.attr: + for attr in args.attr: + if "=" in attr: + [attr_name, val] = attr.split("=", 1) + if attr_name in existing_attrs: + for v in existing_attrs[attr_name]: + final_mods.append((attr_name, v)) + final_mods.append((attr_name, val)) + try: + conf.add_many(*set(final_mods)) + except ldap.TYPE_OR_VALUE_EXISTS: + pass + else: + raise ValueError(f"You must specify a value to add for the attribute ({attr_name})") + else: + # Missing value + raise 
ValueError("Missing attribute to add") _config_display_ldapimaprootdn_warning(log, args) def config_del_attr(inst, basedn, log, args): - _generic_del_attr(inst, basedn, log.getChild('config_del_attr'), Config, args) + conf = Config(inst, basedn) + final_mods = [] + + existing_attrs = _config_get_existing_attrs(conf, args, OpType.delete) + + # Then add the attributes back all except the one we need to remove + for attr_name in existing_attrs.keys(): + for val in existing_attrs[attr_name]: + final_mods.append((attr_name, val)) + conf.add_many(*set(final_mods)) def create_parser(subparsers):
0
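A short usage sketch for the add_many() helper added to DSLdapObject in the patch above; it assumes an already-open DirSrv connection named inst (instance setup is not part of the patch), and the attribute values are only examples.

from lib389.config import Config

def add_trusted_ips(inst, ips):
    # Issue a single MOD_ADD carrying all (attr, value) pairs, mirroring how the
    # new dsconf "config add" code ends with conf.add_many(*set(final_mods)).
    conf = Config(inst)
    conf.add_many(*[('nsslapd-haproxy-trusted-ip', ip) for ip in ips])

# add_trusted_ips(inst, ['192.168.0.1', '192.168.0.2'])

From the command line the same change is reachable through the attr=value syntax the patch parses, e.g. dsconf <instance> config add nsslapd-haproxy-trusted-ip=192.168.0.1 (command shape assumed from the argument handling above, not spelled out in the patch).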
1620c9d633806be2938711ba12c2b956a5a008de
389ds/389-ds-base
Revise README for pagure
commit 1620c9d633806be2938711ba12c2b956a5a008de Author: Mark Reynolds <[email protected]> Date: Mon Feb 13 13:49:32 2017 -0500 Revise README for pagure diff --git a/README b/README index 278643fbf..40c2a31aa 100644 --- a/README +++ b/README @@ -6,6 +6,6 @@ The 389 Directory Server is subject to the terms detailed in the license agreement file called LICENSE. Late-breaking news and information on the 389 Directory Server is -available at the following location: +available on our wiki page: http://www.port389.org/
0
e6023cab66ab258e38efbb009c2d58b8649ce989
389ds/389-ds-base
Ticket 51035 - Heavy StartTLS connection load can randomly fail with err=1 Bug Description: startTls pushes a network layer on top of the connection, so while startTLS is being processed there should be no pending operation, otherwise there is a risk that the operation sends its result back over a network layer that is being swapped out. When startTls detects a pending operation it aborts. However, if a new operation is received while startTls is being processed, that operation is pending but cannot be read because startTls holds c_mutex. Fix Description: In the case of an unread pending operation, relax the check and just log an informational message. https://pagure.io/389-ds-base/issue/51035 Reviewed by: Mark Reynolds, William Brown Platforms tested: F30 Flag Day: no Doc impact: no
commit e6023cab66ab258e38efbb009c2d58b8649ce989 Author: Thierry Bordaz <[email protected]> Date: Mon Apr 20 15:17:03 2020 +0200 Ticket 51035 - Heavy StartTLS connection load can randomly fail with err=1 Bug Description: startTls pushes a network layer on top of the connection. So when processing startTLS, there should not be a pending operation else there is a risk that the operation sends back data on moving network layer. When startTls detects a pending operation it aborts startTls. However if a new operation is received while processing startTls, the operation is pending but can not be read because startTls holds c_mutex. Fix Description: In case of unread pending operation, relax the control and just log an information message. https://pagure.io/389-ds-base/issue/51035 Reviewed by: Mark Reynolds, William Brown Platforms tested: F30 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c index 8590e2d40..ff16cd906 100644 --- a/ldap/servers/slapd/operation.c +++ b/ldap/servers/slapd/operation.c @@ -150,8 +150,8 @@ operation_init(Slapi_Operation *o, int flags) /* We can't get rid of this til we remove the operation stack. */ memset(o, 0, sizeof(Slapi_Operation)); o->o_ber = ber; - o->o_msgid = -1; - o->o_tag = LBER_DEFAULT; + o->o_msgid = -1; /* if changed please update start-tls that test this value */ + o->o_tag = LBER_DEFAULT; /* if changed please update start-tls that test this value */ o->o_status = SLAPI_OP_STATUS_PROCESSING; slapi_sdn_init(&(o->o_sdn)); o->o_authtype = NULL; diff --git a/ldap/servers/slapd/start_tls_extop.c b/ldap/servers/slapd/start_tls_extop.c index bfa32b783..f415c41e0 100644 --- a/ldap/servers/slapd/start_tls_extop.c +++ b/ldap/servers/slapd/start_tls_extop.c @@ -188,11 +188,31 @@ start_tls(Slapi_PBlock *pb) /* Check whether the Start TLS request can be accepted. */ if (connection_operations_pending(conn, pb_op, 1 /* check for ops where result not yet sent */)) { - slapi_log_err(SLAPI_LOG_PLUGIN, "start_tls", - "Other operations are still pending on the connection.\n"); - ldaprc = LDAP_OPERATIONS_ERROR; - ldapmsg = "Other operations are still pending on the connection."; - goto unlock_and_return; + for (Operation *op = conn->c_ops; op != NULL; op = op->o_next) { + if (op == pb_op) { + continue; + } + if ((op->o_msgid == -1) && (op->o_tag == LBER_DEFAULT)) { + /* while processing start-tls extop we also received a new incoming operation + * As this operation will not processed until start-tls completes. + * Be fair do not consider this operation as a pending one + */ + slapi_log_err(SLAPI_LOG_CONNS, "start_tls", + "New incoming operation blocked by start-tls, Continue start-tls (conn=%"PRIu64").\n", + conn->c_connid); + continue; + } else { + /* It is problematic, this pending operation is processed and + * start-tls can push new network layer while the operation + * send result. Safest to abort start-tls + */ + slapi_log_err(SLAPI_LOG_CONNS, "start_tls", + "Other operations are still pending on the connection.\n"); + ldaprc = LDAP_OPERATIONS_ERROR; + ldapmsg = "Other operations are still pending on the connection."; + goto unlock_and_return; + } + } }
0
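For context, the "heavy StartTLS connection load" in the ticket above can be approximated from the client side with a sketch like the one below; this is only an illustrative python-ldap load generator against an assumed local test instance, not the project's own reproducer, and certificate checking is relaxed purely for the illustration.

import ldap
from concurrent.futures import ThreadPoolExecutor

URI = 'ldap://localhost:389'  # assumed local test instance

def start_tls_once(_):
    conn = ldap.initialize(URI)
    conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
    conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
    conn.start_tls_s()  # the extended operation whose pending-op check the fix relaxes
    conn.unbind_s()

with ThreadPoolExecutor(max_workers=50) as pool:
    list(pool.map(start_tls_once, range(500)))

Under this kind of concurrency, a new operation can arrive on a connection while its startTLS is still being processed; before the fix that was counted as a pending operation and made the extended operation fail with err=1.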
4cc83693dd072c6e855948ad677000e2a595044e
389ds/389-ds-base
Issue: 50860 - Port Password Policy test cases from TET to python3 - Password grace limit section. Bug Description: Port the Password Policy test cases (password grace limit section) from TET to python3. Relates/Fixes: https://pagure.io/389-ds-base/issue/50860 Author: aborah Reviewed by: Viktor Ashirov
commit 4cc83693dd072c6e855948ad677000e2a595044e Author: Anuj Borah <[email protected]> Date: Wed Mar 18 14:45:56 2020 +0530 Issue: 50860 - Port Password Policy test cases from TET to python3 Password grace limit section. Bug Description: Port Password Policy test cases from TET to python3 Password grace limit section. Relates/Fixes: https://pagure.io/389-ds-base/issue/50860 Author: aborah Reviewed by: Viktor Ashirov diff --git a/dirsrvtests/tests/suites/password/pwp_gracel_test.py b/dirsrvtests/tests/suites/password/pwp_gracel_test.py new file mode 100644 index 000000000..980a1b69d --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwp_gracel_test.py @@ -0,0 +1,123 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +""" + +import os +import pytest +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX +from lib389.config import Config +import ldap +import time + +pytestmark = pytest.mark.tier1 + + +def test_password_gracelimit_section(topo): + """Password grace limit section. + + :id: d6f4a7fa-473b-11ea-8766-8c16451d917c + :setup: Standalone + :steps: + 1. Resets the default password policy + 2. Turning on password expiration, passwordMaxAge: 30 and passwordGraceLimit: 7 + 3. Check users have 7 grace login attempts after their password expires + 4. Reset the user passwords to start the clock + 5. The the 8th should fail + 6. Now try resetting the password before the grace login attempts run out + 7. Bind 6 times, and on the 7th change the password + 8. Setting passwordMaxAge: 1 and passwordGraceLimit: 7 + 9. Modify the users passwords to start the clock of zero + 10. First 7 good attempts, 8th should fail + 11. Setting the passwordMaxAge to 3 seconds once more and the passwordGraceLimit to 0 + 12. Modify the users passwords to start the clock + 13. Users should be blocked automatically after 3 second + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. 
Success + """ + config = Config(topo.standalone) + # Resets the default password policy + config.replace_many( + ('passwordmincategories', '1'), + ('passwordStorageScheme', 'CLEAR')) + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None).create_test_user() + # Turning on password expiration, passwordMaxAge: 30 and passwordGraceLimit: 7 + config.replace_many( + ('passwordMaxAge', '3'), + ('passwordGraceLimit', '7'), + ('passwordexp', 'on'), + ('passwordwarning', '30')) + # Reset the user passwords to start the clock + # Check users have 7 grace login attempts after their password expires + user.replace('userpassword', '00fr3d1') + for _ in range(3): + time.sleep(1) + user_account = UserAccount(topo.standalone, user.dn) + for _ in range(7): + conn = user_account.bind('00fr3d1') + # The the 8th should fail + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = user_account.bind('00fr3d1') + # Now try resetting the password before the grace login attempts run out + user.replace('userpassword', '00fr3d2') + for _ in range(3): + time.sleep(1) + user_account = UserAccount(topo.standalone, user.dn) + # Bind 6 times, and on the 7th change the password + for _ in range(6): + conn = user_account.bind('00fr3d2') + user.replace('userpassword', '00fr3d1') + for _ in range(3): + time.sleep(1) + for _ in range(7): + conn = user_account.bind('00fr3d1') + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = user_account.bind('00fr3d1') + # Setting passwordMaxAge: 1 and passwordGraceLimit: 7 + config.replace_many( + ('passwordMaxAge', '1'), + ('passwordwarning', '1')) + # Modify the users passwords to start the clock of zero + user.replace('userpassword', '00fr3d2') + time.sleep(1) + # First 7 good attempts, 8th should fail + user_account = UserAccount(topo.standalone, user.dn) + for _ in range(7): + conn = user_account.bind('00fr3d2') + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = user_account.bind('00fr3d2') + # Setting the passwordMaxAge to 3 seconds once more and the passwordGraceLimit to 0 + config.replace_many( + ('passwordMaxAge', '3'), + ('passwordGraceLimit', '0')) + # Modify the users passwords to start the clock + # Users should be blocked automatically after 3 second + user.replace('userpassword', '00fr3d1') + for _ in range(3): + time.sleep(1) + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = user_account.bind('00fr3d1') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) \ No newline at end of file
0
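The test above walks through the grace-limit behaviour step by step; a condensed sketch of the core pattern is below, using only the lib389 calls that already appear in the test (the topology fixture, password value and timings are assumed to match the standalone setup used there).

import time
import ldap
from lib389._constants import DEFAULT_SUFFIX
from lib389.config import Config
from lib389.idm.user import UserAccounts, UserAccount

def count_grace_binds(topo, password='00fr3d1'):
    """Expire a test user's password, then count successful binds until the
    server rejects them; the count should match passwordGraceLimit."""
    inst = topo.standalone
    Config(inst).replace_many(('passwordexp', 'on'),
                              ('passwordMaxAge', '3'),
                              ('passwordGraceLimit', '7'))
    user = UserAccounts(inst, DEFAULT_SUFFIX, rdn=None).create_test_user()
    user.replace('userpassword', password)
    time.sleep(4)  # let the 3 second passwordMaxAge elapse
    account = UserAccount(inst, user.dn)
    binds = 0
    try:
        while True:
            account.bind(password)
            binds += 1
    except ldap.INVALID_CREDENTIALS:
        pass
    return binds

With the settings above, the function should return 7 grace logins before the bind is refused, which is exactly what the first section of the test asserts.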
6290f68b69504be64aa105ffe497da129c3095f9
389ds/389-ds-base
Modify NTDS and PassSync to build in the 'built' directory.
commit 6290f68b69504be64aa105ffe497da129c3095f9 Author: Thomas Lackey <[email protected]> Date: Mon May 16 19:41:38 2005 +0000 Modify NTDS and PassSync to build in the 'built' directory. diff --git a/ldap/servers/ntds/wrapper/Makefile b/ldap/servers/ntds/wrapper/Makefile index 4dcf355e7..30291d3c4 100644 --- a/ldap/servers/ntds/wrapper/Makefile +++ b/ldap/servers/ntds/wrapper/Makefile @@ -80,9 +80,9 @@ layout: $(OBJDEST) $(PRODUCT).msi: layout - $(CP) $(OBJDEST)/* wix + $(CP) wix/Binary $(OBJDEST)/ # Do not allow MAKEFLAGS or other environment variables to influence nmake. - env -i PATH="${PATH}" LIB="${LIB}" INCLUDE="${INCLUDE}" BUILD_DEBUG=${BUILD_DEBUG} build.bat + env -i PATH="${PATH}" LIB="${LIB}" INCLUDE="${INCLUDE}" BUILD_DEBUG=${BUILD_DEBUG} OBJDEST=${OBJDEST} build.bat clean: $(RM) wix/$(PRODUCT)*.msi wix/ntds.wixobj diff --git a/ldap/servers/ntds/wrapper/build.bat b/ldap/servers/ntds/wrapper/build.bat index d770dfba8..30d18d520 100644 --- a/ldap/servers/ntds/wrapper/build.bat +++ b/ldap/servers/ntds/wrapper/build.bat @@ -19,19 +19,19 @@ if NOT [%BUILD_DEBUG%] == [] ( set PATH=%PATH%;%CD%\%LIBROOT%\wix +set WXSLOC=%CD%\wix +echo %WXSLOC% + +cd %OBJDEST% + set OK=0 -cd wix -candle ntds.wxs +candle %WXSLOC%\ntds.wxs set /a OK=%OK% + %ERRORLEVEL% light ntds.wixobj set /a OK=%OK% + %ERRORLEVEL% -if NOT [%BUILD_DEBUG%] == [] ( - if EXIST ntds.msi (move /Y ntds.msi ntds-%BUILD_DEBUG%.msi) -) - :END popd if %OK% GTR 1 (set OK=1) diff --git a/ldap/synctools/Makefile b/ldap/synctools/Makefile index 1f1283d56..f45183fb2 100644 --- a/ldap/synctools/Makefile +++ b/ldap/synctools/Makefile @@ -44,10 +44,12 @@ BUILD_ROOT = ../../ include $(BUILD_ROOT)/nsdefs.mk include $(BUILD_ROOT)/nsconfig.mk +OBJDEST=../$(OBJDIR)/passsync + all: passsync passsync: ifeq ($(ARCH), WINNT) # Do not allow MAKEFLAGS or other environment variables to influence nmake. - cd passwordsync; env -i PATH="${PATH}" LIB="${LIB}" INCLUDE="${INCLUDE}" BUILD_DEBUG=${BUILD_DEBUG} build.bat + cd passwordsync; env -i PATH="${PATH}" LIB="${LIB}" INCLUDE="${INCLUDE}" BUILD_DEBUG=${BUILD_DEBUG} OBJDEST=`echo ${OBJDEST} | sed 's/\//\\\\/g'` build.bat endif diff --git a/ldap/synctools/passwordsync/build.bat b/ldap/synctools/passwordsync/build.bat index 5ffdbc653..68ca9d189 100644 --- a/ldap/synctools/passwordsync/build.bat +++ b/ldap/synctools/passwordsync/build.bat @@ -41,6 +41,9 @@ pushd +rem Convert %OBJEST% to absolute. +call :relative %OBJDEST% + if [%BUILD_DEBUG%] == [optimize] ( set LIBROOT=..\..\..\..\dist\WINNT5.0_OPT.OBJ ) else ( @@ -49,6 +52,8 @@ if [%BUILD_DEBUG%] == [optimize] ( echo %LIBROOT% +set WXSDIR=%CD%\wix + set INCLUDE=%INCLUDE%;%CD%\%LIBROOT%\ldapsdk\include;%CD%\%LIBROOT%\nspr\include;%CD%\%LIBROOT%\nss\include set LIB=%LIB%;%CD%\%LIBROOT%\ldapsdk\lib;%CD%\%LIBROOT%\nspr\lib;%CD%\%LIBROOT%\nss\lib set PATH=%PATH%;%CD%\%LIBROOT%\wix @@ -56,67 +61,68 @@ set PATH=%PATH%;%CD%\%LIBROOT%\wix set OK=0 cd passsync +echo Entering %CD% :BUILD nmake passsync.mak set /a OK=%OK% + %ERRORLEVEL% -copy /Y Debug\passsync.exe ..\Wix +copy /Y %OBJDEST%\passsync\passsync.exe %OBJDEST%\ set /a OK=%OK% + %ERRORLEVEL% cd ..\passhook +echo Entering %CD% nmake passhook.mak set /a OK=%OK% + %ERRORLEVEL% -copy /Y Debug\passhook.dll ..\Wix +copy /Y %OBJDEST%\passhook\passhook.dll %OBJDEST%\ set /a OK=%OK% + %ERRORLEVEL% :PKG -if NOT EXIST ..\Wix ( - echo ERROR: Cannot find Wix folder. 
- set OK=1 - goto :END ) - -cd ..\Wix if EXIST ..\%LIBROOT%\ldapsdk\lib\nsldap32v50.dll ( - copy /Y ..\%LIBROOT%\ldapsdk\lib\nsldap32v50.dll + copy /Y ..\%LIBROOT%\ldapsdk\lib\nsldap32v50.dll %OBJDEST%\ ) if EXIST ..\%LIBROOT%\ldapsdk\lib\nsldapssl32v50.dll ( - copy /Y ..\%LIBROOT%\ldapsdk\lib\nsldapssl32v50.dll + copy /Y ..\%LIBROOT%\ldapsdk\lib\nsldapssl32v50.dll %OBJDEST%\ ) if EXIST ..\%LIBROOT%\ldapsdk\lib\nsldappr32v50.dll ( - copy /Y ..\%LIBROOT%\ldapsdk\lib\nsldappr32v50.dll + copy /Y ..\%LIBROOT%\ldapsdk\lib\nsldappr32v50.dll %OBJDEST%\ ) if EXIST ..\%LIBROOT%\nspr\lib\libnspr4.dll ( - copy /Y ..\%LIBROOT%\nspr\lib\libnspr4.dll + copy /Y ..\%LIBROOT%\nspr\lib\libnspr4.dll %OBJDEST%\ ) if EXIST ..\%LIBROOT%\nspr\lib\libplds4.dll ( - copy /Y ..\%LIBROOT%\nspr\lib\libplds4.dll + copy /Y ..\%LIBROOT%\nspr\lib\libplds4.dll %OBJDEST%\ ) if EXIST ..\%LIBROOT%\nspr\lib\libplc4.dll ( - copy /Y ..\%LIBROOT%\nspr\lib\libplc4.dll + copy /Y ..\%LIBROOT%\nspr\lib\libplc4.dll %OBJDEST%\ ) if EXIST ..\%LIBROOT%\nss\lib\nss3.dll ( - copy /Y ..\%LIBROOT%\nss\lib\nss3.dll + copy /Y ..\%LIBROOT%\nss\lib\nss3.dll %OBJDEST%\ ) if EXIST ..\%LIBROOT%\nss\lib\ssl3.dll ( - copy /Y ..\%LIBROOT%\nss\lib\ssl3.dll + copy /Y ..\%LIBROOT%\nss\lib\ssl3.dll %OBJDEST%\ ) if EXIST ..\%LIBROOT%\nss\lib\softokn3.dll ( - copy /Y ..\%LIBROOT%\nss\lib\softokn3.dll + copy /Y ..\%LIBROOT%\nss\lib\softokn3.dll %OBJDEST%\ ) -candle PassSync.wxs +xcopy /E /Y /I %WXSDIR%\Binary %OBJDEST%\Binary + +cd %OBJDEST% +echo Entering %CD% + +candle %WXSDIR%\PassSync.wxs set /a OK=%OK% + %ERRORLEVEL% light PassSync.wixobj set /a OK=%OK% + %ERRORLEVEL% -if NOT [%BUILD_DEBUG%] == [] ( - if EXIST PassSync.msi (move /Y PassSync.msi PassSync-%BUILD_DEBUG%.msi) -) +:relative +set OBJDEST=%~f1 +goto :EOF :END popd diff --git a/ldap/synctools/passwordsync/passhook/passhook.mak b/ldap/synctools/passwordsync/passhook/passhook.mak index a56d7e6d3..cb86dd6ac 100644 --- a/ldap/synctools/passwordsync/passhook/passhook.mak +++ b/ldap/synctools/passwordsync/passhook/passhook.mak @@ -65,10 +65,10 @@ NULL=nul !IF "$(CFG)" == "passhook - Win32 Release" -OUTDIR=.\Release -INTDIR=.\Release +OUTDIR=$(OBJDEST)\passhook +INTDIR=$(OBJDEST)\passhook # Begin Custom Macros -OutDir=.\Release +OutDir=$(OBJDEST)\passhook # End Custom Macros ALL : "$(OUTDIR)\passhook.dll" @@ -140,10 +140,10 @@ LINK32_OBJS= \ !ELSEIF "$(CFG)" == "passhook - Win32 Debug" -OUTDIR=.\Debug -INTDIR=.\Debug +OUTDIR=$(OBJDEST)\passhook +INTDIR=$(OBJDEST)\passhook # Begin Custom Macros -OutDir=.\Debug +OutDir=$(OBJDEST)\passhook # End Custom Macros ALL : "$(OUTDIR)\passhook.dll" diff --git a/ldap/synctools/passwordsync/passsync/passsync.mak b/ldap/synctools/passwordsync/passsync/passsync.mak index e83a779d3..2980e0814 100644 --- a/ldap/synctools/passwordsync/passsync/passsync.mak +++ b/ldap/synctools/passwordsync/passsync/passsync.mak @@ -27,10 +27,10 @@ NULL=nul !IF "$(CFG)" == "passsync - Win32 Release" -OUTDIR=.\Release -INTDIR=.\Release +OUTDIR=$(OBJDEST)\passsync +INTDIR=$(OBJDEST)\passsync # Begin Custom Macros -OutDir=.\Release +OutDir=$(OBJDEST)\passsync # End Custom Macros ALL : "$(OUTDIR)\passsync.exe" @@ -102,10 +102,10 @@ LINK32_OBJS= \ !ELSEIF "$(CFG)" == "passsync - Win32 Debug" -OUTDIR=.\Debug -INTDIR=.\Debug +OUTDIR=$(OBJDEST)\passsync +INTDIR=$(OBJDEST)\passsync # Begin Custom Macros -OutDir=.\Debug +OutDir=$(OBJDEST)\passsync # End Custom Macros ALL : "$(OUTDIR)\passsync.exe"
0
1698cb64cfd185dfee586169a3daef252e97f07a
389ds/389-ds-base
Summary: Move DS Admin Code into Admin Server (Comment #62) Description: provide slapd.inf containing the DS static information used for setup/config
commit 1698cb64cfd185dfee586169a3daef252e97f07a Author: Noriko Hosoi <[email protected]> Date: Fri Jun 15 20:22:14 2007 +0000 Summary: Move DS Admin Code into Admin Server (Comment #62) Description: providing slapd.inf having the DS static info for the setup/config diff --git a/Makefile.in b/Makefile.in index b93f8823d..3f1dab239 100644 --- a/Makefile.in +++ b/Makefile.in @@ -334,10 +334,6 @@ am_libns_dshttpd_la_OBJECTS = \ lib/libsi18n/libns_dshttpd_la-reshash.lo \ lib/libsi18n/libns_dshttpd_la-txtfile.lo $(am__objects_1) libns_dshttpd_la_OBJECTS = $(am_libns_dshttpd_la_OBJECTS) -libothercrypto_plugin_la_LIBADD = -am_libothercrypto_plugin_la_OBJECTS = ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo -libothercrypto_plugin_la_OBJECTS = \ - $(am_libothercrypto_plugin_la_OBJECTS) libpam_passthru_plugin_la_DEPENDENCIES = $(am__DEPENDENCIES_1) am_libpam_passthru_plugin_la_OBJECTS = ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo \ ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptdebug.lo \ @@ -359,9 +355,6 @@ libpassthru_plugin_la_OBJECTS = $(am_libpassthru_plugin_la_OBJECTS) libpresence_plugin_la_LIBADD = am_libpresence_plugin_la_OBJECTS = ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo libpresence_plugin_la_OBJECTS = $(am_libpresence_plugin_la_OBJECTS) -libpwderror_plugin_la_LIBADD = -am_libpwderror_plugin_la_OBJECTS = ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo -libpwderror_plugin_la_OBJECTS = $(am_libpwderror_plugin_la_OBJECTS) libpwdstorage_plugin_la_LIBADD = am_libpwdstorage_plugin_la_OBJECTS = ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo \ ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-crypt_pwd.lo \ @@ -562,10 +555,6 @@ libviews_plugin_la_LIBADD = am_libviews_plugin_la_OBJECTS = \ ldap/servers/plugins/views/libviews_plugin_la-views.lo libviews_plugin_la_OBJECTS = $(am_libviews_plugin_la_OBJECTS) -libxor_plugin_la_LIBADD = -am_libxor_plugin_la_OBJECTS = \ - ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo -libxor_plugin_la_OBJECTS = $(am_libxor_plugin_la_OBJECTS) binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) sbinPROGRAMS_INSTALL = $(INSTALL_PROGRAM) PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS) $(sbin_PROGRAMS) @@ -760,21 +749,19 @@ SOURCES = $(libavl_a_SOURCES) $(libldaputil_a_SOURCES) \ $(libdna_plugin_la_SOURCES) $(libds_admin_la_SOURCES) \ $(libhttp_client_plugin_la_SOURCES) \ $(libns_dshttpd_la_SOURCES) \ - $(libothercrypto_plugin_la_SOURCES) \ $(libpam_passthru_plugin_la_SOURCES) \ $(libpassthru_plugin_la_SOURCES) \ $(libpresence_plugin_la_SOURCES) \ - $(libpwderror_plugin_la_SOURCES) \ $(libpwdstorage_plugin_la_SOURCES) \ $(libreferint_plugin_la_SOURCES) \ $(libreplication_plugin_la_SOURCES) \ $(libretrocl_plugin_la_SOURCES) $(libroles_plugin_la_SOURCES) \ $(libslapd_la_SOURCES) $(libstatechange_plugin_la_SOURCES) \ $(libsyntax_plugin_la_SOURCES) $(libviews_plugin_la_SOURCES) \ - $(libxor_plugin_la_SOURCES) $(dbscan_bin_SOURCES) \ - $(ds_newinst_bin_SOURCES) $(dsktune_bin_SOURCES) \ - $(infadd_bin_SOURCES) $(ldap_agent_bin_SOURCES) \ - $(ldclt_bin_SOURCES) $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \ + $(dbscan_bin_SOURCES) $(ds_newinst_bin_SOURCES) \ + $(dsktune_bin_SOURCES) $(infadd_bin_SOURCES) \ + $(ldap_agent_bin_SOURCES) $(ldclt_bin_SOURCES) \ + $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \ $(migratecred_bin_SOURCES) $(mmldif_bin_SOURCES) \ $(ns_slapd_SOURCES) $(pwdhash_bin_SOURCES) \ $(rsearch_bin_SOURCES) @@ -788,24 +775,22 @@ 
DIST_SOURCES = $(libavl_a_SOURCES) $(libldaputil_a_SOURCES) \ $(libdna_plugin_la_SOURCES) $(libds_admin_la_SOURCES) \ $(libhttp_client_plugin_la_SOURCES) \ $(libns_dshttpd_la_SOURCES) \ - $(libothercrypto_plugin_la_SOURCES) \ $(libpam_passthru_plugin_la_SOURCES) \ $(libpassthru_plugin_la_SOURCES) \ $(libpresence_plugin_la_SOURCES) \ - $(libpwderror_plugin_la_SOURCES) \ $(libpwdstorage_plugin_la_SOURCES) \ $(libreferint_plugin_la_SOURCES) \ $(libreplication_plugin_la_SOURCES) \ $(libretrocl_plugin_la_SOURCES) $(libroles_plugin_la_SOURCES) \ $(libslapd_la_SOURCES) $(libstatechange_plugin_la_SOURCES) \ $(libsyntax_plugin_la_SOURCES) $(libviews_plugin_la_SOURCES) \ - $(libxor_plugin_la_SOURCES) $(dbscan_bin_SOURCES) \ - $(ds_newinst_bin_SOURCES) $(dsktune_bin_SOURCES) \ - $(infadd_bin_SOURCES) $(ldap_agent_bin_SOURCES) \ - $(am__ldclt_bin_SOURCES_DIST) $(ldif_bin_SOURCES) \ - $(makstrdb_SOURCES) $(migratecred_bin_SOURCES) \ - $(mmldif_bin_SOURCES) $(am__ns_slapd_SOURCES_DIST) \ - $(pwdhash_bin_SOURCES) $(rsearch_bin_SOURCES) + $(dbscan_bin_SOURCES) $(ds_newinst_bin_SOURCES) \ + $(dsktune_bin_SOURCES) $(infadd_bin_SOURCES) \ + $(ldap_agent_bin_SOURCES) $(am__ldclt_bin_SOURCES_DIST) \ + $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \ + $(migratecred_bin_SOURCES) $(mmldif_bin_SOURCES) \ + $(am__ns_slapd_SOURCES_DIST) $(pwdhash_bin_SOURCES) \ + $(rsearch_bin_SOURCES) configDATA_INSTALL = $(INSTALL_DATA) infDATA_INSTALL = $(INSTALL_DATA) nodist_propertyDATA_INSTALL = $(INSTALL_DATA) @@ -1054,17 +1039,13 @@ server_LTLIBRARIES = libslapd.la libds_admin.la libns-dshttpd.la @enable_pam_passthru_TRUE@LIBPAM_PASSTHRU_PLUGIN = libpam-passthru-plugin.la @enable_dna_TRUE@LIBDNA_PLUGIN = libdna-plugin.la @enable_bitwise_TRUE@LIBBITWISE_PLUGIN = libbitwise-plugin.la -LIBXOR_PLUGIN = libxor-plugin.la -LIBPWDERROR_PLUGIN = libpwderror-plugin.la -LIBOTHERCRYPTO_PLUGIN = libothercrypto-plugin.la serverplugin_LTLIBRARIES = libacl-plugin.la libattr-unique-plugin.la \ libback-ldbm.la libchainingdb-plugin.la libcos-plugin.la libdes-plugin.la \ libdistrib-plugin.la libhttp-client-plugin.la libcollation-plugin.la \ libpassthru-plugin.la libpresence-plugin.la \ libpwdstorage-plugin.la libreferint-plugin.la libreplication-plugin.la \ libretrocl-plugin.la libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \ - libviews-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) \ - $(LIBXOR_PLUGIN) $(LIBPWDERROR_PLUGIN) $(LIBOTHERCRYPTO_PLUGIN) + libviews-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) nodist_property_DATA = ns-slapd.properties noinst_LIBRARIES = libavl.a libldaputil.a @@ -1751,27 +1732,6 @@ libbitwise_plugin_la_SOURCES = ldap/servers/plugins/bitwise/bitwise.c libbitwise_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) libbitwise_plugin_la_LDFLAGS = -avoid-version -#------------------------ -# libxor-plugin -#------------------------ -libxor_plugin_la_SOURCES = ldap/servers/plugins/xor/xorplugin.c -libxor_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) -libxor_plugin_la_LDFLAGS = -avoid-version - -#------------------------ -# libpwderror-plugin -#------------------------ -libpwderror_plugin_la_SOURCES = ldap/servers/plugins/pwderror/pwderror-plugin.c -libpwderror_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) -libpwderror_plugin_la_LDFLAGS = -avoid-version - -#------------------------ -# libothercrypto-plugin -#------------------------ -libothercrypto_plugin_la_SOURCES = ldap/servers/plugins/othercrypto/othercrypto.c -libothercrypto_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) 
-libothercrypto_plugin_la_LDFLAGS = -avoid-version - #//////////////////////////////////////////////////////////////// # # Programs @@ -2813,17 +2773,6 @@ lib/ldaputil/libns_dshttpd_la-vtable.lo: lib/ldaputil/$(am__dirstamp) \ lib/ldaputil/$(DEPDIR)/$(am__dirstamp) libns-dshttpd.la: $(libns_dshttpd_la_OBJECTS) $(libns_dshttpd_la_DEPENDENCIES) $(CXXLINK) -rpath $(serverdir) $(libns_dshttpd_la_LDFLAGS) $(libns_dshttpd_la_OBJECTS) $(libns_dshttpd_la_LIBADD) $(LIBS) -ldap/servers/plugins/othercrypto/$(am__dirstamp): - @$(mkdir_p) ldap/servers/plugins/othercrypto - @: > ldap/servers/plugins/othercrypto/$(am__dirstamp) -ldap/servers/plugins/othercrypto/$(DEPDIR)/$(am__dirstamp): - @$(mkdir_p) ldap/servers/plugins/othercrypto/$(DEPDIR) - @: > ldap/servers/plugins/othercrypto/$(DEPDIR)/$(am__dirstamp) -ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo: \ - ldap/servers/plugins/othercrypto/$(am__dirstamp) \ - ldap/servers/plugins/othercrypto/$(DEPDIR)/$(am__dirstamp) -libothercrypto-plugin.la: $(libothercrypto_plugin_la_OBJECTS) $(libothercrypto_plugin_la_DEPENDENCIES) - $(LINK) -rpath $(serverplugindir) $(libothercrypto_plugin_la_LDFLAGS) $(libothercrypto_plugin_la_OBJECTS) $(libothercrypto_plugin_la_LIBADD) $(LIBS) ldap/servers/plugins/pam_passthru/$(am__dirstamp): @$(mkdir_p) ldap/servers/plugins/pam_passthru @: > ldap/servers/plugins/pam_passthru/$(am__dirstamp) @@ -2881,17 +2830,6 @@ ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo: \ ldap/servers/plugins/presence/$(DEPDIR)/$(am__dirstamp) libpresence-plugin.la: $(libpresence_plugin_la_OBJECTS) $(libpresence_plugin_la_DEPENDENCIES) $(LINK) -rpath $(serverplugindir) $(libpresence_plugin_la_LDFLAGS) $(libpresence_plugin_la_OBJECTS) $(libpresence_plugin_la_LIBADD) $(LIBS) -ldap/servers/plugins/pwderror/$(am__dirstamp): - @$(mkdir_p) ldap/servers/plugins/pwderror - @: > ldap/servers/plugins/pwderror/$(am__dirstamp) -ldap/servers/plugins/pwderror/$(DEPDIR)/$(am__dirstamp): - @$(mkdir_p) ldap/servers/plugins/pwderror/$(DEPDIR) - @: > ldap/servers/plugins/pwderror/$(DEPDIR)/$(am__dirstamp) -ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo: \ - ldap/servers/plugins/pwderror/$(am__dirstamp) \ - ldap/servers/plugins/pwderror/$(DEPDIR)/$(am__dirstamp) -libpwderror-plugin.la: $(libpwderror_plugin_la_OBJECTS) $(libpwderror_plugin_la_DEPENDENCIES) - $(LINK) -rpath $(serverplugindir) $(libpwderror_plugin_la_LDFLAGS) $(libpwderror_plugin_la_OBJECTS) $(libpwderror_plugin_la_LIBADD) $(LIBS) ldap/servers/plugins/pwdstorage/$(am__dirstamp): @$(mkdir_p) ldap/servers/plugins/pwdstorage @: > ldap/servers/plugins/pwdstorage/$(am__dirstamp) @@ -3480,17 +3418,6 @@ ldap/servers/plugins/views/libviews_plugin_la-views.lo: \ ldap/servers/plugins/views/$(DEPDIR)/$(am__dirstamp) libviews-plugin.la: $(libviews_plugin_la_OBJECTS) $(libviews_plugin_la_DEPENDENCIES) $(LINK) -rpath $(serverplugindir) $(libviews_plugin_la_LDFLAGS) $(libviews_plugin_la_OBJECTS) $(libviews_plugin_la_LIBADD) $(LIBS) -ldap/servers/plugins/xor/$(am__dirstamp): - @$(mkdir_p) ldap/servers/plugins/xor - @: > ldap/servers/plugins/xor/$(am__dirstamp) -ldap/servers/plugins/xor/$(DEPDIR)/$(am__dirstamp): - @$(mkdir_p) ldap/servers/plugins/xor/$(DEPDIR) - @: > ldap/servers/plugins/xor/$(DEPDIR)/$(am__dirstamp) -ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo: \ - ldap/servers/plugins/xor/$(am__dirstamp) \ - ldap/servers/plugins/xor/$(DEPDIR)/$(am__dirstamp) -libxor-plugin.la: $(libxor_plugin_la_OBJECTS) $(libxor_plugin_la_DEPENDENCIES) - 
$(LINK) -rpath $(serverplugindir) $(libxor_plugin_la_LDFLAGS) $(libxor_plugin_la_OBJECTS) $(libxor_plugin_la_LIBADD) $(LIBS) install-binPROGRAMS: $(bin_PROGRAMS) @$(NORMAL_INSTALL) test -z "$(bindir)" || $(mkdir_p) "$(DESTDIR)$(bindir)" @@ -4040,8 +3967,6 @@ mostlyclean-compile: -rm -f ldap/servers/plugins/http/libhttp_client_plugin_la-http_client.lo -rm -f ldap/servers/plugins/http/libhttp_client_plugin_la-http_impl.$(OBJEXT) -rm -f ldap/servers/plugins/http/libhttp_client_plugin_la-http_impl.lo - -rm -f ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.$(OBJEXT) - -rm -f ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo -rm -f ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.$(OBJEXT) -rm -f ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo -rm -f ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptdebug.$(OBJEXT) @@ -4064,8 +3989,6 @@ mostlyclean-compile: -rm -f ldap/servers/plugins/passthru/libpassthru_plugin_la-ptutil.lo -rm -f ldap/servers/plugins/presence/libpresence_plugin_la-presence.$(OBJEXT) -rm -f ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo - -rm -f ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.$(OBJEXT) - -rm -f ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo -rm -f ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.$(OBJEXT) -rm -f ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo -rm -f ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-crypt_pwd.$(OBJEXT) @@ -4244,8 +4167,6 @@ mostlyclean-compile: -rm -f ldap/servers/plugins/uiduniq/libattr_unique_plugin_la-uid.lo -rm -f ldap/servers/plugins/views/libviews_plugin_la-views.$(OBJEXT) -rm -f ldap/servers/plugins/views/libviews_plugin_la-views.lo - -rm -f ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.$(OBJEXT) - -rm -f ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo -rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ancestorid.$(OBJEXT) -rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ancestorid.lo -rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-archive.$(OBJEXT) @@ -4800,7 +4721,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/dna/$(DEPDIR)/libdna_plugin_la-dna.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/http/$(DEPDIR)/libhttp_client_plugin_la-http_client.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/http/$(DEPDIR)/libhttp_client_plugin_la-http_impl.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptdebug.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptimpl.Plo@am__quote@ @@ -4812,7 +4732,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/passthru/$(DEPDIR)/libpassthru_plugin_la-ptpreop.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/passthru/$(DEPDIR)/libpassthru_plugin_la-ptutil.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/presence/$(DEPDIR)/libpresence_plugin_la-presence.Plo@am__quote@ 
-@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-crypt_pwd.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-md5_pwd.Plo@am__quote@ @@ -4902,7 +4821,6 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/uiduniq/$(DEPDIR)/libattr_unique_plugin_la-7bit.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/uiduniq/$(DEPDIR)/libattr_unique_plugin_la-uid.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/views/$(DEPDIR)/libviews_plugin_la-views.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/ldap_agent_bin-agtmmap.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-add.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-agtmmap.Plo@am__quote@ @@ -6334,13 +6252,6 @@ lib/ldaputil/libns_dshttpd_la-vtable.lo: lib/ldaputil/vtable.c @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libns_dshttpd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o lib/ldaputil/libns_dshttpd_la-vtable.lo `test -f 'lib/ldaputil/vtable.c' || echo '$(srcdir)/'`lib/ldaputil/vtable.c -ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo: ldap/servers/plugins/othercrypto/othercrypto.c -@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libothercrypto_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo -MD -MP -MF "ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Tpo" -c -o ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo `test -f 'ldap/servers/plugins/othercrypto/othercrypto.c' || echo '$(srcdir)/'`ldap/servers/plugins/othercrypto/othercrypto.c; \ -@am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Tpo" "ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Plo"; else rm -f "ldap/servers/plugins/othercrypto/$(DEPDIR)/libothercrypto_plugin_la-othercrypto.Tpo"; exit 1; fi -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/plugins/othercrypto/othercrypto.c' object='ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libothercrypto_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/othercrypto/libothercrypto_plugin_la-othercrypto.lo `test -f 'ldap/servers/plugins/othercrypto/othercrypto.c' || echo '$(srcdir)/'`ldap/servers/plugins/othercrypto/othercrypto.c - ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo: 
ldap/servers/plugins/pam_passthru/pam_ptconfig.c @am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpam_passthru_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo -MD -MP -MF "ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Tpo" -c -o ldap/servers/plugins/pam_passthru/libpam_passthru_plugin_la-pam_ptconfig.lo `test -f 'ldap/servers/plugins/pam_passthru/pam_ptconfig.c' || echo '$(srcdir)/'`ldap/servers/plugins/pam_passthru/pam_ptconfig.c; \ @am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Tpo" "ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Plo"; else rm -f "ldap/servers/plugins/pam_passthru/$(DEPDIR)/libpam_passthru_plugin_la-pam_ptconfig.Tpo"; exit 1; fi @@ -6418,13 +6329,6 @@ ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo: ldap/servers/pl @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpresence_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/presence/libpresence_plugin_la-presence.lo `test -f 'ldap/servers/plugins/presence/presence.c' || echo '$(srcdir)/'`ldap/servers/plugins/presence/presence.c -ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo: ldap/servers/plugins/pwderror/pwderror-plugin.c -@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpwderror_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo -MD -MP -MF "ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Tpo" -c -o ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo `test -f 'ldap/servers/plugins/pwderror/pwderror-plugin.c' || echo '$(srcdir)/'`ldap/servers/plugins/pwderror/pwderror-plugin.c; \ -@am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Tpo" "ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Plo"; else rm -f "ldap/servers/plugins/pwderror/$(DEPDIR)/libpwderror_plugin_la-pwderror-plugin.Tpo"; exit 1; fi -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/plugins/pwderror/pwderror-plugin.c' object='ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpwderror_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/pwderror/libpwderror_plugin_la-pwderror-plugin.lo `test -f 'ldap/servers/plugins/pwderror/pwderror-plugin.c' || echo '$(srcdir)/'`ldap/servers/plugins/pwderror/pwderror-plugin.c - ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo: ldap/servers/plugins/pwdstorage/clear_pwd.c @am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libpwdstorage_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo -MD -MP -MF 
"ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Tpo" -c -o ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-clear_pwd.lo `test -f 'ldap/servers/plugins/pwdstorage/clear_pwd.c' || echo '$(srcdir)/'`ldap/servers/plugins/pwdstorage/clear_pwd.c; \ @am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Tpo" "ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Plo"; else rm -f "ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-clear_pwd.Tpo"; exit 1; fi @@ -7629,13 +7533,6 @@ ldap/servers/plugins/views/libviews_plugin_la-views.lo: ldap/servers/plugins/vie @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libviews_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/views/libviews_plugin_la-views.lo `test -f 'ldap/servers/plugins/views/views.c' || echo '$(srcdir)/'`ldap/servers/plugins/views/views.c -ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo: ldap/servers/plugins/xor/xorplugin.c -@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libxor_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo -MD -MP -MF "ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Tpo" -c -o ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo `test -f 'ldap/servers/plugins/xor/xorplugin.c' || echo '$(srcdir)/'`ldap/servers/plugins/xor/xorplugin.c; \ -@am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Tpo" "ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Plo"; else rm -f "ldap/servers/plugins/xor/$(DEPDIR)/libxor_plugin_la-xorplugin.Tpo"; exit 1; fi -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/plugins/xor/xorplugin.c' object='ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libxor_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/xor/libxor_plugin_la-xorplugin.lo `test -f 'ldap/servers/plugins/xor/xorplugin.c' || echo '$(srcdir)/'`ldap/servers/plugins/xor/xorplugin.c - ldap/servers/slapd/tools/dbscan_bin-dbscan.o: ldap/servers/slapd/tools/dbscan.c @am__fastdepCC_TRUE@ if $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(dbscan_bin_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/tools/dbscan_bin-dbscan.o -MD -MP -MF "ldap/servers/slapd/tools/$(DEPDIR)/dbscan_bin-dbscan.Tpo" -c -o ldap/servers/slapd/tools/dbscan_bin-dbscan.o `test -f 'ldap/servers/slapd/tools/dbscan.c' || echo '$(srcdir)/'`ldap/servers/slapd/tools/dbscan.c; \ @am__fastdepCC_TRUE@ then mv -f "ldap/servers/slapd/tools/$(DEPDIR)/dbscan_bin-dbscan.Tpo" "ldap/servers/slapd/tools/$(DEPDIR)/dbscan_bin-dbscan.Po"; else rm -f "ldap/servers/slapd/tools/$(DEPDIR)/dbscan_bin-dbscan.Tpo"; exit 1; fi @@ -8893,11 +8790,9 @@ clean-libtool: -rm -rf ldap/servers/plugins/distrib/.libs ldap/servers/plugins/distrib/_libs -rm -rf ldap/servers/plugins/dna/.libs ldap/servers/plugins/dna/_libs -rm -rf ldap/servers/plugins/http/.libs ldap/servers/plugins/http/_libs - -rm -rf 
ldap/servers/plugins/othercrypto/.libs ldap/servers/plugins/othercrypto/_libs -rm -rf ldap/servers/plugins/pam_passthru/.libs ldap/servers/plugins/pam_passthru/_libs -rm -rf ldap/servers/plugins/passthru/.libs ldap/servers/plugins/passthru/_libs -rm -rf ldap/servers/plugins/presence/.libs ldap/servers/plugins/presence/_libs - -rm -rf ldap/servers/plugins/pwderror/.libs ldap/servers/plugins/pwderror/_libs -rm -rf ldap/servers/plugins/pwdstorage/.libs ldap/servers/plugins/pwdstorage/_libs -rm -rf ldap/servers/plugins/referint/.libs ldap/servers/plugins/referint/_libs -rm -rf ldap/servers/plugins/replication/.libs ldap/servers/plugins/replication/_libs @@ -8909,7 +8804,6 @@ clean-libtool: -rm -rf ldap/servers/plugins/syntaxes/.libs ldap/servers/plugins/syntaxes/_libs -rm -rf ldap/servers/plugins/uiduniq/.libs ldap/servers/plugins/uiduniq/_libs -rm -rf ldap/servers/plugins/views/.libs ldap/servers/plugins/views/_libs - -rm -rf ldap/servers/plugins/xor/.libs ldap/servers/plugins/xor/_libs -rm -rf ldap/servers/slapd/.libs ldap/servers/slapd/_libs -rm -rf ldap/servers/slapd/back-ldbm/.libs ldap/servers/slapd/back-ldbm/_libs -rm -rf lib/base/.libs lib/base/_libs @@ -9252,16 +9146,12 @@ distclean-generic: -rm -f ldap/servers/plugins/dna/$(am__dirstamp) -rm -f ldap/servers/plugins/http/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/http/$(am__dirstamp) - -rm -f ldap/servers/plugins/othercrypto/$(DEPDIR)/$(am__dirstamp) - -rm -f ldap/servers/plugins/othercrypto/$(am__dirstamp) -rm -f ldap/servers/plugins/pam_passthru/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/pam_passthru/$(am__dirstamp) -rm -f ldap/servers/plugins/passthru/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/passthru/$(am__dirstamp) -rm -f ldap/servers/plugins/presence/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/presence/$(am__dirstamp) - -rm -f ldap/servers/plugins/pwderror/$(DEPDIR)/$(am__dirstamp) - -rm -f ldap/servers/plugins/pwderror/$(am__dirstamp) -rm -f ldap/servers/plugins/pwdstorage/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/pwdstorage/$(am__dirstamp) -rm -f ldap/servers/plugins/referint/$(DEPDIR)/$(am__dirstamp) @@ -9284,8 +9174,6 @@ distclean-generic: -rm -f ldap/servers/plugins/uiduniq/$(am__dirstamp) -rm -f ldap/servers/plugins/views/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/plugins/views/$(am__dirstamp) - -rm -f ldap/servers/plugins/xor/$(DEPDIR)/$(am__dirstamp) - -rm -f ldap/servers/plugins/xor/$(am__dirstamp) -rm -f ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp) -rm -f ldap/servers/slapd/$(am__dirstamp) -rm -f ldap/servers/slapd/back-ldbm/$(DEPDIR)/$(am__dirstamp) @@ -9324,7 +9212,7 @@ clean-am: clean-binPROGRAMS clean-generic clean-libtool \ distclean: distclean-am -rm -f $(am__CONFIG_DISTCLEAN_FILES) - -rm -rf ldap/admin/lib/$(DEPDIR) ldap/admin/src/$(DEPDIR) ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/othercrypto/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwderror/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) 
ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/plugins/xor/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR) + -rm -rf ldap/admin/lib/$(DEPDIR) ldap/admin/src/$(DEPDIR) ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR) -rm -f Makefile distclean-am: clean-am distclean-compile distclean-generic \ distclean-hdr distclean-libtool distclean-tags @@ -9358,7 +9246,7 @@ installcheck-am: maintainer-clean: maintainer-clean-am -rm -f $(am__CONFIG_DISTCLEAN_FILES) -rm -rf $(top_srcdir)/autom4te.cache - -rm -rf ldap/admin/lib/$(DEPDIR) ldap/admin/src/$(DEPDIR) ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/othercrypto/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwderror/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/plugins/xor/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) 
lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR) + -rm -rf ldap/admin/lib/$(DEPDIR) ldap/admin/src/$(DEPDIR) ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR) -rm -f Makefile maintainer-clean-am: distclean-am maintainer-clean-generic
0
18ef874d839036e5f025acf511c03dbc57ccef70
389ds/389-ds-base
Issue 4758 - Add tests for WebUI Description: Added a WebUI test for a bug where the RHDS instance won't load when the backup directory is set to a non-existing directory. Relates: https://github.com/389ds/389-ds-base/issues/4758 Reviewed by: @bsimonova (Thanks!)
commit 18ef874d839036e5f025acf511c03dbc57ccef70 Author: Vladimir Cech <[email protected]> Date: Wed Apr 19 14:07:29 2023 +0200 Issue 4758 - Add tests for WebUI Description: Added WebUI test for bug where RHDS instance won't load when backup directory is set to non existing directory. Relates: https://github.com/389ds/389-ds-base/issues/4758 Reviewed by: @bsimonova (Thanks!) diff --git a/dirsrvtests/tests/suites/webui/backup/__init__.py b/dirsrvtests/tests/suites/webui/backup/__init__.py new file mode 100644 index 000000000..656f03aba --- /dev/null +++ b/dirsrvtests/tests/suites/webui/backup/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: WebUI: Backup +""" diff --git a/dirsrvtests/tests/suites/webui/backup/backup_test.py b/dirsrvtests/tests/suites/webui/backup/backup_test.py new file mode 100644 index 000000000..26eae0fff --- /dev/null +++ b/dirsrvtests/tests/suites/webui/backup/backup_test.py @@ -0,0 +1,54 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.cli_idm.account import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from .. import setup_page, check_frame_assignment, setup_login + +pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment") +pytest.importorskip('playwright') + +SERVER_ID = 'standalone1' + + [email protected](reason="Will fail because of bz2189181") +def test_no_backup_dir(topology_st, page, browser_name): + """ Test that instance is able to load when backup directory doesn't exist. + + :id: a1fb9e70-c110-4578-ba1f-4b593cc0a047 + :setup: Standalone instance + :steps: + 1. Set Backup Directory (nsslapd-bakdir) to non existing directory. + 2. Check if element on Server tab is loaded. + :expectedresults: + 1. Success + 2. Element is visible. + """ + + topology_st.standalone.config.set('nsslapd-bakdir', '/DOES_NOT_EXIST') + + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Check if server settings tabs are loaded.') + frame.get_by_role('tab', name='General Settings', exact=True).wait_for() + assert frame.get_by_role('tab', name='General Settings').is_visible() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE)
0
c5314042ff139ca6c5b6eaf21be52dccf24f8444
389ds/389-ds-base
Resolves: 470611 Summary: Enhanced rsearch to allow user filter and password to be configurable (contributed by [email protected]).
commit c5314042ff139ca6c5b6eaf21be52dccf24f8444 Author: Nathan Kinder <[email protected]> Date: Thu Jan 29 17:32:21 2009 +0000 Resolves: 470611 Summary: Enhanced rsearch to allow user filter and password to be configurable (contributed by [email protected]). diff --git a/ldap/servers/slapd/tools/rsearch/rsearch.c b/ldap/servers/slapd/tools/rsearch/rsearch.c index 621a22e51..13512456c 100644 --- a/ldap/servers/slapd/tools/rsearch/rsearch.c +++ b/ldap/servers/slapd/tools/rsearch/rsearch.c @@ -54,6 +54,8 @@ #include <stdio.h> #include <stdlib.h> +#include <time.h> +#include <string.h> #ifdef XP_UNIX #include <unistd.h> #endif @@ -104,6 +106,8 @@ void usage() "-C num -- take num samples, then stop\n" "-R num -- drop connection & reconnect every num searches\n" "-x -- Use -B file for binding; ignored if -B is not given\n" + "-W -- Password to use when binding with -B. Default is the UID.\n" + "-U -- Filter to use with binding file. Ignored if -x is not given. Default is '(uid=%%s)'.\n" "\n", DEFAULT_HOSTNAME, DEFAULT_PORT, LDAP_SCOPE_BASE, LDAP_SCOPE_ONELEVEL, LDAP_SCOPE_SUBTREE, @@ -223,6 +227,8 @@ char *searchDatFile = 0; char *attrFile = 0; char *bindDN = NULL; char *bindPW = NULL; +char *userPW = NULL; +char *uidFilter = NULL; char **attrToReturn = 0; char *attrList = 0; Operation opType = op_search; @@ -253,7 +259,7 @@ int main(int argc, char** argv) } while ((ch = getopt(argc, argv, - "B:a:j:i:h:s:f:p:o:t:T:D:w:n:A:S:C:R:bvlyqmMcduNLHx?V")) + "U:W:B:a:j:i:h:s:f:p:o:t:T:D:w:n:A:S:C:R:bvlyqmMcduNLHx?V")) != EOF) switch (ch) { case 'h': @@ -359,6 +365,12 @@ int main(int argc, char** argv) case 'x': useBFile = 1; break; + case 'W': + userPW = optarg; + break; + case 'U': + uidFilter = optarg; + break; case 'a': if (optarg[0] == '?') { usage_A(); @@ -387,6 +399,11 @@ int main(int argc, char** argv) argc -= optind; argv += optind; + if (uidFilter && NULL == strstr(uidFilter, "%s")) { + printf("rsearch: invalid UID filter - must contain %%s, eg, (uid=%%s)\n"); + usage(); + } + PR_Init(PR_SYSTEM_THREAD, PR_PRIORITY_NORMAL, 0); ntable = nt_new(0); @@ -487,13 +504,20 @@ int main(int argc, char** argv) cumrate += rate; if ((numThreads > 1) || (!verbose)) { if (!quiet) { + char tbuf[18]; + struct tm* now; + time_t lt; + + time(&lt); + now = localtime(&lt); + strftime(tbuf, sizeof(tbuf), "%Y%m%d %H:%M:%S", now); if (showRunningAvg) - printf("Rate: %7.2f/thr (cumul rate: %7.2f/thr)\n", - rate, cumrate/(double)counter); + printf("%s - Rate: %7.2f/thr (cumul rate: %7.2f/thr)\n", + tbuf, rate, cumrate/(double)counter); else - printf("Rate: %7.2f/thr (%6.2f/sec =%7.4fms/op), " + printf("%s - Rate: %7.2f/thr (%6.2f/sec =%7.4fms/op), " "total:%6u (%d thr)\n", - rate, val, (double)1000.0/val, total, numThreads); + tbuf, rate, val, (double)1000.0/val, total, numThreads); } } if (countLimit && (counter >= countLimit)) { diff --git a/ldap/servers/slapd/tools/rsearch/rsearch.h b/ldap/servers/slapd/tools/rsearch/rsearch.h index d62c80d87..e123dc0a4 100644 --- a/ldap/servers/slapd/tools/rsearch/rsearch.h +++ b/ldap/servers/slapd/tools/rsearch/rsearch.h @@ -69,6 +69,8 @@ extern char *filter; /**/ extern char *nameFile; extern char *bindDN; extern char *bindPW; +extern char *userPW; +extern char *uidFilter; extern char **attrToReturn; /**/ extern char *attrList; extern Operation opType; diff --git a/ldap/servers/slapd/tools/rsearch/searchthread.c b/ldap/servers/slapd/tools/rsearch/searchthread.c index 1ef0b7a2e..443419c0c 100644 --- a/ldap/servers/slapd/tools/rsearch/searchthread.c +++ 
b/ldap/servers/slapd/tools/rsearch/searchthread.c @@ -90,7 +90,6 @@ SearchThread *st_new(void) st->alive = 1; st->lock = PR_NewLock(); st->retry = 0; - srand(time(0)); return st; } @@ -106,6 +105,11 @@ int st_getThread(SearchThread *st, PRThread **tid) return st->id; } +void st_seed(SearchThread *st) { + time_t t = time(0); + t -= st->id * 1000; + srand((unsigned int)t); +} static void st_enableTCPnodelay(SearchThread *st) { @@ -143,12 +147,12 @@ static void st_disconnect(SearchThread *st) st->soc = -1; } -static int st_bind_core(SearchThread *st, LDAP **ld, char *dn, char *uid) +static int st_bind_core(SearchThread *st, LDAP **ld, char *dn, char *pw) { int ret = 0; int retry = 0; while (1) { - ret = ldap_simple_bind_s(*ld, dn, uid); + ret = ldap_simple_bind_s(*ld, dn, pw); if (LDAP_SUCCESS == ret) { break; } else if (LDAP_CONNECT_ERROR == ret && retry < 10) { @@ -156,7 +160,7 @@ static int st_bind_core(SearchThread *st, LDAP **ld, char *dn, char *uid) } else { fprintf(stderr, "T%d: failed to bind, ldap_simple_bind_s" "(%s, %s) returned 0x%x (errno %d)\n", - st->id, dn, uid, ret, errno); + st->id, dn, pw, ret, errno); *ld = NULL; return 0; } @@ -188,30 +192,33 @@ static int st_bind(SearchThread *st) if (opType != op_delete && opType != op_modify && opType != op_idxmodify && sdattable && sdt_getlen(sdattable) > 0) { int e; - char *dn, *uid; + char *dn, *uid, *upw; do { e = sdt_getrand(sdattable); } while (e < 0); dn = sdt_dn_get(sdattable, e); uid = sdt_uid_get(sdattable, e); + /* in this test, assuming uid == password unless told otherwise */ + upw = (userPW) ? userPW : uid; if (useBFile) { - /* in this test, assuming uid == password */ + if (dn) { - if (0 == st_bind_core(st, &(st->ld), dn, uid)) { + if (0 == st_bind_core(st, &(st->ld), dn, upw)) { return 0; } } else if (uid) { char filterBuffer[100]; char *pFilter; + char *filterTemplate = (uidFilter) ? uidFilter : "(uid=%s)"; struct timeval timeout; int scope = LDAP_SCOPE_SUBTREE, attrsOnly = 0; LDAPMessage *result; int retry = 0; pFilter = filterBuffer; - sprintf(filterBuffer, "(uid=%s)", uid); + sprintf(filterBuffer, filterTemplate, uid); timeout.tv_sec = 3600; timeout.tv_usec = 0; while (1) { @@ -230,7 +237,7 @@ static int st_bind(SearchThread *st) } dn = ldap_get_dn(st->ld2, result); - if (0 == st_bind_core(st, &(st->ld), dn, uid)) { + if (0 == st_bind_core(st, &(st->ld), dn, upw)) { return 0; } } else { @@ -239,7 +246,7 @@ static int st_bind(SearchThread *st) return 0; } } else { - if (0 == st_bind_core(st, &(st->ld), dn, uid)) { + if (0 == st_bind_core(st, &(st->ld), dn, upw)) { return 0; } } @@ -504,6 +511,7 @@ void search_start(void *v) int notBound = 1, res = LDAP_SUCCESS, searches = 0; PRUint32 span; + st_seed(st); st->alive = 1; st->ld = 0; while (1) { @@ -544,6 +552,10 @@ void search_start(void *v) return; } } + else { + /* Fake status for NOOP */ + res = LDAP_SUCCESS; + } if (LDAP_SUCCESS == res) { st->retry = 0; } else if (LDAP_CONNECT_ERROR == res && st->retry < 10) {
0
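The rsearch patch in the preceding record adds -W (bind password) and -U (uid filter template, which must contain %s). The following is a minimal standalone C sketch of that filter-template handling, assuming the same semantics as the patch: the template is rejected if it lacks %s, and otherwise expanded with sprintf before the bind-time search. The sample uid and template values are hypothetical, not taken from the commit.

    /* Minimal sketch (not the rsearch source itself) of the -U filter
     * handling added in the commit above: reject templates without %s,
     * then expand the template with the uid, as the patch does. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *uidFilter = "(uid=%s)";   /* hypothetical -U value */
        const char *uid = "jdoe";             /* hypothetical entry uid */
        char filterBuffer[100];

        if (strstr(uidFilter, "%s") == NULL) {
            /* same validation rule as the patch's check in main() */
            printf("rsearch: invalid UID filter - must contain %%s, eg, (uid=%%s)\n");
            return 1;
        }
        sprintf(filterBuffer, uidFilter, uid); /* patch builds the per-user filter the same way */
        printf("bind search filter: %s\n", filterBuffer);
        return 0;
    }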
77cacd96c1648373939a0bf3a4cba81b42cafda3
389ds/389-ds-base
coverity 12606 Logically dead code The previous fix (commit 325abca7135d06225adf5380d726de60dacda5a4) for "Ticket #303 - make DNA range requests work with transactions" introduced this dead code. Since dna_pre_op does not allocate an entry "e", there is no need to check the flag "free_entry" and free it. Reviewed by [email protected] (Thanks, Mark!!).
commit 77cacd96c1648373939a0bf3a4cba81b42cafda3 Author: Noriko Hosoi <[email protected]> Date: Thu Mar 15 09:46:23 2012 -0700 coverity 12606 Logically dead code The previous fix (commit 325abca7135d06225adf5380d726de60dacda5a4) for "Ticket #303 - make DNA range requests work with transactions" introduced this dead code. Since dna_pre_op does not allocate an entry "e", there is no need to check the flag "free_entry" and free it. Reviewed by [email protected] (Thanks, Mark!!). diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c index c744e0a03..ce2486ed2 100644 --- a/ldap/servers/plugins/dna/dna.c +++ b/ldap/servers/plugins/dna/dna.c @@ -3214,7 +3214,6 @@ dna_pre_op(Slapi_PBlock * pb, int modtype) char *dn = NULL; Slapi_Mods *smods = NULL; LDAPMod **mods; - int free_entry = 0; int ret = 0; slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, @@ -3308,9 +3307,6 @@ dna_pre_op(Slapi_PBlock * pb, int modtype) slapi_mods_free(&smods); } bail: - if (free_entry && e) - slapi_entry_free(e); - if (resulting_e) slapi_entry_free(resulting_e);
0
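A minimal C sketch of the pattern Coverity flagged in the preceding record, under the assumption described in the commit message: a flag that is initialized and never set again makes the guarded cleanup unreachable, so both the check and the free can be deleted. This is an illustration, not the dna.c code.

    /* Illustration of "logically dead code": free_entry starts at 0 and
     * nothing in the function ever sets it, so the guarded free below
     * can never run and is safe to remove, as the diff above does. */
    #include <stdlib.h>

    void process(void *e)
    {
        int free_entry = 0;      /* initialized, never assigned again */

        /* ... work on e that never touches free_entry ... */

        if (free_entry && e) {   /* always false: logically dead branch */
            free(e);             /* unreachable */
        }
    }

    int main(void)
    {
        process(NULL);           /* nothing to free in this sketch */
        return 0;
    }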
866d13e4471c084ba83f82e2a83bb7fd14158548
389ds/389-ds-base
609255 - fix coverity Defect Type: Memory - illegal accesses issues https://bugzilla.redhat.com/show_bug.cgi?id=609255 12230 UNINIT Triaged Unassigned Bug Minor Fix Required preop_add() ds/ldap/servers/plugins/uiduniq/7bit.c Comment: Some cases such as NULL attrName is passed or it does not have a value, uninitialized "violated" is passed to slapi_ch_smprintf via issue_error. We should init violated to NULL. 12231 UNINIT Triaged Unassigned Bug Unspecified Fix Required preop_modify() ds/ldap/servers/plugins/uiduniq/7bit.c Comment: Some cases such as NULL attrName is passed or mods were empty, uninitialized "violated" is passed to slapi_ch_smprintf via issue_error. We should init violated to NULL. 12232 UNINIT Triaged Unassigned Bug Minor Fix Required preop_modrdn() ds/ldap/servers/plugins/uiduniq/7bit.c Comment: Some cases such as NULL attrName is passed or it does not have a value, uninitialized "violated" is passed to slapi_ch_smprintf via issue_error. We should init violated to NULL.
commit 866d13e4471c084ba83f82e2a83bb7fd14158548 Author: Noriko Hosoi <[email protected]> Date: Wed Jun 30 10:28:14 2010 -0700 609255 - fix coverity Defect Type: Memory - illegal accesses issues https://bugzilla.redhat.com/show_bug.cgi?id=609255 12230 UNINIT Triaged Unassigned Bug Minor Fix Required preop_add() ds/ldap/servers/plugins/uiduniq/7bit.c Comment: Some cases such as NULL attrName is passed or it does not have a value, uninitialized "violated" is passed to slapi_ch_smprintf via issue_error. We should init violated to NULL. 12231 UNINIT Triaged Unassigned Bug Unspecified Fix Required preop_modify() ds/ldap/servers/plugins/uiduniq/7bit.c Comment: Some cases such as NULL attrName is passed or mods were empty, uninitialized "violated" is passed to slapi_ch_smprintf via issue_error. We should init violated to NULL. 12232 UNINIT Triaged Unassigned Bug Minor Fix Required preop_modrdn() ds/ldap/servers/plugins/uiduniq/7bit.c Comment: Some cases such as NULL attrName is passed or it does not have a value, uninitialized "violated" is passed to slapi_ch_smprintf via issue_error. We should init violated to NULL. diff --git a/ldap/servers/plugins/uiduniq/7bit.c b/ldap/servers/plugins/uiduniq/7bit.c index 56eef22ed..f575d2679 100644 --- a/ldap/servers/plugins/uiduniq/7bit.c +++ b/ldap/servers/plugins/uiduniq/7bit.c @@ -217,7 +217,7 @@ static int preop_add(Slapi_PBlock *pb) { int result; - char *violated; + char *violated = NULL; #ifdef DEBUG slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name, "ADD begin\n"); @@ -359,7 +359,7 @@ static int preop_modify(Slapi_PBlock *pb) { int result; - char *violated; + char *violated = NULL; LDAPMod **checkmods = NULL; /* holds mods to check */ int checkmodsCapacity = 0; /* max capacity of checkmods */ @@ -522,7 +522,7 @@ preop_modrdn(Slapi_PBlock *pb) { int result; Slapi_Entry *e; - char *violated; + char *violated = NULL; #ifdef DEBUG slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name,
0
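A small C sketch of the uninitialized-pointer pattern the three defects in the preceding record describe, assuming the control flow summarized in the commit message; the function and variable names mirror that summary, not the actual 7bit.c code paths. When the early-exit cases fire, the error reporter would otherwise receive whatever indeterminate value happened to be in "violated", so initializing it to NULL gives the error path a defined value.

    /* Sketch of the fix: start "violated" at NULL so the error-reporting
     * path never reads an indeterminate pointer. issue_error() here is a
     * stand-in for the plugin's issue_error()/slapi_ch_smprintf path. */
    #include <stdio.h>

    static void issue_error(const char *violated)
    {
        /* with the fix, violated is NULL (printed as "(none)") rather
         * than an uninitialized pointer, which is undefined behavior */
        fprintf(stderr, "7-bit check failed, violated: %s\n",
                violated ? violated : "(none)");
    }

    static int preop_check(const char *attrName)
    {
        char *violated = NULL;   /* the one-line fix from the diff */

        if (attrName == NULL) {  /* early-exit case from the defect report */
            issue_error(violated);
            return -1;
        }
        /* ... a real violation would set violated before reporting ... */
        return 0;
    }

    int main(void)
    {
        return preop_check(NULL) ? 1 : 0;
    }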
d3f94d49e0cec1b46216486b1a6e528b79b439f3
389ds/389-ds-base
Issue 5848 - Fix condition and add a CI test (#5916) Description: Add a "positive" test for the issue and fix the condition to make sure that 65535 and no --replica-id are correctly accepted. Related: https://github.com/389ds/389-ds-base/issues/5848 Reviewed by: @mreynolds389 @tbordaz (Thanks!)
commit d3f94d49e0cec1b46216486b1a6e528b79b439f3 Author: Simon Pichugin <[email protected]> Date: Thu Aug 31 11:19:35 2023 -0700 Issue 5848 - Fix condition and add a CI test (#5916) Description: Add a "positive" test for the issue and fix the condition to make sure that 65535 and no --replica-id are correctly accepted. Related: https://github.com/389ds/389-ds-base/issues/5848 Reviewed by: @mreynolds389 @tbordaz (Thanks!)
0
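The record above carries no diff, so the following is only an abstract C illustration of the acceptance rule described in the commit message; the actual change lives in the project's Python CLI code, and the names below are hypothetical. The message states that both "no --replica-id" and the value 65535 must be accepted by the condition being fixed.

    /* Abstract sketch of the rule named in the commit message: either
     * omitting --replica-id or passing 65535 must be accepted. It does
     * not claim anything about how other values are handled. */
    #include <stdio.h>

    #define RESERVED_RID 65535   /* value named in the commit message */

    static int rid_case_accepted(int have_rid, long rid)
    {
        /* the two inputs the fix must accept */
        return !have_rid || rid == RESERVED_RID;
    }

    int main(void)
    {
        printf("no --replica-id accepted: %d\n", rid_case_accepted(0, 0));
        printf("--replica-id 65535 accepted: %d\n", rid_case_accepted(1, 65535));
        return 0;
    }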
b9484501e655873ac20fa7612e57323a3b85c08f
389ds/389-ds-base
Fix ntds installer to allow repair and removal
commit b9484501e655873ac20fa7612e57323a3b85c08f Author: David Boreham <[email protected]> Date: Fri May 13 03:20:20 2005 +0000 Fix ntds installer to allow repair and removal diff --git a/ldap/servers/ntds/apacheds/usersync.schema b/ldap/servers/ntds/apacheds/usersync.schema index 5f87ff61c..0dc36a3d2 100644 --- a/ldap/servers/ntds/apacheds/usersync.schema +++ b/ldap/servers/ntds/apacheds/usersync.schema @@ -212,7 +212,7 @@ attributetype ( 1.2.840.113556.1.4.169 attributetype ( 1.2.840.113556.1.4.90 NAME 'unicodePwd' - SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE ) attributetype ( 1.2.840.113556.1.4.62 diff --git a/ldap/servers/ntds/netman/netman.cpp b/ldap/servers/ntds/netman/netman.cpp index ac94415a4..91ace87f7 100644 --- a/ldap/servers/ntds/netman/netman.cpp +++ b/ldap/servers/ntds/netman/netman.cpp @@ -255,6 +255,7 @@ exit: NTUser::NTUser() { currentAccountName = NULL; + userInfo = NULL; groupsInfo = NULL; currentGroupEntry = 0; @@ -275,6 +276,11 @@ NTUser::NTUser() NTUser::~NTUser() { quickFree((char**)&currentAccountName); + if(userInfo != NULL) + { + NetApiBufferFree(userInfo); + userInfo = NULL; + } if(groupsInfo != NULL) { NetApiBufferFree(groupsInfo); @@ -288,6 +294,34 @@ NTUser::~NTUser() quickFree((char**)&resultBuf); } +// **************************************************************** +// NTUser::LoadUserInfo +// **************************************************************** +int NTUser::LoadUserInfo() +{ + int result = 0; + + if(currentAccountName == NULL) + { + result = -1; + goto exit; + } + + if(userInfo != NULL) + { + NetApiBufferFree(userInfo); + userInfo = NULL; + } + + if(NetUserGetInfo(NULL, currentAccountName, USER_INFO_LEVEL, (unsigned char**)&userInfo) != NERR_Success) + { + result = -1; + } + +exit: + return result; +} + // **************************************************************** // NTUser::NewUser // **************************************************************** @@ -320,6 +354,9 @@ int NTUser::NewUser(char* username) // Free buffers quickFree((char**)&info); + // Load info for quick retrevial + LoadUserInfo(); + return result; } @@ -330,14 +367,14 @@ int NTUser::RetriveUserByAccountName(char* username) { int result; unsigned long length = 0; - PUSER_INFO_3 info; + PUSER_INFO_3 info = NULL; quickFree((char**)&currentAccountName); UTF8ToUTF16(username, NULL, &length); currentAccountName = (unsigned short*)malloc(length); UTF8ToUTF16(username, currentAccountName, &length); - result = NetUserGetInfo(NULL, currentAccountName, USER_INFO_LEVEL, (unsigned char**)&info); + result = LoadUserInfo(); return result; } @@ -396,6 +433,12 @@ int NTUser::DeleteUser() quickFree((char**)&currentAccountName); + if(userInfo != NULL) + { + NetApiBufferFree(userInfo); + userInfo = NULL; + } + exit: return result; } @@ -472,25 +515,16 @@ exit: unsigned long NTUser::GetAccountExpires() { unsigned long result = 0; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = 0; - goto exit; - } - if((result = NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info)) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - result = info->usri3_acct_expires; + result = userInfo->usri3_acct_expires; exit: - NetApiBufferFree((void*)info); - return result; } @@ -510,6 +544,7 @@ int NTUser::SetAccountExpires(unsigned long accountExpires) info.usri1017_acct_expires = accountExpires; result = NetUserSetInfo(NULL, currentAccountName, 1017, (unsigned char*)&info, NULL); + 
LoadUserInfo(); exit: return result; @@ -521,25 +556,16 @@ exit: unsigned long NTUser::GetBadPasswordCount() { unsigned long result = 0; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = 0; - goto exit; - } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - result = info->usri3_bad_pw_count; + result = userInfo->usri3_bad_pw_count; exit: - NetApiBufferFree((void*)info); - return result; } @@ -549,25 +575,16 @@ exit: unsigned long NTUser::GetCodePage() { unsigned long result = 0; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = 0; - goto exit; - } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - result = info->usri3_code_page; + result = userInfo->usri3_code_page; exit: - NetApiBufferFree((void*)info); - return result; } @@ -587,6 +604,7 @@ int NTUser::SetCodePage(unsigned long codePage) info.usri1025_code_page = codePage; result = NetUserSetInfo(NULL, currentAccountName, 1025, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: return result; @@ -599,29 +617,20 @@ char* NTUser::GetComment() { char* result = NULL; unsigned long length; - PUSER_INFO_3 info; - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - UTF16ToUTF8(info->usri3_comment, NULL, &length); + UTF16ToUTF8(userInfo->usri3_comment, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->usri3_comment, resultBuf, &length); + UTF16ToUTF8(userInfo->usri3_comment, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -647,6 +656,7 @@ int NTUser::SetComment(char* comment) info.usri1007_comment = wideStr; result = NetUserSetInfo(NULL, currentAccountName, 1007, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: quickFree((char**)&wideStr); @@ -660,25 +670,16 @@ exit: unsigned long NTUser::GetCountryCode() { unsigned long result = 0; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = 0; - goto exit; - } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - result = info->usri3_country_code; + result = userInfo->usri3_country_code; exit: - NetApiBufferFree((void*)info); - return result; } @@ -698,6 +699,7 @@ int NTUser::SetCountryCode(unsigned long countryCode) info.usri1024_country_code = countryCode; result = NetUserSetInfo(NULL, currentAccountName, 1024, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: return result; @@ -709,25 +711,16 @@ exit: unsigned long NTUser::GetFlags() { unsigned long result = 0; - PUSER_INFO_3 info; - if(currentAccountName == NULL) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) - { - result = 0; - goto exit; - } - - result = info->usri3_flags; + result = userInfo->usri3_flags; exit: - NetApiBufferFree((void*)info); - return result; } @@ -747,6 +740,7 @@ int NTUser::SetFlags(unsigned long flags) info.usri1008_flags = flags; result = NetUserSetInfo(NULL, currentAccountName, 1008, 
(unsigned char*)&info, NULL); + LoadUserInfo(); exit: return result; @@ -759,29 +753,20 @@ char* NTUser::GetHomeDir() { char* result = NULL; unsigned long length; - PUSER_INFO_3 info; - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - UTF16ToUTF8(info->usri3_home_dir, NULL, &length); + UTF16ToUTF8(userInfo->usri3_home_dir, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->usri3_home_dir, resultBuf, &length); + UTF16ToUTF8(userInfo->usri3_home_dir, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -807,6 +792,7 @@ int NTUser::SetHomeDir(char* path) info.usri1006_home_dir = wideStr; result = NetUserSetInfo(NULL, currentAccountName, 1006, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: quickFree((char**)&wideStr); @@ -821,29 +807,20 @@ char* NTUser::GetHomeDirDrive() { char* result = NULL; unsigned long length; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - UTF16ToUTF8(info->usri3_home_dir_drive, NULL, &length); + UTF16ToUTF8(userInfo->usri3_home_dir_drive, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->usri3_home_dir_drive, resultBuf, &length); + UTF16ToUTF8(userInfo->usri3_home_dir_drive, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -869,6 +846,7 @@ int NTUser::SetHomeDirDrive(char* path) info.usri1053_home_dir_drive = wideStr; result = NetUserSetInfo(NULL, currentAccountName, 1053, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: quickFree((char**)&wideStr); @@ -882,25 +860,16 @@ exit: unsigned long NTUser::GetLastLogoff() { unsigned long result = 0; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = 0; - goto exit; - } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - result = info->usri3_last_logoff; + result = userInfo->usri3_last_logoff; exit: - NetApiBufferFree((void*)info); - return result; } @@ -910,25 +879,16 @@ exit: unsigned long NTUser::GetLastLogon() { unsigned long result = 0; - PUSER_INFO_3 info; - if(currentAccountName == NULL) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) - { - result = 0; - goto exit; - } - - result = info->usri3_last_logon; + result = userInfo->usri3_last_logon; exit: - NetApiBufferFree((void*)info); - return result; } @@ -939,29 +899,20 @@ char* NTUser::GetLogonHours() { char* result = NULL; unsigned long length; - PUSER_INFO_3 info; - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - BinToHexStr((char*)info->usri3_logon_hours, 21, NULL, &length); + BinToHexStr((char*)userInfo->usri3_logon_hours, 21, NULL, &length); resultBuf = (char*)malloc(length); - 
BinToHexStr((char*)info->usri3_script_path, 21, resultBuf, &length); + BinToHexStr((char*)userInfo->usri3_script_path, 21, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -987,6 +938,7 @@ int NTUser::SetLogonHours(char* logonHours) info.usri1020_logon_hours = (unsigned char*)bin; result = NetUserSetInfo(NULL, currentAccountName, 1020, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: quickFree(&bin); @@ -1000,25 +952,16 @@ exit: unsigned long NTUser::GetMaxStorage() { unsigned long result = 0; - PUSER_INFO_3 info; - if(currentAccountName == NULL) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) - { - result = 0; - goto exit; - } - - result = info->usri3_max_storage; + result = userInfo->usri3_max_storage; exit: - NetApiBufferFree((void*)info); - return result; } @@ -1038,6 +981,7 @@ int NTUser::SetMaxStorage(unsigned long maxStorage) info.usri1018_max_storage = maxStorage; result = NetUserSetInfo(NULL, currentAccountName, 1018, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: return result; @@ -1049,25 +993,16 @@ exit: unsigned long NTUser::GetNumLogons() { unsigned long result = 0; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = 0; - goto exit; - } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = 0; goto exit; } - result = info->usri3_num_logons; + result = userInfo->usri3_num_logons; exit: - NetApiBufferFree((void*)info); - return result; } @@ -1078,29 +1013,20 @@ char* NTUser::GetProfile() { char* result = NULL; unsigned long length; - PUSER_INFO_3 info; - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - UTF16ToUTF8(info->usri3_profile, NULL, &length); + UTF16ToUTF8(userInfo->usri3_profile, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->usri3_profile, resultBuf, &length); + UTF16ToUTF8(userInfo->usri3_profile, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -1126,6 +1052,7 @@ int NTUser::SetProfile(char* path) info.usri1052_profile = wideStr; result = NetUserSetInfo(NULL, currentAccountName, 1052, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: quickFree((char**)&wideStr); @@ -1140,29 +1067,20 @@ char* NTUser::GetScriptPath() { char* result = NULL; unsigned long length; - PUSER_INFO_3 info; - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - UTF16ToUTF8(info->usri3_script_path, NULL, &length); + UTF16ToUTF8(userInfo->usri3_script_path, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->usri3_script_path, resultBuf, &length); + UTF16ToUTF8(userInfo->usri3_script_path, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -1188,6 +1106,7 @@ int NTUser::SetScriptPath(char* path) info.usri1009_script_path = wideStr; result = NetUserSetInfo(NULL, currentAccountName, 1009, (unsigned char*)&info, NULL); + 
LoadUserInfo(); exit: quickFree((char**)&wideStr); @@ -1202,29 +1121,20 @@ char* NTUser::GetWorkstations() { char* result = NULL; unsigned long length; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - UTF16ToUTF8(info->usri3_workstations, NULL, &length); + UTF16ToUTF8(userInfo->usri3_workstations, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->usri3_workstations, resultBuf, &length); + UTF16ToUTF8(userInfo->usri3_workstations, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -1250,6 +1160,7 @@ int NTUser::SetWorkstations(char* workstations) info.usri1014_workstations = wideStr; result = NetUserSetInfo(NULL, currentAccountName, 1014, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: quickFree((char**)&wideStr); @@ -1264,29 +1175,20 @@ char* NTUser::GetFullname() { char* result = NULL; unsigned long length; - PUSER_INFO_3 info; - - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - if(NetUserGetInfo(NULL, currentAccountName, 3, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || userInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - UTF16ToUTF8(info->usri3_full_name, NULL, &length); + UTF16ToUTF8(userInfo->usri3_full_name, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->usri3_full_name, resultBuf, &length); + UTF16ToUTF8(userInfo->usri3_full_name, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -1312,6 +1214,7 @@ int NTUser::SetFullname(char* fullname) info.usri1011_full_name = wideStr; result = NetUserSetInfo(NULL, currentAccountName, 1011, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: quickFree((char**)&wideStr); @@ -1341,6 +1244,7 @@ int NTUser::SetPassword(char* password) info.usri1003_password = wideStr; result = NetUserSetInfo(NULL, currentAccountName, 1003, (unsigned char*)&info, NULL); + LoadUserInfo(); exit: quickFree((char**)&wideStr); @@ -1707,6 +1611,7 @@ char* NTUserList::nextUsername() NTGroup::NTGroup() { currentAccountName = NULL; + groupInfo = NULL; usersInfo = NULL; currentUserEntry = 0; userEntriesRead = 0; @@ -1720,6 +1625,11 @@ NTGroup::NTGroup() NTGroup::~NTGroup() { quickFree((char**)&currentAccountName); + if(groupInfo != NULL) + { + NetApiBufferFree(groupInfo); + groupInfo = NULL; + } if(usersInfo != NULL) { NetApiBufferFree(usersInfo); @@ -1728,6 +1638,34 @@ NTGroup::~NTGroup() quickFree(&resultBuf); } +// **************************************************************** +// NTGroup::LoadGroupInfo +// **************************************************************** +int NTGroup::LoadGroupInfo() +{ + int result = 0; + + if(currentAccountName == NULL) + { + result = -1; + goto exit; + } + + if(groupInfo != NULL) + { + NetApiBufferFree(groupInfo); + groupInfo = NULL; + } + + if(NetGroupGetInfo(NULL, currentAccountName, GROUP_INFO_LEVEL, (unsigned char**)&groupInfo) != NERR_Success) + { + result = -1; + } + +exit: + return result; +} + // **************************************************************** // NTGroup::NewGroup // **************************************************************** @@ -1754,6 +1692,9 @@ int NTGroup::NewGroup(char* groupName) // Free buffers quickFree((char**)&info); + // Load info for 
quick retrevial + LoadGroupInfo(); + return result; } @@ -1771,7 +1712,7 @@ int NTGroup::RetriveGroupByAccountName(char* groupName) currentAccountName = (unsigned short*)malloc(length); UTF8ToUTF16(groupName, currentAccountName, &length); - result = NetGroupGetInfo(NULL, currentAccountName, GROUP_INFO_LEVEL, (unsigned char**)&info); + result = LoadGroupInfo(); return result; } @@ -1829,6 +1770,12 @@ int NTGroup::DeleteGroup() result = NetGroupDel(NULL, currentAccountName); quickFree((char**)&currentAccountName); + + if(groupInfo != NULL) + { + NetApiBufferFree(groupInfo); + groupInfo = NULL; + } exit: return result; @@ -1907,29 +1854,20 @@ char* NTGroup::GetComment() { char* result = NULL; unsigned long length; - PGROUP_INFO_2 info; - if(currentAccountName == NULL) - { - result = NULL; - goto exit; - } - - if(NetGroupGetInfo(NULL, currentAccountName, 2, (unsigned char**)&info) != NERR_Success) + if(currentAccountName == NULL || groupInfo == NULL) { result = NULL; goto exit; } quickFree(&resultBuf); - UTF16ToUTF8(info->grpi2_comment, NULL, &length); + UTF16ToUTF8(groupInfo->grpi2_comment, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->grpi2_comment, resultBuf, &length); + UTF16ToUTF8(groupInfo->grpi2_comment, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); - return result; } @@ -1956,6 +1894,8 @@ int NTGroup::SetComment(char* comment) info.grpi1002_comment = wideStr; result = NetGroupSetInfo(NULL, currentAccountName, 1002, (unsigned char*)&info, NULL); + LoadGroupInfo(); + exit: quickFree((char**)&wideStr); @@ -2167,6 +2107,7 @@ char* NTGroupList::nextGroupName() NTLocalGroup::NTLocalGroup() { currentAccountName = NULL; + localGroupInfo = NULL; usersInfo = NULL; currentUserEntry = 0; userEntriesRead = 0; @@ -2180,6 +2121,11 @@ NTLocalGroup::NTLocalGroup() NTLocalGroup::~NTLocalGroup() { quickFree((char**)&currentAccountName); + if(localGroupInfo != NULL) + { + NetApiBufferFree(localGroupInfo); + localGroupInfo = NULL; + } if(usersInfo != NULL) { NetApiBufferFree(usersInfo); @@ -2188,6 +2134,34 @@ NTLocalGroup::~NTLocalGroup() quickFree(&resultBuf); } +// **************************************************************** +// NTLocalGroup::LoadLocalGroupInfo +// **************************************************************** +int NTLocalGroup::LoadLocalGroupInfo() +{ + int result = 0; + + if(currentAccountName == NULL) + { + result = -1; + goto exit; + } + + if(localGroupInfo != NULL) + { + NetApiBufferFree(localGroupInfo); + localGroupInfo = NULL; + } + + if(NetLocalGroupGetInfo(NULL, currentAccountName, LOCALGROUP_INFO_LEVEL, (unsigned char**)&localGroupInfo) != NERR_Success) + { + result = -1; + } + +exit: + return result; +} + // **************************************************************** // NTLocalGroup::NewLocalGroup // **************************************************************** @@ -2214,6 +2188,9 @@ int NTLocalGroup::NewLocalGroup(char* localGroupName) // Free buffers quickFree((char**)&info); + // Load info for quick retrevial + LoadLocalGroupInfo(); + return result; } @@ -2231,7 +2208,7 @@ int NTLocalGroup::RetriveLocalGroupByAccountName(char* localGroupName) currentAccountName = (unsigned short*)malloc(length); UTF8ToUTF16(localGroupName, currentAccountName, &length); - result = NetLocalGroupGetInfo(NULL, currentAccountName, LOCALGROUP_INFO_LEVEL, (unsigned char**)&info); + result = LoadLocalGroupInfo(); return result; } @@ -2290,6 +2267,12 @@ int NTLocalGroup::DeleteLocalGroup() quickFree((char**)&currentAccountName); 
+ if(localGroupInfo != NULL) + { + NetApiBufferFree(localGroupInfo); + localGroupInfo = NULL; + } + exit: return result; } @@ -2367,7 +2350,6 @@ char* NTLocalGroup::GetComment() { char* result = NULL; unsigned long length; - PLOCALGROUP_INFO_1 info; if(currentAccountName == NULL) { @@ -2375,20 +2357,13 @@ char* NTLocalGroup::GetComment() goto exit; } - if(NetLocalGroupGetInfo(NULL, currentAccountName, 1, (unsigned char**)&info) != NERR_Success) - { - result = NULL; - goto exit; - } - quickFree(&resultBuf); - UTF16ToUTF8(info->lgrpi1_comment, NULL, &length); + UTF16ToUTF8(localGroupInfo->lgrpi1_comment, NULL, &length); resultBuf = (char*)malloc(length); - UTF16ToUTF8(info->lgrpi1_comment, resultBuf, &length); + UTF16ToUTF8(localGroupInfo->lgrpi1_comment, resultBuf, &length); result = resultBuf; exit: - NetApiBufferFree((void*)info); return result; } @@ -2416,6 +2391,8 @@ int NTLocalGroup::SetComment(char* comment) info.lgrpi1002_comment = wideStr; result = NetLocalGroupSetInfo(NULL, currentAccountName, 1002, (unsigned char*)&info, NULL); + LoadLocalGroupInfo(); + exit: quickFree((char**)&wideStr); diff --git a/ldap/servers/ntds/netman/netman.h b/ldap/servers/ntds/netman/netman.h index 2bbd820d6..7d7c3be0b 100644 --- a/ldap/servers/ntds/netman/netman.h +++ b/ldap/servers/ntds/netman/netman.h @@ -115,7 +115,10 @@ public: char* NextLocalGroupName(); private: + int LoadUserInfo(); + unsigned short* currentAccountName; + PUSER_INFO_3 userInfo; GROUP_USERS_INFO_0* groupsInfo; DWORD currentGroupEntry; @@ -179,7 +182,10 @@ public: char* NextUserName(); private: + int LoadGroupInfo(); + unsigned short* currentAccountName; + PGROUP_INFO_2 groupInfo; LOCALGROUP_USERS_INFO_0* usersInfo; DWORD currentUserEntry; @@ -238,7 +244,10 @@ public: char* NextUserName(); private: + int LoadLocalGroupInfo(); + unsigned short* currentAccountName; + PLOCALGROUP_INFO_1 localGroupInfo; LOCALGROUP_MEMBERS_INFO_0* usersInfo; DWORD currentUserEntry; diff --git a/ldap/servers/ntds/wrapper/wix/ntds.wxs b/ldap/servers/ntds/wrapper/wix/ntds.wxs index f6938cc6f..8075ee704 100644 --- a/ldap/servers/ntds/wrapper/wix/ntds.wxs +++ b/ldap/servers/ntds/wrapper/wix/ntds.wxs @@ -101,5 +101,1089 @@ <ComponentRef Id='logs'/> </Feature> + <UI> + <Property Id="DefaultUIFont">DlgFont8</Property> + <Property Id="ErrorDialog">ErrorDlg</Property> + + <Dialog Id="VerifyReadyDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes" TrackDiskSpace="yes"> + <Control Id="Install" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Install]"> + <Publish Event="EndDialog" Value="Return"><![CDATA[OutOfDiskSpace <> 1]]></Publish> + <Publish Event="SpawnDialog" Value="OutOfRbDiskDlg"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND (PROMPTROLLBACKCOST="P" OR NOT PROMPTROLLBACKCOST)]]></Publish> + <Publish Event="EndDialog" Value="Return"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D"]]></Publish> + <Publish Event="EnableRollback" Value="False"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D"]]></Publish> + <Publish Event="SpawnDialog" Value="OutOfDiskDlg"><![CDATA[(OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 1) OR (OutOfDiskSpace = 1 AND PROMPTROLLBACKCOST="F")]]></Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" 
X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>The [Wizard] is ready to begin the [InstallMode] installation</Text> + </Control> + <Control Id="Text" Type="Text" X="25" Y="70" Width="320" Height="20"> + <Text>Click Install to begin the installation. If you want to review or change any of your installation settings, click Back. Click Cancel to exit the wizard.</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Ready to Install</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + </Dialog> + + + <Dialog Id="AdminWelcomeDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Next]"> + <Publish Property="InstallMode" Value="Server Image">1</Publish> + <Publish Event="NewDialog" Value="AdminRegistrationDlg">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="Bitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="234" TabSkip="no" Text="[DialogBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Back]" /> + <Control Id="Description" Type="Text" X="135" Y="70" Width="220" Height="30" Transparent="yes" NoPrefix="yes"> + <Text>The [Wizard] will create a server image of [ProductName], at a specified network location. 
Click Next to continue or Cancel to exit the [Wizard].</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="135" Y="20" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>{\VerdanaBold13}Welcome to the [ProductName] [Wizard]</Text> + </Control> + </Dialog> + + <Dialog Id="PrepareDlg" Width="370" Height="270" Title="[ProductName] [Setup]" Modeless="yes"> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="Bitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="234" TabSkip="no" Text="[DialogBitmap]" /> + <Control Id="ActionText" Type="Text" X="135" Y="100" Width="220" Height="20" Transparent="yes" NoPrefix="yes"> + <Subscribe Event="ActionText" Attribute="Text" /> + </Control> + <Control Id="Description" Type="Text" X="135" Y="70" Width="220" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Please wait while the [Wizard] prepares to guide you through the installation.</Text> + </Control> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Disabled="yes" TabSkip="yes" Text="[ButtonText_Next]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" TabSkip="yes" Text="[ButtonText_Back]" /> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="135" Y="20" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>{\VerdanaBold13}Welcome to the [ProductName] [Wizard]</Text> + </Control> + <Control Id="ActionData" Type="Text" X="135" Y="125" Width="220" Height="30" Transparent="yes" NoPrefix="yes"> + <Subscribe Event="ActionData" Attribute="Text" /> + </Control> + </Dialog> + + <Dialog Id="ProgressDlg" Width="370" Height="270" Title="[ProductName] [Setup]" Modeless="yes"> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Back]" /> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Next]" /> + <Control Id="ActionText" Type="Text" X="70" Y="100" Width="265" Height="10"> + <Subscribe Event="ActionText" Attribute="Text" /> + </Control> + <Control Id="Text" Type="Text" X="35" Y="65" Width="300" Height="20"> + <Text>Please wait while the [Wizard] [Progress2] [ProductName]. 
This may take several minutes.</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="20" Y="15" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont][Progress1] [ProductName]</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + <Control Id="ProgressBar" Type="ProgressBar" X="35" Y="115" Width="300" Height="10" ProgressBlocks="yes" Text="Progress done"> + <Subscribe Event="SetProgress" Attribute="Progress" /> + </Control> + <Control Id="StatusLabel" Type="Text" X="35" Y="100" Width="35" Height="10" Text="Status:" /> + </Dialog> + + <Dialog Id="UserExit" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Finish" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_Finish]"> + <Publish Event="EndDialog" Value="Exit">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Cancel]" /> + <Control Id="Bitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="234" TabSkip="no" Text="[DialogBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Back]" /> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="135" Y="20" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>{\VerdanaBold13}[ProductName] [Wizard] was interrupted</Text> + </Control> + <Control Id="Description1" Type="Text" X="135" Y="70" Width="220" Height="40" Transparent="yes" NoPrefix="yes"> + <Text>[ProductName] setup was interrupted. Your system has not been modified. To install this program at a later time, please run the installation again.</Text> + </Control> + <Control Id="Description2" Type="Text" X="135" Y="115" Width="220" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Click the Finish button to exit the [Wizard].</Text> + </Control> + </Dialog> + + <Dialog Id="FatalError" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Finish" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_Finish]"> + <Publish Event="EndDialog" Value="Exit">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Cancel]" /> + <Control Id="Bitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="234" TabSkip="no" Text="[DialogBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Back]" /> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="135" Y="20" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>{\VerdanaBold13}[ProductName] [Wizard] ended prematurely</Text> + </Control> + <Control Id="Description1" Type="Text" X="135" Y="70" Width="220" Height="40" Transparent="yes" NoPrefix="yes"> + <Text>[ProductName] setup ended prematurely because of an error. Your system has not been modified. 
To install this program at a later time, please run the installation again.</Text> + </Control> + <Control Id="Description2" Type="Text" X="135" Y="115" Width="220" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Click the Finish button to exit the [Wizard].</Text> + </Control> + </Dialog> + + <Dialog Id="ExitDialog" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Finish" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_Finish]"> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Cancel]" /> + <Control Id="Bitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="234" TabSkip="no" Text="[DialogBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Back]" /> + <Control Id="Description" Type="Text" X="135" Y="70" Width="220" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Click the Finish button to exit the [Wizard].</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="135" Y="20" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>{\VerdanaBold13}Completing the [ProductName] [Wizard]</Text> + </Control> + </Dialog> + + <Dialog Id="AdminBrowseDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="PathEdit" Type="PathEdit" X="84" Y="202" Width="261" Height="17" Property="TARGETDIR" /> + <Control Id="OK" Type="PushButton" X="304" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_OK]"> + <Publish Event="SetTargetPath" Value="TARGETDIR">1</Publish> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="240" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="Reset" Value="0">1</Publish> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="ComboLabel" Type="Text" X="25" Y="58" Width="44" Height="10" TabSkip="no" Text="&amp;Look in:" /> + <Control Id="DirectoryCombo" Type="DirectoryCombo" X="70" Y="55" Width="220" Height="80" Property="TARGETDIR" Removable="yes" Fixed="yes" Remote="yes"> + <Subscribe Event="IgnoreChange" Attribute="IgnoreChange" /> + </Control> + <Control Id="Up" Type="PushButton" X="298" Y="55" Width="19" Height="19" ToolTip="Up One Level" Icon="no" FixedSize="yes" IconSize="16" Text="Up"> + <Publish Event="DirectoryListUp" Value="0">1</Publish> + </Control> + <Control Id="NewFolder" Type="PushButton" X="325" Y="55" Width="19" Height="19" ToolTip="Create A New Folder" Icon="no" FixedSize="yes" IconSize="16" Text="New"> + <Publish Event="DirectoryListNew" Value="0">1</Publish> + </Control> + <Control Id="DirectoryList" Type="DirectoryList" X="25" Y="83" Width="320" Height="110" Property="TARGETDIR" Sunken="yes" TabSkip="no" /> + <Control Id="PathLabel" Type="Text" X="25" Y="205" Width="59" Height="10" TabSkip="no" Text="&amp;Folder name:" /> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>Browse to the destination folder</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control 
Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Change current destination folder</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + </Dialog> + + <Dialog Id="AdminInstallPointDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Text" Type="Text" X="25" Y="80" Width="320" Height="10" TabSkip="no"> + <Text>&amp;Enter a new network location or click Browse to browse to one.</Text> + </Control> + <Control Id="PathEdit" Type="PathEdit" X="25" Y="93" Width="320" Height="18" Property="TARGETDIR" /> + <Control Id="Browse" Type="PushButton" X="289" Y="119" Width="56" Height="17" Text="[ButtonText_Browse]"> + <Publish Event="SpawnDialog" Value="AdminBrowseDlg">1</Publish> + </Control> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="AdminRegistrationDlg">1</Publish> + </Control> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Next]"> + <Publish Event="SetTargetPath" Value="TARGETDIR">1</Publish> + <Publish Event="NewDialog" Value="VerifyReadyDlg">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="25" Y="20" Width="280" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Please specify a network location for the server image of [ProductName] product</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Network Location</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + </Dialog> + + <Dialog Id="AdminRegistrationDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="OrganizationLabel" Type="Text" X="45" Y="71" Width="285" Height="30" TabSkip="no"> + <Text>&amp;Please enter the name of your organization in the box below. 
This will be used as default company name for subsequent installations of [ProductName]:</Text> + </Control> + <Control Id="OrganizationEdit" Type="Edit" X="45" Y="105" Width="220" Height="18" Property="COMPANYNAME" Text="{80}" /> + <Control Id="CDKeyLabel" Type="Text" X="45" Y="130" Width="50" Height="10" TabSkip="no"> + <Text>CD &amp;Key:</Text> + </Control> + <Control Id="CDKeyEdit" Type="MaskedEdit" X="45" Y="143" Width="250" Height="16" Property="PIDKEY" Text="[PortTemplate]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Image="yes" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="AdminWelcomeDlg">1</Publish> + </Control> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Next]"> + <Publish Event="ValidateProductID" Value="0">0</Publish> + <Publish Event="NewDialog" Value="AdminInstallPointDlg">ProductID</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>Please enter your company information</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Company Information</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + </Dialog> + + <Dialog Id="BrowseDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="PathEdit" Type="PathEdit" X="84" Y="202" Width="261" Height="18" Property="_BrowseProperty" Indirect="yes" /> + <Control Id="OK" Type="PushButton" X="304" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_OK]"> + <Publish Event="SetTargetPath" Value="[_BrowseProperty]">1</Publish> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="240" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="Reset" Value="0">1</Publish> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="ComboLabel" Type="Text" X="25" Y="58" Width="44" Height="10" TabSkip="no" Text="&amp;Look in:" /> + <Control Id="DirectoryCombo" Type="DirectoryCombo" X="70" Y="55" Width="220" Height="80" Property="_BrowseProperty" Indirect="yes" Fixed="yes" Remote="yes"> + <Subscribe Event="IgnoreChange" Attribute="IgnoreChange" /> + </Control> + <Control Id="Up" Type="PushButton" X="298" Y="55" Width="19" Height="19" ToolTip="Up One Level" Icon="no" FixedSize="yes" IconSize="16" Text="Up"> + <Publish Event="DirectoryListUp" Value="0">1</Publish> + </Control> + <Control Id="NewFolder" Type="PushButton" X="325" Y="55" Width="19" Height="19" ToolTip="Create A New Folder" Icon="no" FixedSize="yes" IconSize="16" Text="New"> + <Publish Event="DirectoryListNew" Value="0">1</Publish> + </Control> + <Control Id="DirectoryList" Type="DirectoryList" X="25" Y="83" Width="320" Height="110" Property="_BrowseProperty" Sunken="yes" Indirect="yes" TabSkip="no" /> + <Control Id="PathLabel" Type="Text" X="25" Y="205" Width="59" Height="10" TabSkip="no" 
Text="&amp;Folder name:" /> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>Browse to the destination folder</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Change current destination folder</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + </Dialog> + + <Dialog Id="CancelDlg" Width="260" Height="85" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="No" Type="PushButton" X="132" Y="57" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_No]"> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="Yes" Type="PushButton" X="72" Y="57" Width="56" Height="17" Text="[ButtonText_Yes]"> + <Publish Event="EndDialog" Value="Exit">1</Publish> + </Control> + <Control Id="Text" Type="Text" X="48" Y="15" Width="194" Height="30"> + <Text>Are you sure you want to cancel [ProductName] installation?</Text> + </Control> + <Control Id="Icon" Type="Icon" X="15" Y="15" Width="24" Height="24" ToolTip="Information icon" FixedSize="yes" IconSize="32" Text="[InfoIcon]" /> + </Dialog> + + <Dialog Id="CustomizeDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes" TrackDiskSpace="yes"> + <Control Id="Tree" Type="SelectionTree" X="25" Y="85" Width="175" Height="95" Property="_BrowseProperty" Sunken="yes" TabSkip="no" Text="Tree of selections" /> + <Control Id="Browse" Type="PushButton" X="304" Y="200" Width="56" Height="17" Text="[ButtonText_Browse]"> + <Publish Event="SelectionBrowse" Value="BrowseDlg">1</Publish> + <Condition Action="hide">Installed</Condition> + </Control> + <Control Id="Reset" Type="PushButton" X="42" Y="243" Width="56" Height="17" Text="[ButtonText_Reset]"> + <Publish Event="Reset" Value="0">1</Publish> + <Subscribe Event="SelectionNoItems" Attribute="Enabled" /> + </Control> + <Control Id="DiskCost" Type="PushButton" X="111" Y="243" Width="56" Height="17"> + <Text>Disk &amp;Usage</Text> + <Publish Event="SpawnDialog" Value="DiskCostDlg">1</Publish> + <Subscribe Event="SelectionNoItems" Attribute="Enabled" /> + </Control> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="MaintenanceTypeDlg"><![CDATA[InstallMode = "Change"]]></Publish> + <Publish Event="NewDialog" Value="SetupTypeDlg"><![CDATA[InstallMode = "Custom"]]></Publish> + </Control> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Next]"> + <Publish Event="NewDialog" Value="VerifyReadyDlg">1</Publish> + <Subscribe Event="SelectionNoItems" Attribute="Enabled" /> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>Select the way you want features to be installed.</Text> + </Control> + <Control Id="Text" Type="Text" 
X="25" Y="55" Width="320" Height="20"> + <Text>Click on the icons in the tree below to change the way features will be installed.</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Custom Setup</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + <Control Id="Box" Type="GroupBox" X="210" Y="81" Width="140" Height="98" /> + <Control Id="ItemDescription" Type="Text" X="215" Y="90" Width="131" Height="30"> + <Text>Multiline description of the currently selected item.</Text> + <Subscribe Event="SelectionDescription" Attribute="Text" /> + </Control> + <Control Id="ItemSize" Type="Text" X="215" Y="130" Width="131" Height="45"> + <Text>The size of the currently selected item.</Text> + <Subscribe Event="SelectionSize" Attribute="Text" /> + </Control> + <Control Id="Location" Type="Text" X="75" Y="200" Width="215" Height="20"> + <Text>&lt;The selection's path&gt;</Text> + <Subscribe Event="SelectionPath" Attribute="Text" /> + <Subscribe Event="SelectionPathOn" Attribute="Visible" /> + <Condition Action="hide">Installed</Condition> + </Control> + <Control Id="LocationLabel" Type="Text" X="25" Y="200" Width="50" Height="10" Text="Location:"> + <Subscribe Event="SelectionPathOn" Attribute="Visible" /> + <Condition Action="hide">Installed</Condition> + </Control> + </Dialog> + + <Dialog Id="DiskCostDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="OK" Type="PushButton" X="304" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_OK]"> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="20" Y="20" Width="280" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>The disk space required for the installation of the selected features.</Text> + </Control> + <Control Id="Text" Type="Text" X="20" Y="53" Width="330" Height="40"> + <Text>The highlighted volumes (if any) do not have enough disk space available for the currently selected features. 
You can either remove some files from the highlighted volumes, or choose to install less features onto local drive(s), or select different destination drive(s).</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Disk Space Requirements</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + <Control Id="VolumeList" Type="VolumeCostList" X="20" Y="100" Width="330" Height="120" Sunken="yes" Fixed="yes" Remote="yes"> + <Text>{120}{70}{70}{70}{70}</Text> + </Control> + </Dialog> + + <Dialog Id="ErrorDlg" Width="270" Height="105" Title="Installer Information" ErrorDialog="yes" NoMinimize="yes"> + <Control Id="ErrorText" Type="Text" X="48" Y="15" Width="205" Height="60" TabSkip="no" Text="Information text" /> + <Control Id="Y" Type="PushButton" X="100" Y="80" Width="56" Height="17" TabSkip="yes" Text="[ButtonText_Yes]"> + <Publish Event="EndDialog" Value="ErrorYes">1</Publish> + </Control> + <Control Id="A" Type="PushButton" X="100" Y="80" Width="56" Height="17" TabSkip="yes" Text="[ButtonText_Cancel]"> + <Publish Event="EndDialog" Value="ErrorAbort">1</Publish> + </Control> + <Control Id="C" Type="PushButton" X="100" Y="80" Width="56" Height="17" TabSkip="yes" Text="[ButtonText_Cancel]"> + <Publish Event="EndDialog" Value="ErrorCancel">1</Publish> + </Control> + <Control Id="ErrorIcon" Type="Icon" X="15" Y="15" Width="24" Height="24" ToolTip="Information icon" FixedSize="yes" IconSize="32" Text="[InfoIcon]" /> + <Control Id="I" Type="PushButton" X="100" Y="80" Width="56" Height="17" TabSkip="yes" Text="[ButtonText_Ignore]"> + <Publish Event="EndDialog" Value="ErrorIgnore">1</Publish> + </Control> + <Control Id="N" Type="PushButton" X="100" Y="80" Width="56" Height="17" TabSkip="yes" Text="[ButtonText_No]"> + <Publish Event="EndDialog" Value="ErrorNo">1</Publish> + </Control> + <Control Id="O" Type="PushButton" X="100" Y="80" Width="56" Height="17" TabSkip="yes" Text="[ButtonText_OK]"> + <Publish Event="EndDialog" Value="ErrorOk">1</Publish> + </Control> + <Control Id="R" Type="PushButton" X="100" Y="80" Width="56" Height="17" TabSkip="yes" Text="[ButtonText_Retry]"> + <Publish Event="EndDialog" Value="ErrorRetry">1</Publish> + </Control> + </Dialog> + + + + <Dialog Id="FilesInUse" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes" KeepModeless="yes"> + <Control Id="Retry" Type="PushButton" X="304" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_Retry]"> + <Publish Event="EndDialog" Value="Retry">1</Publish> + </Control> + <Control Id="Ignore" Type="PushButton" X="235" Y="243" Width="56" Height="17" Text="[ButtonText_Ignore]"> + <Publish Event="EndDialog" Value="Ignore">1</Publish> + </Control> + <Control Id="Exit" Type="PushButton" X="166" Y="243" Width="56" Height="17" Text="[ButtonText_Exit]"> + <Publish Event="EndDialog" Value="Exit">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="20" Y="23" Width="280" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Some files that need to be updated are currently in use.</Text> + </Control> + <Control Id="Text" Type="Text" X="20" Y="55" Width="330" Height="30"> + <Text>The following applications are using files that need to be updated by this setup. 
Close these applications and then click Retry to continue the installation or Cancel to exit it.</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Files in Use</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + <Control Id="List" Type="ListBox" X="20" Y="87" Width="330" Height="130" Property="FileInUseProcess" Sunken="yes" TabSkip="yes" /> + </Dialog> + + <Dialog Id="LicenseAgreementDlg" Width="370" Height="270" Title="[ProductName] License Agreement" NoMinimize="yes"> + <Control Id="Buttons" Type="RadioButtonGroup" X="20" Y="187" Width="330" Height="40" Property="IAgree" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="WelcomeDlg">1</Publish> + </Control> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Next]"> + <Publish Event="NewDialog" Value="VerifyReadyDlg"><![CDATA[IAgree = "Yes" AND ShowUserRegistrationDlg = 1]]></Publish> + <Publish Event="SpawnWaitDialog" Value="WaitForCostingDlg">CostingComplete = 1</Publish> + <Publish Event="NewDialog" Value="SetupTypeDlg"><![CDATA[IAgree = "Yes" AND ShowUserRegistrationDlg <> 1]]></Publish> + <Condition Action="disable"><![CDATA[IAgree <> "Yes"]]></Condition> + <Condition Action="enable">IAgree = "Yes"</Condition> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> +<!-- <Control Id="AgreementText" Type="ScrollableText" X="20" Y="60" Width="330" Height="120" Sunken="yes" TabSkip="no"> + <Text src="Binary/License.rtf" /> + </Control> --> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>Please read the following license agreement carefully</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]End-User License Agreement</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + </Dialog> + + <Dialog Id="MaintenanceTypeDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="RepairLabel" Type="Text" X="105" Y="114" Width="100" Height="10" TabSkip="no"> + <Text>[DlgTitleFont]Re&amp;pair</Text> + </Control> + <Control Id="RepairButton" Type="PushButton" X="50" Y="114" Width="38" Height="38" ToolTip="Repair Installation" Icon="yes" FixedSize="yes" IconSize="32" Text="[RepairIcon]"> + <Publish Property="InstallMode" Value="Repair">1</Publish> + <Publish Property="Progress1" Value="Repairing">1</Publish> + <Publish Property="Progress2" Value="repaires">1</Publish> + <Publish Event="NewDialog" Value="VerifyRepairDlg">1</Publish> + </Control> + <Control Id="RemoveLabel" Type="Text" X="105" Y="163" Width="100" Height="10" TabSkip="no"> + <Text>[DlgTitleFont]&amp;Remove</Text> + </Control> + <Control Id="RemoveButton" Type="PushButton" X="50" Y="163" Width="38" Height="38" ToolTip="Remove 
Installation" Icon="yes" FixedSize="yes" IconSize="32" Text="[RemoveIcon]"> + <Publish Property="InstallMode" Value="Remove">1</Publish> + <Publish Property="Progress1" Value="Removing">1</Publish> + <Publish Property="Progress2" Value="removes">1</Publish> + <Publish Event="NewDialog" Value="VerifyRemoveDlg">1</Publish> + </Control> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="MaintenanceWelcomeDlg">1</Publish> + </Control> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Next]" /> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Select the operation you wish to perform.</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="240" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Modify, Repair or Remove installation</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + <Control Id="RemoveText" Type="Text" X="105" Y="176" Width="230" Height="20"> + <Text>Removes [ProductName] from your computer.</Text> + </Control> + <Control Id="RepairText" Type="Text" X="105" Y="127" Width="230" Height="30"> + <Text>Repairs errors in the most recent installation state - fixes missing or corrupt files, shortcuts and registry entries.</Text> + </Control> + </Dialog> + + <Dialog Id="MaintenanceWelcomeDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Next]"> + <Publish Event="SpawnWaitDialog" Value="WaitForCostingDlg">CostingComplete = 1</Publish> + <Publish Event="NewDialog" Value="MaintenanceTypeDlg">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="Bitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="234" TabSkip="no" Text="[DialogBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Back]" /> + <Control Id="Description" Type="Text" X="135" Y="70" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>The [Wizard] will allow you to change the way [ProductName] features are installed on your computer or even to remove [ProductName] from your computer. 
Click Next to continue or Cancel to exit the [Wizard].</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="135" Y="20" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>{\VerdanaBold13}Welcome to the [ProductName] [Wizard]</Text> + </Control> + </Dialog> + + <Dialog Id="OutOfDiskDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="OK" Type="PushButton" X="304" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_OK]"> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="20" Y="20" Width="280" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Disk space required for the installation exceeds available disk space.</Text> + </Control> + <Control Id="Text" Type="Text" X="20" Y="53" Width="330" Height="40"> + <Text>The highlighted volumes do not have enough disk space available for the currently selected features. You can either remove some files from the highlighted volumes, or choose to install less features onto local drive(s), or select different destination drive(s).</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Out of Disk Space</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + <Control Id="VolumeList" Type="VolumeCostList" X="20" Y="100" Width="330" Height="120" Sunken="yes" Fixed="yes" Remote="yes"> + <Text>{120}{70}{70}{70}{70}</Text> + </Control> + </Dialog> + + <Dialog Id="OutOfRbDiskDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="No" Type="PushButton" X="304" Y="243" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_No]"> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="Yes" Type="PushButton" X="240" Y="243" Width="56" Height="17" Text="[ButtonText_Yes]"> + <Publish Event="EnableRollback" Value="False">1</Publish> + <Publish Event="EndDialog" Value="Return">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="20" Y="20" Width="280" Height="20" Transparent="yes" NoPrefix="yes"> + <Text>Disk space required for the installation exceeds available disk space.</Text> + </Control> + <Control Id="Text" Type="Text" X="20" Y="53" Width="330" Height="40"> + <Text>The highlighted volumes do not have enough disk space available for the currently selected features. 
You can either remove some files from the highlighted volumes, or choose to install less features onto local drive(s), or select different destination drive(s).</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Out of Disk Space</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + <Control Id="VolumeList" Type="VolumeCostList" X="20" Y="140" Width="330" Height="80" Sunken="yes" Fixed="yes" Remote="yes" ShowRollbackCost="yes"> + <Text>{120}{70}{70}{70}{70}</Text> + </Control> + <Control Id="Text2" Type="Text" X="20" Y="94" Width="330" Height="40"> + <Text>Alternatively, you may choose to disable the installer's rollback functionality. This allows the installer to restore your computer's original state should the installation be interrupted in any way. Click Yes if you wish to take the risk to disable rollback.</Text> + </Control> + </Dialog> + + <Dialog Id="ResumeDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Install" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Install]"> + <Publish Event="SpawnWaitDialog" Value="WaitForCostingDlg">CostingComplete = 1</Publish> + <Publish Event="EndDialog" Value="Return"><![CDATA[OutOfDiskSpace <> 1]]></Publish> + <Publish Event="SpawnDialog" Value="OutOfRbDiskDlg"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND (PROMPTROLLBACKCOST="P" OR NOT PROMPTROLLBACKCOST)]]></Publish> + <Publish Event="EndDialog" Value="Return"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D"]]></Publish> + <Publish Event="EnableRollback" Value="False"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D"]]></Publish> + <Publish Event="SpawnDialog" Value="OutOfDiskDlg"><![CDATA[(OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 1) OR (OutOfDiskSpace = 1 AND PROMPTROLLBACKCOST="F")]]></Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="Bitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="234" TabSkip="no" Text="[DialogBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Back]" /> + <Control Id="Description" Type="Text" X="135" Y="70" Width="220" Height="30" Transparent="yes" NoPrefix="yes"> + <Text>The [Wizard] will complete the installation of [ProductName] on your computer. 
Click Install to continue or Cancel to exit the [Wizard].</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="135" Y="20" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>{\VerdanaBold13}Resuming the [ProductName] [Wizard]</Text> + </Control> + </Dialog> + + <Dialog Id="SetupTypeDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="TypicalLabel" Type="Text" X="105" Y="65" Width="100" Height="10" TabSkip="no"> + <Text>[DlgTitleFont]&amp;Typical</Text> + </Control> + <Control Id="TypicalButton" Type="PushButton" X="50" Y="65" Width="38" Height="38" ToolTip="Typical Installation" Default="yes" Icon="no" FixedSize="yes" IconSize="32" Text="[InstallerIcon]"> + <Publish Property="InstallMode" Value="Typical">1</Publish> + <Publish Event="SetInstallLevel" Value="3">1</Publish> + <Publish Event="NewDialog" Value="VerifyReadyDlg">1</Publish> + </Control> + <Control Id="CustomLabel" Type="Text" X="105" Y="118" Width="100" Height="10" TabSkip="no"> + <Text>[DlgTitleFont]C&amp;ustom</Text> + </Control> + <Control Id="CustomButton" Type="PushButton" X="50" Y="118" Width="38" Height="38" ToolTip="Custom Installation" Icon="no" FixedSize="yes" IconSize="32" Text="[CustomSetupIcon]"> + <Publish Property="InstallMode" Value="Custom">1</Publish> + <Publish Event="NewDialog" Value="CustomizeDlg">1</Publish> + </Control> + <Control Id="CompleteLabel" Type="Text" X="105" Y="171" Width="100" Height="10" TabSkip="no"> + <Text>[DlgTitleFont]C&amp;omplete</Text> + </Control> + <Control Id="CompleteButton" Type="PushButton" X="50" Y="171" Width="38" Height="38" ToolTip="Complete Installation" Icon="no" FixedSize="yes" IconSize="32" Text="[CompleteSetupIcon]"> + <Publish Property="InstallMode" Value="Complete">1</Publish> + <Publish Event="SetInstallLevel" Value="1000">1</Publish> + <Publish Event="NewDialog" Value="VerifyReadyDlg">1</Publish> + </Control> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="LicenseAgreementDlg"><![CDATA[ShowUserRegistrationDlg <> 1]]></Publish> + <!-- <Publish Event="NewDialog" Value="UserRegistrationDlg">ShowUserRegistrationDlg = 1</Publish> --> + </Control> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Next]" /> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>Choose the setup type that best suits your needs</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Choose Setup Type</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + <Control Id="CompleteText" Type="Text" X="105" Y="184" Width="230" Height="20"> + <Text>All program features will be installed. 
(Requires most disk space)</Text> + </Control> + <Control Id="CustomText" Type="Text" X="105" Y="131" Width="230" Height="30"> + <Text>Allows users to choose which program features will be installed and where they will be installed. Recommended for advanced users.</Text> + </Control> + <Control Id="TypicalText" Type="Text" X="105" Y="78" Width="230" Height="20"> + <Text>Installs the most common program features. Recommended for most users.</Text> + </Control> + </Dialog> + + <Dialog Id="VerifyRemoveDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes" TrackDiskSpace="yes"> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="MaintenanceTypeDlg">1</Publish> + </Control> + <Control Id="Remove" Type="PushButton" X="236" Y="243" Width="56" Height="17" Text="[ButtonText_Remove]"> + <Publish Event="Remove" Value="All"><![CDATA[OutOfDiskSpace <> 1]]></Publish> + <Publish Event="EndDialog" Value="Return"><![CDATA[OutOfDiskSpace <> 1]]></Publish> + <Publish Event="SpawnDialog" Value="OutOfRbDiskDlg"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND (PROMPTROLLBACKCOST="P" OR NOT PROMPTROLLBACKCOST)]]></Publish> + <Publish Event="EndDialog" Value="Return"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D"]]></Publish> + <Publish Event="EnableRollback" Value="False"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D"]]></Publish> + <Publish Event="SpawnDialog" Value="OutOfDiskDlg"><![CDATA[(OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 1) OR (OutOfDiskSpace = 1 AND PROMPTROLLBACKCOST="F")]]></Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>You have chosen to remove the program from your computer.</Text> + </Control> + <Control Id="Text" Type="Text" X="25" Y="70" Width="320" Height="30"> + <Text>Click Remove to remove [ProductName] from your computer. If you want to review or change any of your installation settings, click Back. 
Click Cancel to exit the wizard.</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Remove [ProductName]</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + </Dialog> + + <Dialog Id="VerifyRepairDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes" TrackDiskSpace="yes"> + <Control Id="Repair" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Repair]"> + <Publish Event="ReinstallMode" Value="ecmus"><![CDATA[OutOfDiskSpace <> 1]]></Publish> + <Publish Event="Reinstall" Value="All"><![CDATA[OutOfDiskSpace <> 1]]></Publish> + <Publish Event="EndDialog" Value="Return"><![CDATA[OutOfDiskSpace <> 1]]></Publish> + <Publish Event="SpawnDialog" Value="OutOfRbDiskDlg"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND (PROMPTROLLBACKCOST="P" OR NOT PROMPTROLLBACKCOST)]]></Publish> + <Publish Event="EndDialog" Value="Return"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D"]]></Publish> + <Publish Event="EnableRollback" Value="False"><![CDATA[OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 0 AND PROMPTROLLBACKCOST="D"]]></Publish> + <Publish Event="SpawnDialog" Value="OutOfDiskDlg"><![CDATA[(OutOfDiskSpace = 1 AND OutOfNoRbDiskSpace = 1) OR (OutOfDiskSpace = 1 AND PROMPTROLLBACKCOST="F")]]></Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="BannerBitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="44" TabSkip="no" Text="[BannerBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Text="[ButtonText_Back]"> + <Publish Event="NewDialog" Value="MaintenanceTypeDlg">1</Publish> + </Control> + <Control Id="Description" Type="Text" X="25" Y="23" Width="280" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>The [Wizard] is ready to begin the repair of [ProductName].</Text> + </Control> + <Control Id="Text" Type="Text" X="25" Y="70" Width="320" Height="30"> + <Text>Click Repair to repair the installation of [ProductName]. If you want to review or change any of your installation settings, click Back. 
Click Cancel to exit the wizard.</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="15" Y="6" Width="200" Height="15" Transparent="yes" NoPrefix="yes"> + <Text>[DlgTitleFont]Repair [ProductName]</Text> + </Control> + <Control Id="BannerLine" Type="Line" X="0" Y="44" Width="370" Height="0" /> + </Dialog> + + <Dialog Id="WaitForCostingDlg" Width="260" Height="85" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Return" Type="PushButton" X="102" Y="57" Width="56" Height="17" Default="yes" Cancel="yes" Text="[ButtonText_Return]"> + <Publish Event="EndDialog" Value="Exit">1</Publish> + </Control> + <Control Id="Text" Type="Text" X="48" Y="15" Width="194" Height="30"> + <Text>Please wait while the installer finishes determining your disk space requirements.</Text> + </Control> + <Control Id="Icon" Type="Icon" X="15" Y="15" Width="24" Height="24" ToolTip="Exclamation icon" FixedSize="yes" IconSize="32" Text="[ExclamationIcon]" /> + </Dialog> + + <Dialog Id="WelcomeDlg" Width="370" Height="270" Title="[ProductName] [Setup]" NoMinimize="yes"> + <Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="[ButtonText_Next]"> + <Publish Event="NewDialog" Value="VerifyReadyDlg">1</Publish> + </Control> + <Control Id="Cancel" Type="PushButton" X="304" Y="243" Width="56" Height="17" Cancel="yes" Text="[ButtonText_Cancel]"> + <Publish Event="SpawnDialog" Value="CancelDlg">1</Publish> + </Control> + <Control Id="Bitmap" Type="Bitmap" X="0" Y="0" Width="370" Height="234" TabSkip="no" Text="[DialogBitmap]" /> + <Control Id="Back" Type="PushButton" X="180" Y="243" Width="56" Height="17" Disabled="yes" Text="[ButtonText_Back]" /> + <Control Id="Description" Type="Text" X="135" Y="70" Width="220" Height="30" Transparent="yes" NoPrefix="yes"> + <Text>The [Wizard] will install [ProductName] on your computer. 
Click Next to continue or Cancel to exit the [Wizard].</Text> + </Control> + <Control Id="BottomLine" Type="Line" X="0" Y="234" Width="370" Height="0" /> + <Control Id="Title" Type="Text" X="135" Y="20" Width="220" Height="60" Transparent="yes" NoPrefix="yes"> + <Text>{\VerdanaBold13}Welcome to the [ProductName] [Wizard]</Text> + </Control> + </Dialog> + + <RadioButtonGroup Property="IAgree"> + <RadioButton Text="{\DlgFont8}I &amp;accept the terms in the License Agreement" Value="Yes" X="5" Y="0" Width="250" Height="15" /> + <RadioButton Text="{\DlgFont8}I &amp;do not accept the terms in the License Agreement" Value="No" X="5" Y="20" Width="250" Height="15" /> + </RadioButtonGroup> + + <TextStyle Id="DlgFont8" FaceName="Tahoma" Size="8" /> + <TextStyle Id="DlgFontBold8" FaceName="Tahoma" Size="8" Bold="yes" /> + <TextStyle Id="VerdanaBold13" FaceName="Verdana" Size="13" Bold="yes" /> + + <UIText Id="AbsentPath" /> + <UIText Id="bytes">bytes</UIText> + <UIText Id="GB">GB</UIText> + <UIText Id="KB">KB</UIText> + <UIText Id="MB">MB</UIText> + <UIText Id="MenuAbsent">Entire feature will be unavailable</UIText> + <UIText Id="MenuAdvertise">Feature will be installed when required</UIText> + <UIText Id="MenuAllCD">Entire feature will be installed to run from CD</UIText> + <UIText Id="MenuAllLocal">Entire feature will be installed on local hard drive</UIText> + <UIText Id="MenuAllNetwork">Entire feature will be installed to run from network</UIText> + <UIText Id="MenuCD">Will be installed to run from CD</UIText> + <UIText Id="MenuLocal">Will be installed on local hard drive</UIText> + <UIText Id="MenuNetwork">Will be installed to run from network</UIText> + <UIText Id="ScriptInProgress">Gathering required information...</UIText> + <UIText Id="SelAbsentAbsent">This feature will remain uninstalled</UIText> + <UIText Id="SelAbsentAdvertise">This feature will be set to be installed when required</UIText> + <UIText Id="SelAbsentCD">This feature will be installed to run from CD</UIText> + <UIText Id="SelAbsentLocal">This feature will be installed on the local hard drive</UIText> + <UIText Id="SelAbsentNetwork">This feature will be installed to run from the network</UIText> + <UIText Id="SelAdvertiseAbsent">This feature will become unavailable</UIText> + <UIText Id="SelAdvertiseAdvertise">Will be installed when required</UIText> + <UIText Id="SelAdvertiseCD">This feature will be available to run from CD</UIText> + <UIText Id="SelAdvertiseLocal">This feature will be installed on your local hard drive</UIText> + <UIText Id="SelAdvertiseNetwork">This feature will be available to run from the network</UIText> + <UIText Id="SelCDAbsent">This feature will be uninstalled completely, you won't be able to run it from CD</UIText> + <UIText Id="SelCDAdvertise">This feature will change from run from CD state to set to be installed when required</UIText> + <UIText Id="SelCDCD">This feature will remain to be run from CD</UIText> + <UIText Id="SelCDLocal">This feature will change from run from CD state to be installed on the local hard drive</UIText> + <UIText Id="SelChildCostNeg">This feature frees up [1] on your hard drive.</UIText> + <UIText Id="SelChildCostPos">This feature requires [1] on your hard drive.</UIText> + <UIText Id="SelCostPending">Compiling cost for this feature...</UIText> + <UIText Id="SelLocalAbsent">This feature will be completely removed</UIText> + <UIText Id="SelLocalAdvertise">This feature will be removed from your local hard drive, but will be set to be installed when required</UIText> + 
<UIText Id="SelLocalCD">This feature will be removed from your local hard drive, but will be still available to run from CD</UIText> + <UIText Id="SelLocalLocal">This feature will remain on you local hard drive</UIText> + <UIText Id="SelLocalNetwork">This feature will be removed from your local hard drive, but will be still available to run from the network</UIText> + <UIText Id="SelNetworkAbsent">This feature will be uninstalled completely, you won't be able to run it from the network</UIText> + <UIText Id="SelNetworkAdvertise">This feature will change from run from network state to set to be installed when required</UIText> + <UIText Id="SelNetworkLocal">This feature will change from run from network state to be installed on the local hard drive</UIText> + <UIText Id="SelNetworkNetwork">This feature will remain to be run from the network</UIText> + <UIText Id="SelParentCostNegNeg">This feature frees up [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures free up [4] on your hard drive.</UIText> + <UIText Id="SelParentCostNegPos">This feature frees up [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures require [4] on your hard drive.</UIText> + <UIText Id="SelParentCostPosNeg">This feature requires [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures free up [4] on your hard drive.</UIText> + <UIText Id="SelParentCostPosPos">This feature requires [1] on your hard drive. It has [2] of [3] subfeatures selected. The subfeatures require [4] on your hard drive.</UIText> + <UIText Id="TimeRemaining">Time remaining: {[1] minutes }{[2] seconds}</UIText> + <UIText Id="VolumeCostAvailable">Available</UIText> + <UIText Id="VolumeCostDifference">Difference</UIText> + <UIText Id="VolumeCostRequired">Required</UIText> + <UIText Id="VolumeCostSize">Disk Size</UIText> + <UIText Id="VolumeCostVolume">Volume</UIText> + <ProgressText Action="CostFinalize">Computing space requirements</ProgressText> + <ProgressText Action="CostInitialize">Computing space requirements</ProgressText> + <ProgressText Action="FileCost">Computing space requirements</ProgressText> + <ProgressText Action="InstallValidate">Validating install</ProgressText> + <ProgressText Action="InstallFiles" Template="File: [1], Directory: [9], Size: [6]">Copying new files</ProgressText> + <ProgressText Action="InstallAdminPackage" Template="File: [1], Directory: [9], Size: [6]">Copying network install files</ProgressText> + <ProgressText Action="CreateShortcuts" Template="Shortcut: [1]">Creating shortcuts</ProgressText> + <ProgressText Action="PublishComponents" Template="Component ID: [1], Qualifier: [2]">Publishing Qualified Components</ProgressText> + <ProgressText Action="PublishFeatures" Template="Feature: [1]">Publishing Product Features</ProgressText> + <ProgressText Action="PublishProduct">Publishing product information</ProgressText> + <ProgressText Action="ClassInfo" Template="Class Id: [1]">Registering Class servers</ProgressText> + <ProgressText Action="RegisterExtensionInfo" Template="Extension: [1]">Registering extension servers</ProgressText> + <ProgressText Action="RegisterMIMEInfo" Template="MIME Content Type: [1], Extension: [2]">Registering MIME info</ProgressText> + <ProgressText Action="RegisterProgIdInfo" Template="ProgId: [1]">Registering program identifiers</ProgressText> + <ProgressText Action="AllocateRegistrySpace" Template="Free space: [1]">Allocating registry space</ProgressText> + <ProgressText Action="AppSearch" Template="Property: 
[1], Signature: [2]">Searching for installed applications</ProgressText> + <ProgressText Action="BindImage" Template="File: [1]">Binding executables</ProgressText> + <ProgressText Action="CCPSearch">Searching for qualifying products</ProgressText> + <ProgressText Action="CreateFolders" Template="Folder: [1]">Creating folders</ProgressText> + <ProgressText Action="DeleteServices" Template="Service: [1]">Deleting services</ProgressText> + <ProgressText Action="DuplicateFiles" Template="File: [1], Directory: [9], Size: [6]">Creating duplicate files</ProgressText> + <ProgressText Action="FindRelatedProducts" Template="Found application: [1]">Searching for related applications</ProgressText> + <ProgressText Action="InstallODBC">Installing ODBC components</ProgressText> + <ProgressText Action="InstallServices" Template="Service: [2]">Installing new services</ProgressText> + <ProgressText Action="LaunchConditions">Evaluating launch conditions</ProgressText> + <ProgressText Action="MigrateFeatureStates" Template="Application: [1]">Migrating feature states from related applications</ProgressText> + <ProgressText Action="MoveFiles" Template="File: [1], Directory: [9], Size: [6]">Moving files</ProgressText> + <ProgressText Action="PatchFiles" Template="File: [1], Directory: [2], Size: [3]">Patching files</ProgressText> + <ProgressText Action="ProcessComponents">Updating component registration</ProgressText> + <ProgressText Action="RegisterComPlus" Template="AppId: [1]{{, AppType: [2], Users: [3], RSN: [4]}}">Registering COM+ Applications and Components</ProgressText> + <ProgressText Action="RegisterFonts" Template="Font: [1]">Registering fonts</ProgressText> + <ProgressText Action="RegisterProduct" Template="[1]">Registering product</ProgressText> + <ProgressText Action="RegisterTypeLibraries" Template="LibID: [1]">Registering type libraries</ProgressText> + <ProgressText Action="RegisterUser" Template="[1]">Registering user</ProgressText> + <ProgressText Action="RemoveDuplicateFiles" Template="File: [1], Directory: [9]">Removing duplicated files</ProgressText> + <ProgressText Action="RemoveEnvironmentStrings" Template="Name: [1], Value: [2], Action [3]">Updating environment strings</ProgressText> + <ProgressText Action="RemoveExistingProducts" Template="Application: [1], Command line: [2]">Removing applications</ProgressText> + <ProgressText Action="RemoveFiles" Template="File: [1], Directory: [9]">Removing files</ProgressText> + <ProgressText Action="RemoveFolders" Template="Folder: [1]">Removing folders</ProgressText> + <ProgressText Action="RemoveIniValues" Template="File: [1], Section: [2], Key: [3], Value: [4]">Removing INI files entries</ProgressText> + <ProgressText Action="RemoveODBC">Removing ODBC components</ProgressText> + <ProgressText Action="RemoveRegistryValues" Template="Key: [1], Name: [2]">Removing system registry values</ProgressText> + <ProgressText Action="RemoveShortcuts" Template="Shortcut: [1]">Removing shortcuts</ProgressText> + <ProgressText Action="RMCCPSearch">Searching for qualifying products</ProgressText> + <ProgressText Action="SelfRegModules" Template="File: [1], Folder: [2]">Registering modules</ProgressText> + <ProgressText Action="SelfUnregModules" Template="File: [1], Folder: [2]">Unregistering modules</ProgressText> + <ProgressText Action="SetODBCFolders">Initializing ODBC directories</ProgressText> + <ProgressText Action="StartServices" Template="Service: [1]">Starting services</ProgressText> + <ProgressText Action="StopServices" Template="Service: 
[1]">Stopping services</ProgressText> + <ProgressText Action="UnpublishComponents" Template="Component ID: [1], Qualifier: [2]">Unpublishing Qualified Components</ProgressText> + <ProgressText Action="UnpublishFeatures" Template="Feature: [1]">Unpublishing Product Features</ProgressText> + <ProgressText Action="UnregisterClassInfo" Template="Class Id: [1]">Unregister Class servers</ProgressText> + <ProgressText Action="UnregisterComPlus" Template="AppId: [1]{{, AppType: [2]}}">Unregistering COM+ Applications and Components</ProgressText> + <ProgressText Action="UnregisterExtensionInfo" Template="Extension: [1]">Unregistering extension servers</ProgressText> + <ProgressText Action="UnregisterFonts" Template="Font: [1]">Unregistering fonts</ProgressText> + <ProgressText Action="UnregisterMIMEInfo" Template="MIME Content Type: [1], Extension: [2]">Unregistering MIME info</ProgressText> + <ProgressText Action="UnregisterProgIdInfo" Template="ProgId: [1]">Unregistering program identifiers</ProgressText> + <ProgressText Action="UnregisterTypeLibraries" Template="LibID: [1]">Unregistering type libraries</ProgressText> + <ProgressText Action="WriteEnvironmentStrings" Template="Name: [1], Value: [2], Action [3]">Updating environment strings</ProgressText> + <ProgressText Action="WriteIniValues" Template="File: [1], Section: [2], Key: [3], Value: [4]">Writing INI files values</ProgressText> + <ProgressText Action="WriteRegistryValues" Template="Key: [1], Name: [2], Value: [3]">Writing system registry values</ProgressText> + <ProgressText Action="Advertise">Advertising application</ProgressText> + <ProgressText Action="GenerateScript" Template="[1]">Generating script operations for action:</ProgressText> + <ProgressText Action="InstallSFPCatalogFile" Template="File: [1], Dependencies: [2]">Installing system catalog</ProgressText> + <ProgressText Action="MsiPublishAssemblies" Template="Application Context:[1], Assembly Name:[2]">Publishing assembly information</ProgressText> + <ProgressText Action="MsiUnpublishAssemblies" Template="Application Context:[1], Assembly Name:[2]">Unpublishing assembly information</ProgressText> + <ProgressText Action="Rollback" Template="[1]">Rolling back action:</ProgressText> + <ProgressText Action="RollbackCleanup" Template="File: [1]">Removing backup files</ProgressText> + <ProgressText Action="UnmoveFiles" Template="File: [1], Directory: [9]">Removing moved files</ProgressText> + <ProgressText Action="UnpublishProduct">Unpublishing product information</ProgressText> + <Error Id="0">{{Fatal error: }}</Error> + <Error Id="1">{{Error [1]. }}</Error> + <Error Id="2">Warning [1]. </Error> + <Error Id="3" /> + <Error Id="4">Info [1]. </Error> + <Error Id="5">The installer has encountered an unexpected error installing this package. This may indicate a problem with this package. The error code is [1]. {{The arguments are: [2], [3], [4]}}</Error> + <Error Id="6" /> + <Error Id="7">{{Disk full: }}</Error> + <Error Id="8">Action [Time]: [1]. [2]</Error> + <Error Id="9">[ProductName]</Error> + <Error Id="10">{[2]}{, [3]}{, [4]}</Error> + <Error Id="11">Message type: [1], Argument: [2]</Error> + <Error Id="12">=== Logging started: [Date] [Time] ===</Error> + <Error Id="13">=== Logging stopped: [Date] [Time] ===</Error> + <Error Id="14">Action start [Time]: [1].</Error> + <Error Id="15">Action ended [Time]: [1]. Return value [2].</Error> + <Error Id="16">Time remaining: {[1] minutes }{[2] seconds}</Error> + <Error Id="17">Out of memory. 
Shut down other applications before retrying.</Error> + <Error Id="18">Installer is no longer responding.</Error> + <Error Id="19">Installer stopped prematurely.</Error> + <Error Id="20">Please wait while Windows configures [ProductName]</Error> + <Error Id="21">Gathering required information...</Error> + <Error Id="22">Removing older versions of this application...</Error> + <Error Id="23">Preparing to remove older versions of this application...</Error> + <Error Id="32">{[ProductName] }Setup completed successfully.</Error> + <Error Id="33">{[ProductName] }Setup failed.</Error> + <Error Id="1101">Error reading from file: [2]. {{ System error [3].}} Verify that the file exists and that you can access it.</Error> + <Error Id="1301">Cannot create the file '[2]'. A directory with this name already exists. Cancel the install and try installing to a different location.</Error> + <Error Id="1302">Please insert the disk: [2]</Error> + <Error Id="1303">The installer has insufficient privileges to access this directory: [2]. The installation cannot continue. Log on as administrator or contact your system administrator.</Error> + <Error Id="1304">Error writing to file: [2]. Verify that you have access to that directory.</Error> + <Error Id="1305">Error reading from file [2]. {{ System error [3].}} Verify that the file exists and that you can access it.</Error> + <Error Id="1306">Another application has exclusive access to the file '[2]'. Please shut down all other applications, then click Retry.</Error> + <Error Id="1307">There is not enough disk space to install this file: [2]. Free some disk space and click Retry, or click Cancel to exit.</Error> + <Error Id="1308">Source file not found: [2]. Verify that the file exists and that you can access it.</Error> + <Error Id="1309">Error reading from file: [3]. {{ System error [2].}} Verify that the file exists and that you can access it.</Error> + <Error Id="1310">Error writing to file: [3]. {{ System error [2].}} Verify that you have access to that directory.</Error> + <Error Id="1311">Source file not found{{(cabinet)}}: [2]. Verify that the file exists and that you can access it.</Error> + <Error Id="1312">Cannot create the directory '[2]'. A file with this name already exists. Please rename or remove the file and click retry, or click Cancel to exit.</Error> + <Error Id="1313">The volume [2] is currently unavailable. Please select another.</Error> + <Error Id="1314">The specified path '[2]' is unavailable.</Error> + <Error Id="1315">Unable to write to the specified folder: [2].</Error> + <Error Id="1316">A network error occurred while attempting to read from the file: [2]</Error> + <Error Id="1317">An error occurred while attempting to create the directory: [2]</Error> + <Error Id="1318">A network error occurred while attempting to create the directory: [2]</Error> + <Error Id="1319">A network error occurred while attempting to open the source file cabinet: [2]</Error> + <Error Id="1320">The specified path is too long: [2]</Error> + <Error Id="1321">The Installer has insufficient privileges to modify this file: [2].</Error> + <Error Id="1322">A portion of the folder path '[2]' is invalid. 
It is either empty or exceeds the length allowed by the system.</Error> + <Error Id="1323">The folder path '[2]' contains words that are not valid in folder paths.</Error> + <Error Id="1324">The folder path '[2]' contains an invalid character.</Error> + <Error Id="1325">'[2]' is not a valid short file name.</Error> + <Error Id="1326">Error getting file security: [3] GetLastError: [2]</Error> + <Error Id="1327">Invalid Drive: [2]</Error> + <Error Id="1328">Error applying patch to file [2]. It has probably been updated by other means, and can no longer be modified by this patch. For more information contact your patch vendor. {{System Error: [3]}}</Error> + <Error Id="1329">A file that is required cannot be installed because the cabinet file [2] is not digitally signed. This may indicate that the cabinet file is corrupt.</Error> + <Error Id="1330">A file that is required cannot be installed because the cabinet file [2] has an invalid digital signature. This may indicate that the cabinet file is corrupt.{{ Error [3] was returned by WinVerifyTrust.}}</Error> + <Error Id="1331">Failed to correctly copy [2] file: CRC error.</Error> + <Error Id="1332">Failed to correctly move [2] file: CRC error.</Error> + <Error Id="1333">Failed to correctly patch [2] file: CRC error.</Error> + <Error Id="1334">The file '[2]' cannot be installed because the file cannot be found in cabinet file '[3]'. This could indicate a network error, an error reading from the CD-ROM, or a problem with this package.</Error> + <Error Id="1335">The cabinet file '[2]' required for this installation is corrupt and cannot be used. This could indicate a network error, an error reading from the CD-ROM, or a problem with this package.</Error> + <Error Id="1336">There was an error creating a temporary file that is needed to complete this installation.{{ Folder: [3]. System error code: [2]}}</Error> + <Error Id="1401">Could not create key: [2]. {{ System error [3].}} Verify that you have sufficient access to that key, or contact your support personnel. </Error> + <Error Id="1402">Could not open key: [2]. {{ System error [3].}} Verify that you have sufficient access to that key, or contact your support personnel. </Error> + <Error Id="1403">Could not delete value [2] from key [3]. {{ System error [4].}} Verify that you have sufficient access to that key, or contact your support personnel. </Error> + <Error Id="1404">Could not delete key [2]. {{ System error [3].}} Verify that you have sufficient access to that key, or contact your support personnel. </Error> + <Error Id="1405">Could not read value [2] from key [3]. {{ System error [4].}} Verify that you have sufficient access to that key, or contact your support personnel. </Error> + <Error Id="1406">Could not write value [2] to key [3]. {{ System error [4].}} Verify that you have sufficient access to that key, or contact your support personnel.</Error> + <Error Id="1407">Could not get value names for key [2]. {{ System error [3].}} Verify that you have sufficient access to that key, or contact your support personnel.</Error> + <Error Id="1408">Could not get sub key names for key [2]. {{ System error [3].}} Verify that you have sufficient access to that key, or contact your support personnel.</Error> + <Error Id="1409">Could not read security information for key [2]. {{ System error [3].}} Verify that you have sufficient access to that key, or contact your support personnel.</Error> + <Error Id="1410">Could not increase the available registry space. 
[2] KB of free registry space is required for the installation of this application.</Error> + <Error Id="1500">Another installation is in progress. You must complete that installation before continuing this one.</Error> + <Error Id="1501">Error accessing secured data. Please make sure the Windows Installer is configured properly and try the install again.</Error> + <Error Id="1502">User '[2]' has previously initiated an install for product '[3]'. That user will need to run that install again before they can use that product. Your current install will now continue.</Error> + <Error Id="1503">User '[2]' has previously initiated an install for product '[3]'. That user will need to run that install again before they can use that product.</Error> + <Error Id="1601">Out of disk space -- Volume: '[2]'; required space: [3] KB; available space: [4] KB. Free some disk space and retry.</Error> + <Error Id="1602">Are you sure you want to cancel?</Error> + <Error Id="1603">The file [2][3] is being held in use{ by the following process: Name: [4], Id: [5], Window Title: '[6]'}. Close that application and retry.</Error> + <Error Id="1604">The product '[2]' is already installed, preventing the installation of this product. The two products are incompatible.</Error> + <Error Id="1605">There is not enough disk space on the volume '[2]' to continue the install with recovery enabled. [3] KB are required, but only [4] KB are available. Click Ignore to continue the install without saving recovery information, click Retry to check for available space again, or click Cancel to quit the installation.</Error> + <Error Id="1606">Could not access network location [2].</Error> + <Error Id="1607">The following applications should be closed before continuing the install:</Error> + <Error Id="1608">Could not find any previously installed compliant products on the machine for installing this product.</Error> + <Error Id="1609">An error occurred while applying security settings. [2] is not a valid user or group. This could be a problem with the package, or a problem connecting to a domain controller on the network. Check your network connection and click Retry, or Cancel to end the install. {{Unable to locate the user's SID, system error [3]}}</Error> + <Error Id="1701">The key [2] is not valid. Verify that you entered the correct key.</Error> + <Error Id="1702">The installer must restart your system before configuration of [2] can continue. Click Yes to restart now or No if you plan to manually restart later.</Error> + <Error Id="1703">You must restart your system for the configuration changes made to [2] to take effect. Click Yes to restart now or No if you plan to manually restart later.</Error> + <Error Id="1704">An installation for [2] is currently suspended. You must undo the changes made by that installation to continue. Do you want to undo those changes?</Error> + <Error Id="1705">A previous installation for this product is in progress. You must undo the changes made by that installation to continue. Do you want to undo those changes?</Error> + <Error Id="1706">An installation package for the product [2] cannot be found. Try the installation again using a valid copy of the installation package '[3]'.</Error> + <Error Id="1707">Installation completed successfully.</Error> + <Error Id="1708">Installation failed.</Error> + <Error Id="1709">Product: [2] -- [3]</Error> + <Error Id="1710">You may either restore your computer to its previous state or continue the install later. 
Would you like to restore?</Error> + <Error Id="1711">An error occurred while writing installation information to disk. Check to make sure enough disk space is available, and click Retry, or Cancel to end the install.</Error> + <Error Id="1712">One or more of the files required to restore your computer to its previous state could not be found. Restoration will not be possible.</Error> + <Error Id="1713">[2] cannot install one of its required products. Contact your technical support group. {{System Error: [3].}}</Error> + <Error Id="1714">The older version of [2] cannot be removed. Contact your technical support group. {{System Error [3].}}</Error> + <Error Id="1715">Installed [2]</Error> + <Error Id="1716">Configured [2]</Error> + <Error Id="1717">Removed [2]</Error> + <Error Id="1718">File [2] was rejected by digital signature policy.</Error> + <Error Id="1719">The Windows Installer Service could not be accessed. This can occur if you are running Windows in safe mode, or if the Windows Installer is not correctly installed. Contact your support personnel for assistance.</Error> + <Error Id="1720">There is a problem with this Windows Installer package. A script required for this install to complete could not be run. Contact your support personnel or package vendor. {{Custom action [2] script error [3], [4]: [5] Line [6], Column [7], [8] }}</Error> + <Error Id="1721">There is a problem with this Windows Installer package. A program required for this install to complete could not be run. Contact your support personnel or package vendor. {{Action: [2], location: [3], command: [4] }}</Error> + <Error Id="1722">There is a problem with this Windows Installer package. A program run as part of the setup did not finish as expected. Contact your support personnel or package vendor. {{Action [2], location: [3], command: [4] }}</Error> + <Error Id="1723">There is a problem with this Windows Installer package. A DLL required for this install to complete could not be run. Contact your support personnel or package vendor. {{Action [2], entry: [3], library: [4] }}</Error> + <Error Id="1724">Removal completed successfully.</Error> + <Error Id="1725">Removal failed.</Error> + <Error Id="1726">Advertisement completed successfully.</Error> + <Error Id="1727">Advertisement failed.</Error> + <Error Id="1728">Configuration completed successfully.</Error> + <Error Id="1729">Configuration failed.</Error> + <Error Id="1730">You must be an Administrator to remove this application. To remove this application, you can log on as an Administrator, or contact your technical support group for assistance.</Error> + <Error Id="1801">The path [2] is not valid. Please specify a valid path.</Error> + <Error Id="1802">Out of memory. Shut down other applications before retrying.</Error> + <Error Id="1803">There is no disk in drive [2]. Please insert one and click Retry, or click Cancel to go back to the previously selected volume.</Error> + <Error Id="1804">There is no disk in drive [2]. Please insert one and click Retry, or click Cancel to return to the browse dialog and select a different volume.</Error> + <Error Id="1805">The folder [2] does not exist. 
Please enter a path to an existing folder.</Error> + <Error Id="1806">You have insufficient privileges to read this folder.</Error> + <Error Id="1807">A valid destination folder for the install could not be determined.</Error> + <Error Id="1901">Error attempting to read from the source install database: [2].</Error> + <Error Id="1902">Scheduling reboot operation: Renaming file [2] to [3]. Must reboot to complete operation.</Error> + <Error Id="1903">Scheduling reboot operation: Deleting file [2]. Must reboot to complete operation.</Error> + <Error Id="1904">Module [2] failed to register. HRESULT [3]. Contact your support personnel.</Error> + <Error Id="1905">Module [2] failed to unregister. HRESULT [3]. Contact your support personnel.</Error> + <Error Id="1906">Failed to cache package [2]. Error: [3]. Contact your support personnel.</Error> + <Error Id="1907">Could not register font [2]. Verify that you have sufficient permissions to install fonts, and that the system supports this font.</Error> + <Error Id="1908">Could not unregister font [2]. Verify that you that you have sufficient permissions to remove fonts.</Error> + <Error Id="1909">Could not create Shortcut [2]. Verify that the destination folder exists and that you can access it.</Error> + <Error Id="1910">Could not remove Shortcut [2]. Verify that the shortcut file exists and that you can access it.</Error> + <Error Id="1911">Could not register type library for file [2]. Contact your support personnel.</Error> + <Error Id="1912">Could not unregister type library for file [2]. Contact your support personnel.</Error> + <Error Id="1913">Could not update the ini file [2][3]. Verify that the file exists and that you can access it.</Error> + <Error Id="1914">Could not schedule file [2] to replace file [3] on reboot. Verify that you have write permissions to file [3].</Error> + <Error Id="1915">Error removing ODBC driver manager, ODBC error [2]: [3]. Contact your support personnel.</Error> + <Error Id="1916">Error installing ODBC driver manager, ODBC error [2]: [3]. Contact your support personnel.</Error> + <Error Id="1917">Error removing ODBC driver: [4], ODBC error [2]: [3]. Verify that you have sufficient privileges to remove ODBC drivers.</Error> + <Error Id="1918">Error installing ODBC driver: [4], ODBC error [2]: [3]. Verify that the file [4] exists and that you can access it.</Error> + <Error Id="1919">Error configuring ODBC data source: [4], ODBC error [2]: [3]. Verify that the file [4] exists and that you can access it.</Error> + <Error Id="1920">Service '[2]' ([3]) failed to start. Verify that you have sufficient privileges to start system services.</Error> + <Error Id="1921">Service '[2]' ([3]) could not be stopped. Verify that you have sufficient privileges to stop system services.</Error> + <Error Id="1922">Service '[2]' ([3]) could not be deleted. Verify that you have sufficient privileges to remove system services.</Error> + <Error Id="1923">Service '[2]' ([3]) could not be installed. Verify that you have sufficient privileges to install system services.</Error> + <Error Id="1924">Could not update environment variable '[2]'. Verify that you have sufficient privileges to modify environment variables.</Error> + <Error Id="1925">You do not have sufficient privileges to complete this installation for all users of the machine. Log on as administrator and then retry this installation.</Error> + <Error Id="1926">Could not set file security for file '[3]'. Error: [2]. 
Verify that you have sufficient privileges to modify the security permissions for this file.</Error> + <Error Id="1927">Component Services (COM+ 1.0) are not installed on this computer. This installation requires Component Services in order to complete successfully. Component Services are available on Windows 2000.</Error> + <Error Id="1928">Error registering COM+ Application. Contact your support personnel for more information.</Error> + <Error Id="1929">Error unregistering COM+ Application. Contact your support personnel for more information.</Error> + <Error Id="1930">The description for service '[2]' ([3]) could not be changed.</Error> + <Error Id="1931">The Windows Installer service cannot update the system file [2] because the file is protected by Windows. You may need to update your operating system for this program to work correctly. {{Package version: [3], OS Protected version: [4]}}</Error> + <Error Id="1932">The Windows Installer service cannot update the protected Windows file [2]. {{Package version: [3], OS Protected version: [4], SFP Error: [5]}}</Error> + <Error Id="1933">The Windows Installer service cannot update one or more protected Windows files. {{SFP Error: [2]. List of protected files:\r\n[3]}}</Error> + <Error Id="1934">User installations are disabled via policy on the machine.</Error> + <Error Id="1935">An error occured during the installation of assembly component [2]. HRESULT: [3]. {{assembly interface: [4], function: [5], assembly name: [6]}}</Error> + + <AdminUISequence> + <Show Dialog="FatalError" OnExit="error" /> + <Show Dialog="UserExit" OnExit="cancel" /> + <Show Dialog="ExitDialog" OnExit="success" /> + <Show Dialog="PrepareDlg" Before="CostInitialize"></Show> + <Show Dialog="AdminWelcomeDlg" After="CostFinalize" /> + <Show Dialog="ProgressDlg" After="AdminWelcomeDlg" /> + </AdminUISequence> + + <InstallUISequence> + <Show Dialog="FatalError" OnExit="error" /> + <Show Dialog="UserExit" OnExit="cancel" /> + <Show Dialog="ExitDialog" OnExit="success" /> + <Show Dialog="PrepareDlg" After="LaunchConditions" /> + <Show Dialog="WelcomeDlg" After="MigrateFeatureStates">NOT Installed</Show> + <Show Dialog="ResumeDlg" After="WelcomeDlg">Installed AND (RESUME OR Preselected)</Show> + <Show Dialog="MaintenanceWelcomeDlg" After="ResumeDlg">Installed AND NOT RESUME AND NOT Preselected</Show> + <Show Dialog="ProgressDlg" After="MaintenanceWelcomeDlg" /> + </InstallUISequence> + </UI> + + <Property Id="ALLUSERS">2</Property> + <Property Id="ROOTDRIVE"><![CDATA[C:\]]></Property> + <Property Id="ButtonText_No">&amp;No</Property> + <Property Id="ButtonText_Install">&amp;Install</Property> + <Property Id="Setup">Setup</Property> + <Property Id="ButtonText_Browse">Br&amp;owse</Property> + <Property Id="CustomSetupIcon">custicon</Property> + <Property Id="ButtonText_Next">&amp;Next &gt;</Property> + <Property Id="ButtonText_Back">&lt; &amp;Back</Property> + <Property Id="InstallMode">Typical</Property> + <Property Id="Progress2">installs</Property> + <Property Id="IAgree">No</Property> + <Property Id="Wizard">Setup Wizard</Property> + <Property Id="RemoveIcon">removico</Property> + <Property Id="ExclamationIcon">exclamic</Property> + <Property Id="ShowUserRegistrationDlg">1</Property> + <Property Id="ProductID">none</Property> + <Property Id="ButtonText_Reset">&amp;Reset</Property> + <Property Id="ButtonText_Remove">&amp;Remove</Property> + <Property Id="CompleteSetupIcon">completi</Property> + <Property Id="ButtonText_Yes">&amp;Yes</Property> + <Property 
Id="ButtonText_Exit">&amp;Exit</Property> + <Property Id="ButtonText_Return">&amp;Return</Property> + <Property Id="DialogBitmap">dlgbmp</Property> + <Property Id="DlgTitleFont">{&amp;DlgFontBold8}</Property> + <Property Id="ButtonText_Ignore">&amp;Ignore</Property> + <Property Id="RepairIcon">repairic</Property> + <Property Id="ButtonText_Resume">&amp;Resume</Property> + <Property Id="InstallerIcon">insticon</Property> + <Property Id="ButtonText_Finish">&amp;Finish</Property> + <Property Id="PROMPTROLLBACKCOST">P</Property> + <Property Id="PortTemplate"><![CDATA[12345<######>@@@@@]]></Property> + <Property Id="Progress1">Installing</Property> + <Property Id="ButtonText_Cancel">Cancel</Property> + <Property Id="INSTALLLEVEL">3</Property> + <Property Id="InfoIcon">info</Property> + <Property Id="ButtonText_Repair">&amp;Repair</Property> + <Property Id="ButtonText_Retry">&amp;Retry</Property> + <Property Id="BannerBitmap">bannrbmp</Property> + <Property Id="ButtonText_OK">OK</Property> + + <AdminExecuteSequence /> + + <InstallExecuteSequence> + <RemoveExistingProducts After='InstallFinalize' /> + </InstallExecuteSequence> + + <Binary Id="repairic" src="Binary\Repair.ico" /> + <Binary Id="removico" src="Binary\Remove.ico" /> + </Product> </Wix>
0
59b8b05c39104cd919a3fa872259b45a6def7b83
389ds/389-ds-base
Ticket #48234 - CI test: test case for ticket 48234 Description: "matching rules" in ACI's "bind rules" not fully evaluated
commit 59b8b05c39104cd919a3fa872259b45a6def7b83 Author: Noriko Hosoi <[email protected]> Date: Fri Jun 10 18:33:05 2016 -0700 Ticket #48234 - CI test: test case for ticket 48234 Description: "matching rules" in ACI's "bind rules not fully evaluated diff --git a/dirsrvtests/tests/tickets/ticket48234_test.py b/dirsrvtests/tests/tickets/ticket48234_test.py new file mode 100644 index 000000000..27eff1a8f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48234_test.py @@ -0,0 +1,139 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + [email protected](scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... + standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + request.addfinalizer(fin) + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + +def add_ou_entry(server, name, myparent): + dn = 'ou=%s,%s' % (name, myparent) + server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit'], + 'ou': name}))) + +def add_user_entry(server, name, pw, myparent): + dn = 'cn=%s,%s' % (name, myparent) + server.add_s(Entry((dn, {'objectclass': ['top', 'person'], + 'sn': name, + 'cn': name, + 'telephonenumber': '+1 222 333-4444', + 'userpassword': pw}))) + +def test_ticket48234(topology): + """ + Test aci which contains an extensible filter. + shutdown + """ + + log.info('Bind as root DN') + try: + topology.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc']) + assert False + + ouname = 'outest' + username = 'admin' + passwd = 'Password' + deniedattr = 'telephonenumber' + log.info('Add aci which contains extensible filter.') + aci_text = ('(targetattr = "%s")' % (deniedattr) + + '(target = "ldap:///%s")' % (DEFAULT_SUFFIX) + + '(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' + + '(userdn = "ldap:///%s??sub?(&(cn=%s)(ou:dn:=%s))");)' % (DEFAULT_SUFFIX, username, ouname)) + + try: + topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)]) + except ldap.LDAPError as e: + log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc'])) + assert False + + log.info('Add entries ...') + for idx in range(0, 2): + ou0 = 'OU%d' % idx + log.info('adding %s under %s...' % (ou0, DEFAULT_SUFFIX)) + add_ou_entry(topology.standalone, ou0, DEFAULT_SUFFIX) + parent = 'ou=%s,%s' % (ou0, DEFAULT_SUFFIX) + log.info('adding %s under %s...' 
% (ouname, parent)) + add_ou_entry(topology.standalone, ouname, parent) + + for idx in range(0, 2): + parent = 'ou=%s,ou=OU%d,%s' % (ouname, idx, DEFAULT_SUFFIX) + log.info('adding %s under %s...' % (username, parent)) + add_user_entry(topology.standalone, username, passwd, parent) + + binddn = 'cn=%s,%s' % (username, parent) + log.info('Bind as user %s' % binddn) + try: + topology.standalone.simple_bind_s(binddn, passwd) + except ldap.LDAPError as e: + topology.standalone.log.error(bindn + ' failed to authenticate: ' + e.message['desc']) + assert False + + filter = '(cn=%s)' % username + try: + entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filter, [deniedattr, 'dn']) + assert 2 == len(entries) + for idx in range(0, 1): + if entries[idx].hasAttr(deniedattr): + log.fatal('aci with extensible filter failed -- %s') + assert False + except ldap.LDAPError as e: + topology.standalone.log.error('Search (%s, %s) failed: ' % (DEFAULT_SUFFIX, filter) + e.message['desc']) + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE)
0
37e0121445b69405a7d11f9990eb91dc3d3a0d7c
389ds/389-ds-base
CLEANALLRUV coverity fixes There was code that allowed for a static buffer overflow (CLEANRIDSIZ) in a few places, but there are previous checks that make it impossible to overrun. A small potential memory leak was also fixed. Reviewed by: Noriko (Thanks!)
commit 37e0121445b69405a7d11f9990eb91dc3d3a0d7c Author: Mark Reynolds <[email protected]> Date: Tue Aug 21 12:18:21 2012 -0400 CLEANALLRUV coverity fixes There was code that allowed for a static buffer overflow(CLEANRIDSIZ) in a few places, but there are previous checks that make this impossible to over run. A small potential memory leak was also fixed. Reviewed by: Noriko(Thanks!) diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c index 5817f994d..e573d402c 100644 --- a/ldap/servers/plugins/replication/repl5_agmt.c +++ b/ldap/servers/plugins/replication/repl5_agmt.c @@ -434,7 +434,7 @@ agmt_new_from_entry(Slapi_Entry *e) for (i = 0; i < CLEANRIDSIZ && clean_vals[i]; i++){ ra->cleanruv_notified[i] = atoi(clean_vals[i]); } - if(i <= CLEANRIDSIZ) + if(i < CLEANRIDSIZ) ra->cleanruv_notified[i + 1] = 0; slapi_ch_array_free(clean_vals); } else { @@ -2676,7 +2676,7 @@ agmt_set_cleanruv_notified_from_entry(Repl_Agmt *ra, Slapi_Entry *e){ for (i = 0; i < CLEANRIDSIZ && attr_vals[i]; i++){ ra->cleanruv_notified[i] = atoi(attr_vals[i]); } - if( i <= CLEANRIDSIZ ) + if( i < CLEANRIDSIZ ) ra->cleanruv_notified[i + 1] = 0; slapi_ch_array_free(attr_vals); } else { diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c index 75f82ab77..b79280cde 100644 --- a/ldap/servers/plugins/replication/repl5_replica.c +++ b/ldap/servers/plugins/replication/repl5_replica.c @@ -1954,6 +1954,7 @@ done: if(payload == NULL){ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Abort CleanAllRUV Task: failed to create extended " "op payload\n"); + slapi_ch_free((void **)&data); } else { /* setup the data */ data->repl_obj = NULL; @@ -3796,10 +3797,10 @@ replica_add_cleanruv_data(Replica *r, char *val) PR_Lock(r->repl_lock); for (i = 0; i < CLEANRIDSIZ && r->repl_cleanruv_data[i] != NULL; i++); /* goto the end of the list */ - if( i < CLEANRIDSIZ) + if( i < CLEANRIDSIZ){ r->repl_cleanruv_data[i] = slapi_ch_strdup(val); /* append to list */ - if(i <= CLEANRIDSIZ) - r->repl_cleanruv_data[i + 1] = NULL; + r->repl_cleanruv_data[i + 1] = 0; + } PR_Unlock(r->repl_lock); }
0
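A minimal, self-contained C sketch of the bounds-check pattern this coverity fix tightens: the patch changes "if (i <= CLEANRIDSIZ)" to "if (i < CLEANRIDSIZ)" before writing a list terminator. Names, the array size, and the atoi conversion below are illustrative only, and the sketch writes the terminator at index i rather than i + 1 as the actual repl5_agmt.c/repl5_replica.c code does against its own array declarations.

    #include <stdlib.h>

    #define CLEANRIDSIZ 4                 /* stand-in for the real constant */

    /* Copy up to CLEANRIDSIZ ids from a NULL-terminated string array and
     * terminate the destination list only when there is room for it.
     * Guarding the terminator write with "i < CLEANRIDSIZ" (not "<=")
     * keeps the write inside the fixed-size array. */
    static void copy_rid_list(int dst[CLEANRIDSIZ], char **src)
    {
        int i;
        for (i = 0; i < CLEANRIDSIZ && src[i]; i++) {
            dst[i] = atoi(src[i]);
        }
        if (i < CLEANRIDSIZ) {
            dst[i] = 0;                   /* in-bounds terminator */
        }
    }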
8bb10ab2c2a1283aac4540a023acb0361f56f003
389ds/389-ds-base
Ticket #47374 - flush.pl is not included in perl5 Fix description: replaced obsolete flush.pl and getopts.pl with perl5 equivalents. https://fedorahosted.org/389/ticket/47374 Reviewed by Rich (Thank you!!)
commit 8bb10ab2c2a1283aac4540a023acb0361f56f003 Author: Noriko Hosoi <[email protected]> Date: Thu Jun 27 16:13:00 2013 -0700 Ticket #47374 - flush.pl is not included in perl5 Fix description: replaced obsolete flush.pl and getopts.pl with perl5 equivalents. https://fedorahosted.org/389/ticket/47374 Reviewed by Rich (Thank you!!) diff --git a/ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl.in b/ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl.in index ce18f7c61..51af0db18 100755 --- a/ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl.in +++ b/ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl.in @@ -55,6 +55,7 @@ ## Now, dc style namingcontext can be used. #------------------------------------------------------------------------ +use Getopt::Long; sub PrintUsage { print STDERR @@ -68,8 +69,7 @@ sub PrintUsage { "\t -m naming attribute for RDN\n", "\t -n number of entries to generate\n", "\t -b beginning number for RDN (default 1 - ending number is -n value)\n", - "\t -O for organizationalPersons, default is inetOrgPerson\n", - "\t -p for piranha style aci's, default is barracuda\n", + "\t -p for organizationalPersons, default is inetOrgPerson\n", "\t -r seed---seed number for random number generator\n", "\t -g print extra entries for orgchart\n", "\t -x suppress printing pre amble\n", @@ -533,30 +533,61 @@ my $mycert = ); @managers = (\%m0, \%m1, \%m2, \%m3, \%m4, \%m5, \%m6, \%m7, \%m8, \%m9, \%m10, \%m11, \%m12, \%m13, \%m14, \%m15, \%m16, \%m17); -require "flush.pl"; -require "getopts.pl"; -&Getopts('n:o:s:r:cOvpqgxyl:z:m:b:j:'); - -$Number_To_Generate = $opt_n; -$Verbose = $opt_v; -$Quiet = $opt_q; -$Output_File_Name = $opt_o; -$Random_Seed = $opt_r || 0xdbdbdbdb; -$TargetServer = $opt_t; -$debug = $opt_d; -$Suffix = $opt_s || 'dc=example,dc=com'; +sub flush { + local($old) = select(shift); + $| = 1; + print ""; + $| = 0; + select($old); +} + +my $Number_To_Generate = 1; +$Output_File_Name = "output.ldif"; +$Random_Seed = 1; +$Suffix = 'dc=example,dc=com'; +$NamingType = "uid"; +$inetOrgPerson = "objectClass: inetOrgPerson\n"; +$PrintOrgChartDat = 0; +$DataDir = "/usr/share/dirsrv/data"; +$BeginNum = 0; + +$Verbose = 0; +$debug = 0; +$Quiet = 0; + +$opt_x = 0; +$opt_y = 0; +$opt_z = ""; + +GetOptions('number=i' => \$Number_To_Generate, + 'output=s' => \$Output_File_Name, + 'random=i' => \$Random_Seed, + 'suffix=s' => \$Suffix, + 'cnInDn' => \$opt_c, + 'moreChoices=s' => \$opt_m, + 'person' => \$opt_p, + 'graph' => \$PrintOrgChartDat, + 'location=s' => \$DataDir, + 'beginNum=i' => \$BeginNum, + 'x' => \$opt_x, + 'y' => \$opt_y, + 'z' => \$opt_z, + 'j' => \$opt_j, + 'verbose' => \$Verbose, + 'debug' => \$debug, + 'quiet' => \$Quiet, + ); + +$Random_Seed = $Random_Seed || 0xdbdbdbdb; $NamingType = "cn" if ($opt_c); -$NamingType = "uid" if (!$opt_c); $NamingType = $opt_m if ($opt_m); -$inetOrgPerson = "objectClass: inetOrgPerson\n" if (!$opt_O); -$PrintOrgChartDat = $opt_g; -$DataDir = $opt_l || "@templatedir@"; +$inetOrgPerson = "" if ($opt_p); + $ExtraObjClasses = ""; if ($opt_z) { $ExtraObjClasses = join "\n", map { "objectClass: $_" } split(/ /, $opt_z); $ExtraObjClasses .= "\n"; } -$BeginNum = $opt_b || 0; $printpreamble = 1; if ("" != $opt_x) {
0
dea9f6a7cd6d1c4029705559c69c273584a53561
389ds/389-ds-base
Issue 5012 - Migrate pcre to pcre2 Description: PCRE is deprecated and is being removed; we need to use the new PCRE2 library. fixes: https://github.com/389ds/389-ds-base/issues/5012 Reviewed by: tbordaz & firstyear (Thanks!!)
commit dea9f6a7cd6d1c4029705559c69c273584a53561 Author: Mark Reynolds <[email protected]> Date: Tue Aug 16 08:55:29 2022 -0400 Issue 5012 - Migrate pcre to pcre2 Description: PCRE is deprecated and is being removed, need to use the new PCRE2 lkbrary fixes: https://github.com/389ds/389-ds-base/issues/5012 Reviewed by: tbordaz & firstyear (Thanks!!) diff --git a/Makefile.am b/Makefile.am index af7e92f9f..c75ae5e87 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1993,7 +1993,6 @@ fixupcmd = sed \ -e 's,@db_libdir\@,$(db_libdir),g' \ -e 's,@db_bindir\@,$(db_bindir),g' \ -e 's,@netsnmp_libdir\@,$(netsnmp_libdir),g' \ - -e 's,@pcre_libdir\@,$(pcre_libdir),g' \ -e 's,@propertydir\@,$(propertydir),g' \ -e 's,@datadir\@,$(datadir),g' \ -e 's,@schemadir\@,$(schemadir),g' \ diff --git a/configure.ac b/configure.ac index 8dfa656cd..df4bc84a6 100644 --- a/configure.ac +++ b/configure.ac @@ -847,14 +847,17 @@ if test "$krb5_vendor" = "MIT"; then LIBS="$save_LIBS" fi -if $PKG_CONFIG --exists pcre; then - PKG_CHECK_MODULES([PCRE], [pcre]) - pcre_libdir=`$PKG_CONFIG --libs-only-L pcre | sed -e s/-L// | sed -e s/\ .*$//` -else - PKG_CHECK_MODULES([PCRE], [libpcre]) - pcre_libdir=`$PKG_CONFIG --libs-only-L libpcre | sed -e s/-L// | sed -e s/\ .*$//` -fi -AC_SUBST(pcre_libdir) +PKG_CHECK_MODULES( + [PCRE], + [libpcre2-8], + [ + AC_DEFINE( + [PCRE2_CODE_UNIT_WIDTH], + 8, + [Define libpcre2 unit size] + ) + ] +) m4_include(m4/selinux.m4) m4_include(m4/systemd.m4) diff --git a/dirsrvtests/tests/suites/automember_plugin/basic_test.py b/dirsrvtests/tests/suites/automember_plugin/basic_test.py index a660d0aab..3f2338f17 100644 --- a/dirsrvtests/tests/suites/automember_plugin/basic_test.py +++ b/dirsrvtests/tests/suites/automember_plugin/basic_test.py @@ -914,6 +914,8 @@ def test_automemtask_re_build_task(topo, _create_all_entries, _startuptask, _fix 'basedn': auto_mem_scope, 'filter': "objectClass=posixAccount" }) + time.sleep(10) + # Search for any error logs assert not supplier.searchErrorsLog(error_string) for grp in (managers_grp, contract_grp): @@ -1042,6 +1044,7 @@ def test_automemtask_re_build(topo, _create_all_entries, _startuptask, _fixture_ 'basedn': auto_mem_scope, 'filter': "objectClass=inetOrgPerson" }) + time.sleep(10) with pytest.raises(AssertionError): bulk_check_groups(supplier, managers_grp, "member", 10) @@ -1121,7 +1124,7 @@ def test_automemtask_run_re_build(topo, _create_all_entries, _startuptask, _fixt AutomemberRebuildMembershipTask(supplier).create(properties={ 'basedn': auto_mem_scope, 'filter': "objectClass=inetOrgPerson"}) - time.sleep(2) + time.sleep(10) bulk_check_groups(supplier, managers_grp, "member", 10) AutoMembershipDefinition(supplier, f'cn=userGroups,{PLUGIN_AUTO}').replace('autoMemberFilter', diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c index 3494d0343..d0770365b 100644 --- a/ldap/servers/plugins/automember/automember.c +++ b/ldap/servers/plugins/automember/automember.c @@ -1168,7 +1168,7 @@ automember_parse_regex_rule(char *rule_string) struct automemberRegexRule *rule = NULL; char *attr = NULL; Slapi_Regex *regex = NULL; - const char *recomp_result = NULL; + char *recomp_result = NULL; char *p = NULL; char *p2 = NULL; @@ -1223,6 +1223,7 @@ automember_parse_regex_rule(char *rule_string) "automember_parse_regex_rule - Unable to parse " "regex rule (invalid regex). Error \"%s\".\n", recomp_result ? 
recomp_result : "unknown"); + slapi_ch_free_string(&recomp_result); goto bail; } diff --git a/ldap/servers/plugins/syntaxes/string.c b/ldap/servers/plugins/syntaxes/string.c index 36f9be586..31ea33315 100644 --- a/ldap/servers/plugins/syntaxes/string.c +++ b/ldap/servers/plugins/syntaxes/string.c @@ -197,7 +197,7 @@ string_filter_sub(Slapi_PBlock *pb, char *initial, char **any, char * final, Sla struct timespec expire_time = {0}; Operation *op = NULL; Slapi_Regex *re = NULL; - const char *re_result = NULL; + char *re_result = NULL; char *alt = NULL; int filter_normalized = 0; int free_re = 1; @@ -313,6 +313,7 @@ string_filter_sub(Slapi_PBlock *pb, char *initial, char **any, char * final, Sla slapi_log_err(SLAPI_LOG_ERR, SYNTAX_PLUGIN_SUBSYSTEM, "string_filter_sub - re_comp (%s) failed (%s): %s\n", pat, p, re_result ? re_result : "unknown"); + slapi_ch_free_string(&re_result); rc = LDAP_OPERATIONS_ERROR; goto bailout; } else if (slapi_is_loglevel_set(SLAPI_LOG_TRACE)) { diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c index cd33d30fd..cfd2f793f 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_search.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c @@ -208,7 +208,7 @@ ldbm_search_compile_filter(Slapi_Filter *f, void *arg __attribute__((unused))) char *p, *end, *bigpat = NULL; size_t size = 0; Slapi_Regex *re = NULL; - const char *re_result = NULL; + char *re_result = NULL; int i = 0; PR_ASSERT(NULL == f->f_un.f_un_sub.sf_private); @@ -262,6 +262,7 @@ ldbm_search_compile_filter(Slapi_Filter *f, void *arg __attribute__((unused))) if (NULL == re) { slapi_log_err(SLAPI_LOG_ERR, "ldbm_search_compile_filter", "re_comp (%s) failed (%s): %s\n", pat, p, re_result ? re_result : "unknown"); + slapi_ch_free_string(&re_result); rc = SLAPI_FILTER_SCAN_ERROR; } else { char ebuf[BUFSIZ]; diff --git a/ldap/servers/slapd/getfilelist.c b/ldap/servers/slapd/getfilelist.c index a2c41f506..dd5deb282 100644 --- a/ldap/servers/slapd/getfilelist.c +++ b/ldap/servers/slapd/getfilelist.c @@ -122,7 +122,7 @@ matches(const char *filename, const char *pattern) { Slapi_Regex *re = NULL; int match = 0; - const char *error = NULL; + char *error = NULL; if (!pattern) return 1; /* null pattern matches everything */ @@ -133,6 +133,8 @@ matches(const char *filename, const char *pattern) /* Matches the compiled pattern against the filename */ match = slapi_re_exec_nt(re, filename); slapi_re_free(re); + } else { + slapi_ch_free_string(&error); } return match; diff --git a/ldap/servers/slapd/regex.c b/ldap/servers/slapd/regex.c index a17c354fd..e8068cf8c 100644 --- a/ldap/servers/slapd/regex.c +++ b/ldap/servers/slapd/regex.c @@ -1,5 +1,5 @@ /** BEGIN COPYRIGHT BLOCK - * Copyright (C) 2009 Red Hat, Inc. + * Copyright (C) 2022 Red Hat, Inc. * All rights reserved. * * License: GPL (version 3 or any later version). 
@@ -10,20 +10,18 @@ #include <config.h> #endif -/* number of elements in the output vector */ -#define OVECCOUNT 30 /* should be a multiple of 3; store up to \9 */ - #include "slap.h" #include "slapi-plugin.h" +#include <pcre2.h> -/* Perl Compatible Regular Expression */ -#include <pcre.h> +#define OVEC_MATCH_LIMIT 30 /* should be a multiple of 3; store up to \9 */ +#define CPRE_ERR_MSG_SIZE 120 struct slapi_regex_handle { - pcre *re_pcre; /* contains the compiled pattern */ - int *re_ovector; /* output vector */ - int re_oveccount; /* count of the elements in output vector */ + pcre2_code *re_pcre; + pcre2_match_data *match_data; /* Contains the output vector */ + pcre2_match_context *mcontext; /* Stores the max element limit */ }; /** @@ -36,20 +34,23 @@ struct slapi_regex_handle * \warning The regex handler should be released by slapi_re_free(). */ Slapi_Regex * -slapi_re_comp(const char *pat, const char **error) +slapi_re_comp(const char *pat, char **error) { Slapi_Regex *re_handle = NULL; - pcre *re = NULL; - const char *myerror = NULL; - int erroffset; + pcre2_code *re = NULL; + int32_t myerror; + PCRE2_SIZE erroffset; + PCRE2_UCHAR errormsg[CPRE_ERR_MSG_SIZE]; - re = pcre_compile(pat, 0, &myerror, &erroffset, NULL); - if (error) { - *error = myerror; - } - if (re) { + re = pcre2_compile((PCRE2_SPTR)pat, strlen(pat), 0, + &myerror, &erroffset, NULL); + if (re == NULL) { + pcre2_get_error_message(myerror, errormsg, CPRE_ERR_MSG_SIZE); + *error = slapi_ch_strdup((char *)errormsg); + } else { re_handle = (Slapi_Regex *)slapi_ch_calloc(sizeof(Slapi_Regex), 1); re_handle->re_pcre = re; + *error = NULL; } return re_handle; @@ -71,7 +72,7 @@ slapi_re_comp(const char *pat, const char **error) int slapi_re_exec(Slapi_Regex *re_handle, const char *subject, time_t time_up) { - int rc; + int32_t rc; time_t curtime = slapi_current_rel_time_t(); if (NULL == re_handle || NULL == re_handle->re_pcre || NULL == subject) { @@ -82,27 +83,30 @@ slapi_re_exec(Slapi_Regex *re_handle, const char *subject, time_t time_up) return LDAP_TIMELIMIT_EXCEEDED; } - if (NULL == re_handle->re_ovector) { - re_handle->re_oveccount = OVECCOUNT; - re_handle->re_ovector = (int *)slapi_ch_malloc(sizeof(int) * OVECCOUNT); + if (re_handle->match_data) { + pcre2_match_data_free(re_handle->match_data); } + re_handle->match_data = pcre2_match_data_create_from_pattern(re_handle->re_pcre, NULL); + + if (re_handle->mcontext == NULL) { + re_handle->mcontext = pcre2_match_context_create(NULL); + pcre2_set_match_limit(re_handle->mcontext, OVEC_MATCH_LIMIT); + } + - rc = pcre_exec(re_handle->re_pcre, /* the compiled pattern */ - NULL, /* no extra data */ - subject, /* the subject string */ - strlen(subject), /* the length of the subject */ - 0, /* start at offset 0 in the subject */ - 0, /* default options */ - re_handle->re_ovector, /* output vector for substring info */ - re_handle->re_oveccount); /* number of elems in the ovector */ + rc = pcre2_match(re_handle->re_pcre, /* the compiled pattern */ + (PCRE2_SPTR)subject, /* the subject string */ + strlen(subject), /* the length of the subject */ + 0, /* start at offset 0 in the subject */ + 0, /* default options */ + re_handle->match_data, /* contains the resulting output vector */ + re_handle->mcontext); /* stores the max element limit */ if (rc >= 0) { return 1; /* matched */ } else { return 0; /* did not match */ } - - return rc; } /** @@ -123,33 +127,34 @@ slapi_re_exec(Slapi_Regex *re_handle, const char *subject, time_t time_up) int32_t slapi_re_exec_nt(Slapi_Regex *re_handle, 
const char *subject) { - int32_t rc; + int32_t rc = 0; if (NULL == re_handle || NULL == re_handle->re_pcre || NULL == subject) { return LDAP_PARAM_ERROR; } - if (NULL == re_handle->re_ovector) { - re_handle->re_oveccount = OVECCOUNT; - re_handle->re_ovector = (int *)slapi_ch_malloc(sizeof(int) * OVECCOUNT); + if (re_handle->match_data) { + pcre2_match_data_free(re_handle->match_data); } + re_handle->match_data = pcre2_match_data_create_from_pattern(re_handle->re_pcre, NULL); - rc = pcre_exec(re_handle->re_pcre, /* the compiled pattern */ - NULL, /* no extra data */ - subject, /* the subject string */ - strlen(subject), /* the length of the subject */ - 0, /* start at offset 0 in the subject */ - 0, /* default options */ - re_handle->re_ovector, /* output vector for substring info */ - re_handle->re_oveccount); /* number of elems in the ovector */ + if (re_handle->mcontext == NULL) { + re_handle->mcontext = pcre2_match_context_create(NULL); + pcre2_set_match_limit(re_handle->mcontext, OVEC_MATCH_LIMIT); + } + rc = pcre2_match(re_handle->re_pcre, /* the compiled pattern */ + (PCRE2_SPTR)subject, /* the subject string */ + strlen(subject), /* the length of the subject */ + 0, /* start at offset 0 in the subject */ + 0, /* default options */ + re_handle->match_data, /* contains the resulting output vector */ + re_handle->mcontext); /* stores the max element limit */ if (rc >= 0) { return 1; /* matched */ } else { return 0; /* did not match */ } - - return rc; } /** @@ -173,24 +178,23 @@ slapi_re_subs(Slapi_Regex *re_handle, const char *subject, const char *src, char int slapi_re_subs_ext(Slapi_Regex *re_handle, const char *subject, const char *src, char **dst, unsigned long dstlen, int filter) { - int thislen = 0; - /* was int, should match the type we compare to in the end! 
*/ - unsigned long len = 0; - int pin; - int *ovector; + PCRE2_SIZE thislen = 0; + PCRE2_SIZE len = 0; + int32_t pin; + PCRE2_SIZE *ovector; char *mydst; const char *prev; const char *substring_start; const char *p; - if (NULL == src || NULL == re_handle || NULL == re_handle->re_ovector) { + if (NULL == src || NULL == re_handle || NULL == re_handle->match_data) { memset(*dst, '\0', dstlen); return -1; } else if (NULL == dst || NULL == *dst || 0 == dstlen) { return -1; } - ovector = re_handle->re_ovector; + ovector = pcre2_get_ovector_pointer(re_handle->match_data); mydst = *dst; prev = src; @@ -198,10 +202,6 @@ slapi_re_subs_ext(Slapi_Regex *re_handle, const char *subject, const char *src, if ('&' == *p) { /* Don't replace '&' if it's a filter AND: "(&(cn=a)(sn=b))" */ if (!filter || !(*prev == '(' && *(p + 1) == '(')) { - if (re_handle->re_oveccount <= 1) { - memset(*dst, '\0', dstlen); - return -1; - } substring_start = subject + ovector[0]; thislen = ovector[1] - ovector[0]; len += thislen; @@ -213,7 +213,7 @@ slapi_re_subs_ext(Slapi_Regex *re_handle, const char *subject, const char *src, } } else if (('\\' == *p) && ('0' <= *(p + 1) && *(p + 1) <= '9')) { pin = *(++p) - '0'; - if (re_handle->re_oveccount <= 2 * pin + 1) { + if (OVEC_MATCH_LIMIT <= 2 * pin + 1) { memset(*dst, '\0', dstlen); return -1; } @@ -226,7 +226,7 @@ slapi_re_subs_ext(Slapi_Regex *re_handle, const char *subject, const char *src, len++; } if (len >= dstlen) { - int offset = mydst - *dst; + int32_t offset = mydst - *dst; dstlen = len * 2; *dst = (char *)slapi_ch_realloc(*dst, dstlen); mydst = *dst + offset; @@ -236,6 +236,7 @@ slapi_re_subs_ext(Slapi_Regex *re_handle, const char *subject, const char *src, prev = p; } *mydst = '\0'; + return 0; } @@ -250,9 +251,14 @@ slapi_re_free(Slapi_Regex *re_handle) { if (re_handle) { if (re_handle->re_pcre) { - pcre_free(re_handle->re_pcre); + pcre2_code_free(re_handle->re_pcre); + } + if (re_handle->match_data) { + pcre2_match_data_free(re_handle->match_data); + } + if (re_handle->mcontext) { + pcre2_match_context_free(re_handle->mcontext); } - slapi_ch_free((void **)&re_handle->re_ovector); slapi_ch_free((void **)&re_handle); } } diff --git a/ldap/servers/slapd/sasl_map.c b/ldap/servers/slapd/sasl_map.c index 9a561b314..3e953d3cd 100644 --- a/ldap/servers/slapd/sasl_map.c +++ b/ldap/servers/slapd/sasl_map.c @@ -581,7 +581,7 @@ sasl_map_check(sasl_map_data *dp, char *sasl_user_and_realm, char **ldap_search_ Slapi_Regex *re = NULL; int ret = 0; int matched = 0; - const char *recomp_result = NULL; + char *recomp_result = NULL; slapi_log_err(SLAPI_LOG_TRACE, "sasl_map_check", "=>\n"); /* Compiles the regex */ @@ -590,6 +590,7 @@ sasl_map_check(sasl_map_data *dp, char *sasl_user_and_realm, char **ldap_search_ slapi_log_err(SLAPI_LOG_ERR, "sasl_map_check", "slapi_re_comp failed for expression (%s): %s\n", dp->regular_expression, recomp_result ? recomp_result : "unknown"); + slapi_ch_free_string(&recomp_result); } else { /* Matches the compiled regex against sasl_user_and_realm */ matched = slapi_re_exec_nt(re, sasl_user_and_realm); diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 80346f64f..13d3a2580 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -7527,7 +7527,7 @@ typedef struct slapi_regex_handle Slapi_Regex; * the compiled pattern. NULL if the compile fails. * \warning The regex handler should be released by slapi_re_free(). 
*/ -Slapi_Regex *slapi_re_comp(const char *pat, const char **error); +Slapi_Regex *slapi_re_comp(const char *pat, char **error); /** * Matches a compiled regular expression pattern against a given string. * A thin wrapper of pcre_exec. diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 63696772d..21b4eff1c 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -83,7 +83,7 @@ BuildRequires: lmdb-devel BuildRequires: cyrus-sasl-devel BuildRequires: icu BuildRequires: libicu-devel -BuildRequires: pcre-devel +BuildRequires: pcre2-devel BuildRequires: cracklib-devel BuildRequires: json-c-devel %if %{use_clang} @@ -120,6 +120,7 @@ BuildRequires: rust BuildRequires: pkgconfig BuildRequires: pkgconfig(systemd) BuildRequires: pkgconfig(krb5) +BuildRequires: pkgconfig(libpcre2-8) # Needed to support regeneration of the autotool artifacts. BuildRequires: autoconf BuildRequires: automake
0
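A minimal, self-contained sketch of the PCRE2 calls this migration switches to. The pattern and subject strings are made up for illustration; in the actual change these calls are wrapped inside slapi_re_comp()/slapi_re_exec() as shown in the diff, which is also why callers of slapi_re_comp() now receive a heap-allocated error string they must free.

    #include <stdio.h>
    #include <string.h>

    #define PCRE2_CODE_UNIT_WIDTH 8       /* the build defines this via configure.ac */
    #include <pcre2.h>

    int main(void)
    {
        int errcode;
        PCRE2_SIZE erroffset;
        PCRE2_UCHAR errmsg[120];
        const char *subject = "uid=jdoe,ou=people,dc=example,dc=com";

        /* pcre2_compile() reports failures as an error code; the caller turns
         * it into text with pcre2_get_error_message(). */
        pcre2_code *re = pcre2_compile((PCRE2_SPTR)"^uid=[a-z]+",
                                       PCRE2_ZERO_TERMINATED, 0,
                                       &errcode, &erroffset, NULL);
        if (re == NULL) {
            pcre2_get_error_message(errcode, errmsg, sizeof(errmsg));
            fprintf(stderr, "compile failed at offset %zu: %s\n",
                    (size_t)erroffset, (char *)errmsg);
            return 1;
        }

        /* The match data object replaces the old re_ovector/re_oveccount pair. */
        pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
        int rc = pcre2_match(re, (PCRE2_SPTR)subject, strlen(subject),
                             0, 0, md, NULL);
        printf("%s\n", rc >= 0 ? "matched" : "no match");

        pcre2_match_data_free(md);
        pcre2_code_free(re);
        return 0;
    }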
cd994fd6ad2eaf25e6cf4f8654e9c245f76404a3
389ds/389-ds-base
184585 - SASL context needs to be disposed of and a new one created when re-binding
commit cd994fd6ad2eaf25e6cf4f8654e9c245f76404a3 Author: Nathan Kinder <[email protected]> Date: Tue Mar 14 19:18:03 2006 +0000 184585 - SASL context needs to be disposed of and a new one created when re-binding diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c index 640ccbfb1..5f8996858 100644 --- a/ldap/servers/slapd/saslbind.c +++ b/ldap/servers/slapd/saslbind.c @@ -881,6 +881,36 @@ void ids_sasl_check_bind(Slapi_PBlock *pb) sasl_start: + /* Check if we are already authenticated via sasl. If so, + * dispose of the current sasl_conn and create a new one + * using the new mechanism. We also need to do this if the + * mechanism changed in the middle of the SASL authentication + * process. */ + if ((pb->pb_conn->c_flags & CONN_FLAG_SASL_COMPLETE) || continuing) { + /* Lock the connection mutex */ + PR_Lock(pb->pb_conn->c_mutex); + + /* reset flag */ + pb->pb_conn->c_flags &= ~CONN_FLAG_SASL_COMPLETE; + + /* remove any SASL I/O from the connection */ + sasl_io_cleanup(pb->pb_conn); + + /* dispose of sasl_conn and create a new sasl_conn */ + sasl_dispose(&sasl_conn); + ids_sasl_server_new(pb->pb_conn); + sasl_conn = (sasl_conn_t*)pb->pb_conn->c_sasl_conn; + + /* Unlock the connection mutex */ + PR_Unlock(pb->pb_conn->c_mutex); + + if (sasl_conn == NULL) { + send_ldap_result( pb, LDAP_AUTH_METHOD_NOT_SUPPORTED, NULL, + "sasl library unavailable", 0, NULL ); + return; + } + } + rc = sasl_server_start(sasl_conn, mech, cred->bv_val, cred->bv_len, &sdata, &slen); @@ -889,6 +919,8 @@ void ids_sasl_check_bind(Slapi_PBlock *pb) switch (rc) { case SASL_OK: /* complete */ + /* Set a flag to signify that sasl bind is complete */ + pb->pb_conn->c_flags |= CONN_FLAG_SASL_COMPLETE; /* retrieve the authenticated username */ if (sasl_getprop(sasl_conn, SASL_USERNAME, diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index 1e957f4c4..2fc89acb3 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -1271,6 +1271,9 @@ typedef struct conn { * Start TLS request operation. */ +#define CONN_FLAG_SASL_COMPLETE 32 /* Flag set when a sasl bind has been + * successfully completed. + */ #define START_TLS_OID "1.3.6.1.4.1.1466.20037"
0
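A minimal Cyrus SASL sketch of the dispose-and-recreate pattern this patch applies when a client re-binds. The service name and FQDN are illustrative, the connection locking and flag handling from the diff are omitted, and the real server uses its own ids_sasl_server_new() helper rather than calling sasl_server_new() directly; sasl_server_init() is assumed to have been called at startup.

    #include <sasl/sasl.h>

    /* Throw away the per-connection SASL context left over from a previous
     * (or interrupted) bind and create a fresh one for the next negotiation. */
    static int reset_sasl_context(sasl_conn_t **conn)
    {
        if (*conn != NULL) {
            sasl_dispose(conn);            /* free the old context */
            *conn = NULL;                  /* be explicit for the caller */
        }
        return sasl_server_new("ldap",                 /* service name */
                               "ldap.example.com",     /* server FQDN (illustrative) */
                               NULL,                   /* user realm */
                               NULL, NULL,             /* local/remote "ip;port" strings */
                               NULL,                   /* callbacks */
                               0,                      /* flags */
                               conn);
    }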
71cf23ca177c15c548ae70ad0d5a742b14ce7e8d
389ds/389-ds-base
Issue 50774 - Account.enroll_certificate() should not check for DS version Bug Description: `Account.enroll_certificate()` assumes that `userCertificate` can be added only to `nsAccount` and does a check for DS version where this objectClass was introduced. But `userCertificate` is a valid attribute for `inetOrgPerson` objectClass too. And `enroll_certificate()` can be used with this objectClass. Fix Description: Instead of relying on a DS version we should trust the server to add or reject an invalid attribute. Fixes: https://pagure.io/389-ds-base/issue/50774 Reviewed by: mhonek (Thanks!)
commit 71cf23ca177c15c548ae70ad0d5a742b14ce7e8d Author: Viktor Ashirov <[email protected]> Date: Tue Dec 10 11:31:20 2019 +0100 Issue 50774 - Account.enroll_certificate() should not check for DS version Bug Description: `Account.enroll_certificate()` assumes that `userCertificate` can be added only to `nsAccount` and does a check for DS version where this objectClass was introduced. But `userCertificate` is a valid attribute for `inetOrgPerson` objectClass too. And `enroll_certificate()` can be used with this objectClass. Fix Description: Instead of relying on a DS version we should trust the server to add or reject an invalid attribute. Fixes: https://pagure.io/389-ds-base/issue/50774 Reviewed by: mhonek (Thanks!) diff --git a/src/lib389/lib389/idm/account.py b/src/lib389/lib389/idm/account.py index 8a9e36d4c..cea672595 100644 --- a/src/lib389/lib389/idm/account.py +++ b/src/lib389/lib389/idm/account.py @@ -259,8 +259,6 @@ class Account(DSLdapObject): :param der_path: the certificate file in DER format to include. :type der_path: str """ - if ds_is_older('1.4.0'): - raise Exception("This version of DS does not support nsAccount") # Given a cert path, add this to the object as a userCertificate crt = None with open(der_path, 'rb') as f:
0
1a7abef18d408ec2574a03cf5b6f1e8ad014a86c
389ds/389-ds-base
Issue 6159 - Add a test to check URP add and delete conflict (#6160) Add URP tests that run only if the URP_VERY_LONG_TEST environment variable is set. The first test spends 6 days checking the 5770 different ways of running the (Add, sync agmt 1, sync agmt 2, Del) sequence on 3 suppliers, and verifies that once everything is in sync the entries are the same everywhere. The second test generates crossed entries and conflict entries (in theory that should not happen, but we have sometimes seen them) and tries to remove one of the entries; once everything is back in sync, it checks that the entries are the same. The second test fails - apparently there is a problem with URP in that corner case.
commit 1a7abef18d408ec2574a03cf5b6f1e8ad014a86c Author: progier389 <[email protected]> Date: Mon May 27 11:40:44 2024 +0200 Issue 6159 - Add a test to check URP add and delete conflict (#6160) Add URP tests that run if URP_VERY_LONG_TEST environment variable is set One test spends 6 days and check the 5770 different way of running the (Add, sync agmt 1, sync agmt 2, Del) sequence on 3 suppliers and check that when everything is in sync, the entries are the same everywhere Second test generate crossed entries and conflict entries (In theory that should not happen but we have sometime seen them) And tries to remove one of the entry. Then once everything is back in sync, it check that the entry are the same The second test fails - Apparently there is a problem with URP in that corner case diff --git a/dirsrvtests/tests/data/urp/db1.ldif b/dirsrvtests/tests/data/urp/db1.ldif new file mode 100644 index 000000000..85f891e66 --- /dev/null +++ b/dirsrvtests/tests/data/urp/db1.ldif @@ -0,0 +1,33 @@ + +# entry-id: 21 +dn: cn=u22449+nsUniqueId=c6654281-f11b11ee-ad93a02d-7ba2db25 + ,dc=example,dc=com +objectClass;vucsn-660c4965000000010000: extensibleobject +objectClass;vucsn-660c4965000000010000: ldapsubentry +objectClass;vucsn-660c4965000000010000: top +cn;vucsn-660c4965000000010000;mdcsn-660c4965000000010000: u22449 +creatorsName;vucsn-660c4965000000010000: cn=Directory Manager +modifiersName;vucsn-660c4965000000010000: cn=Directory Manager +createTimestamp;vucsn-660c4965000000010000: 20240402180733Z +modifyTimestamp;vucsn-660c4965000000010000: 20240402180733Z +nsUniqueId: c6654281-f11b11ee-ad93a02d-7ba2db25 +nsds5ReplConflict;vucsn-660c4965000000020000: namingConflict (ADD) cn=u22449,d + c=example,dc=com +ConflictCSN;vucsn-660c4965000000010000: 660c4965000000010000 +entryUUID: bf5a0e9e-07cb-4271-a85d-f8bc0e50e748 +dsEntryDN: cn=u22449+nsUniqueId=c6654281-f11b11ee-ad93a02d-7ba2db25 + ,dc=example,dc=com + +# entry-id: 22 +dn: cn=u22449,dc=example,dc=com +objectClass;vucsn-660c4965000000020000: extensibleobject +objectClass;vucsn-660c4965000000020000: top +cn;vucsn-660c4965000000020000;mdcsn-660c4965000000020000: u22449 +creatorsName;vucsn-660c4965000000020000: cn=Directory Manager +modifiersName;vucsn-660c4965000000020000: cn=Directory Manager +createTimestamp;vucsn-660c4965000000020000: 20240402180733Z +modifyTimestamp;vucsn-660c4965000000020000: 20240402180733Z +nsUniqueId: cd8c5081-f11b11ee-9829d059-db401164 +entryUUID;vucsn-660c4965000000020000: 180e45bf-1fdf-4ee8-9eb9-a53e95276abb +dsEntryDN: cn=u22449,dc=example,dc=com + diff --git a/dirsrvtests/tests/data/urp/db2.ldif b/dirsrvtests/tests/data/urp/db2.ldif new file mode 100644 index 000000000..764df84a8 --- /dev/null +++ b/dirsrvtests/tests/data/urp/db2.ldif @@ -0,0 +1,32 @@ + +# entry-id: 21 +dn: cn=u22449,dc=example,dc=com +objectClass;vucsn-660c4965000000010000: extensibleobject +objectClass;vucsn-660c4965000000010000: top +cn;vucsn-660c4965000000010000;mdcsn-660c4965000000010000: u22449 +creatorsName;vucsn-660c4965000000010000: cn=Directory Manager +modifiersName;vucsn-660c4965000000010000: cn=Directory Manager +createTimestamp;vucsn-660c4965000000010000: 20240402180733Z +modifyTimestamp;vucsn-660c4965000000010000: 20240402180733Z +nsUniqueId: c6654281-f11b11ee-ad93a02d-7ba2db25 +entryUUID: bf5a0e9e-07cb-4271-a85d-f8bc0e50e748 +dsEntryDN: cn=u22449,dc=example,dc=com + +# entry-id: 22 +dn: cn=u22449+nsUniqueId=cd8c5081-f11b11ee-9829d059-db401164,dc=example,dc=com +objectClass;vucsn-660c4965000000020000: extensibleobject 
+objectClass;vucsn-660c4965000000020000: top +objectClass;vucsn-660c4965000000020000: ldapsubentry +cn;vucsn-660c4965000000020000;mdcsn-660c4965000000020000: u22449 +creatorsName;vucsn-660c4965000000020000: cn=Directory Manager +modifiersName;vucsn-660c4965000000020000: cn=Directory Manager +createTimestamp;vucsn-660c4965000000020000: 20240402180733Z +modifyTimestamp;vucsn-660c4965000000020000: 20240402180733Z +nsUniqueId: cd8c5081-f11b11ee-9829d059-db401164 +nsds5ReplConflict;vucsn-660c4965000000020000: namingConflict (ADD) cn=u22449,d + c=example,dc=com +ConflictCSN;vucsn-660c4965000000020000: 660c4965000000020000 +entryUUID;vucsn-660c4965000000020000: 180e45bf-1fdf-4ee8-9eb9-a53e95276abb +dsEntryDN: cn=u22449+nsUniqueId=cd8c5081-f11b11ee-9829d059-db401164,dc=example + ,dc=com + diff --git a/dirsrvtests/tests/suites/replication/urp.py b/dirsrvtests/tests/suites/replication/urp.py new file mode 100644 index 000000000..5f2f15aef --- /dev/null +++ b/dirsrvtests/tests/suites/replication/urp.py @@ -0,0 +1,387 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2024 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest, time +import os +import glob +import ldap +from shutil import copyfile, rmtree +from itertools import permutations, product +from contextlib import contextmanager +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m3, topology_m2, set_timeout +from lib389.replica import * +from lib389._constants import * +from lib389.properties import TASK_WAIT +from lib389.index import * +from lib389.mappingTree import * +from lib389.backend import * +from lib389.conflicts import ConflictEntry +from lib389.idm.user import UserAccounts, UserAccount +from lib389.idm.organization import Organization +from lib389.agreement import Agreements +from lib389.idm.organizationalunit import OrganizationalUnits + + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) +DEBUGGING = os.getenv("DEBUGGING", default=False) +SKIP_THE_TESTS = os.getenv('URP_VERY_LONG_TEST') is None + +# Replication synchronization timeout in seconds +REPL_SYNC_TIMEOUT = 300 + +# test_urp_delete is a long test spending days +set_timeout(24*3600*10) + + +def normalize_values(v): + # Return lower case sorted values. 
+ if isinstance(v, bytes): + return v.decode('utf-8').lower() + result = [ normalize_values(val) for val in v ] + result.sort() + return result + + +def get_entry_info(entry): + norm_entry = { k.lower():normalize_values(v) for k,v in entry.items() } + uuid = norm_entry['nsuniqueid'][0] + if 'nstombstone' in norm_entry['objectclass']: + return "tombstone:"+uuid + elif 'nsds5replconflict' in norm_entry: + return "conflict:"+uuid + else: + return "regular:"+uuid + + +def search_entries(inst, dnfilter): + ldapurl, certdir = get_ldapurl_from_serverid(inst.serverid) + assert 'ldapi://' in ldapurl + conn = ldap.initialize(ldapurl) + conn.sasl_interactive_bind_s("", ldap.sasl.external()) + filter = "(|(objectclass=*)(objectclass=ldapsubentry)(objectclass=nstombstone))" + attrlist = ['nsuniqueid','objectclass', 'nsds5replconflict' ] + result = conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr=filter, attrlist=attrlist) + entry_infos = [ get_entry_info(entry) for dn,entry in result if dnfilter.lower() in dn.lower() ] + entry_infos.sort() + return entry_infos + + +def list_all_order_combinations(m, n): + # list all way to order pair of m per n elements + # knowing that element (m, 0) should be the first for m + # But m=3, n=4 generates 1247400 combinations + # and testing them would requires 3.5 years ... + # So we are using list_order_combinations which is a subset + actions = list(product(range(m), range(n))) + actions.remove((0,0)) + for order in permutations(actions): + order = ((0,0),) + order + higher_instance_seen = 0 + ok = True + for (instance,action) in order: + if instance > higher_instance_seen+1: + ok = False + break + if instance > higher_instance_seen: + if action != 0: + ok = False + break + higher_instance_seen = instance + if ok is True: + yield order + + +def list_order_combinations(m, n): + # list all way to order pair of m per n elements + # while keeping the same action sequence on a given instances + # ( so action i on instance j is always done after action i-1 on instance j ) + # For m=3, n=4 There is 5774 orders The test requires 6 days + class OrderedCombination: + def __init__(self, m, n): + self.m = m + self.n = n + + def bypass(self, idx): + # Keep the first action in order + # Changing the order of the first action is + # equivalent to swapping the instances so lets avoid to + # do it to decrease the number of tests to perform. 
+ for i in range(self.n): + for j in range(i+1, self.n): + if idx[j] > 0 and idx[i] == 0: + return True + return False + + def run(self, s=None, idx=None): + # Run self.action on every possible order for the n steps on m steps sets + result = [] + if s is None: + s = [] + if idx is None: + idx = [ 0 ] * self.n + stop = True + if self.bypass(idx): + return result + for i in range(self.n): + if idx[i] < self.m: + stop = False + idx2 = idx.copy() + s2 = s.copy() + idx2[i] += 1 + s2.append((i+1, idx2[i])) + result.extend(self.run(s=s2, idx=idx2)) + if stop: + result.append(s) + return result + + if SKIP_THE_TESTS: + # Lets avoid generating 5000 tests to skip + return [ 'skipped', ] + return OrderedCombination(m,n).run() + + [email protected](scope="module") +def urp_tester(topology_m3): + class UrpTesterInstances: + # Contains the data and the methods about a specific instance + def __init__(self, tester, inst): + self.tester = tester + self.inst = inst + self.users = UserAccounts(inst, DEFAULT_SUFFIX) + ldapurl, certdir = get_ldapurl_from_serverid(inst.serverid) + assert 'ldapi://' in ldapurl + self.conn = ldap.initialize(ldapurl) + self.conn.sasl_interactive_bind_s("", ldap.sasl.external()) + self.entriesinfo = [] + self.replicas = [ inst0 for inst0 in tester.topo if inst0 != inst ] + + def add_user(self): + user_properties = { + 'uid': 'testuser', + 'cn': 'testuser', + 'sn': 'testuser', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/testuser' + } + # Wait 1 second to ensure that csn time are differents + time.sleep(1) + self.user = self.users.create(properties=user_properties) + log.info(f"Adding entry {self.user.dn} on {self.inst.serverid}") + self.uuid = self.user.get_attr_val_utf8_l('nsuniqueid') + + def sync1(self): + self.tester.resync_agmt(self.replicas[0], self.inst) + + def sync2(self): + self.tester.resync_agmt(self.replicas[1], self.inst) + + def remove_user(self): + filter = f'(&(nsuniqueid={self.uuid})(|(objectclass=nsTombstone)(objectclass=ldapsubentry)(objectclass=*)))' + res = self.conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filter, ['dn',]) + assert len(res) == 1 + dn,entry = res[0] + log.info(f'Removing entry: {dn}') + self.conn.delete_s(dn) + + def get_entries(self): + self.entry_info = search_entries(self.inst, 'uid=testuser') + + def run_action(self, action): + actions = ( ( None, None ), + ( "ADD ENTRY", self.add_user ), + ( "SYNC ENTRY FROM REPLICA ASSOCIATED WITH FIRST AGMT", self.sync1 ), + ( "SYNC ENTRY FROM REPLICA ASSOCIATED WITH SECOND AGMT", self.sync2 ), + ( "REMOVE ENTRY", self.remove_user ), ) + log.info (f'***** {actions[action][0]} on {self.inst.serverid}') + actions[action][1]() + + + class UrpTester: + # Contains the data and the methods for all instances + def __init__(self, topo): + self.topo = topo + self.repl = ReplicationManager(DEFAULT_SUFFIX) + self.insts = [ UrpTesterInstances(self, inst) for inst in topo ] + inst = topo[0] + self.ldif = f'{inst.get_ldif_dir()}/userroot.ldif' + tasks = Tasks(inst) + tasks.exportLDIF(DEFAULT_SUFFIX, output_file=self.ldif, args={EXPORT_REPL_INFO: True, TASK_WAIT: True}) + + def resync_agmt(self, instfrom, instto): + log.info(f"Enabling replication agreement from {instfrom.serverid} to {instto.serverid}") + self.repl.enable_to_supplier(instto, [instfrom,]) + self.repl.wait_for_replication(instfrom, instto, timeout=REPL_SYNC_TIMEOUT) + + def disable_all_agmts(self): + log.info(f"Disabling replication all replication agreements") + for inst in self.topo: + for agmt in Agreements(inst).list(): + 
agmt.pause() + + def resync_all_agmts(self): + ilist = [ inst for inst in self.topo ] + for inst in ilist: + for inst2 in ilist: + if inst != inst2: + self.resync_agmt(inst, inst2) + + def get_entries(self): + for inst in self.insts: + inst.get_entries() + + def reset(self): + # Reinitilize all the replicas to original data and disable all agmts + self.disable_all_agmts() + # In theory importing on all replicas should work but sometime the replication + # fails to restart (probably worth to investigate) - so let use total reinit instead. + # for inst in self.topo: + # tasks = Tasks(inst) + # tasks.importLDIF(DEFAULT_SUFFIX, input_file=self.ldif, args={TASK_WAIT: True}) + # self.resync_all_agmts() + inst = self.topo[0] + agmts = Agreements(inst).list() + for agmt in agmts: + agmt.resume() + tasks = Tasks(inst) + tasks.importLDIF(DEFAULT_SUFFIX, input_file=self.ldif, args={TASK_WAIT: True}) + for agmt in Agreements(inst).list(): + agmt.begin_reinit() + (done, error) = agmt.wait_reinit() + assert done is True + assert error is False + for agmt in agmts: + agmt.pause() + + return UrpTester(topology_m3) + + +# Warning. test_urp_delete is very long and requires a reasonably +# fast machine and enough memory to run 3 instances without swapping. +# FYI: the test spend 6 days on my laptop with i7-9850H CPU @ 2.60GHz +# and 32Gb of memory. [email protected](SKIP_THE_TESTS, reason="This test is meant to execute in specific test environment") [email protected]("actionorder", list_order_combinations(4,3)) +def test_urp_delete(urp_tester, actionorder): + """Test urp conflict handling for add and delete operations + + :id: 7b08b6ac-f362-11ee-bf7c-482ae39447e5 + :setup: Three suppliers + :parametrized: yes + :steps: + On every possible combinations (i.e 5775) for the actions on all instances + 1. Reinitialise the instances + 2. Run the action on specified order and specified instances + The actions are: + - Add the entry on current replica + - Sync first other replica changes with current replica + - Sync second other replica changes with current replica + - Find the entry added on this replica and remove it + 3. Wait until instances are in sync + 4. Check that entry type,nsuniqueids pair are the same on all replica + (entry type is either tombstone, conflct, or regular) + :expectedresults: + 1. Should Success. + 2. Should Success. + 3. Should Success. + 4. Should Success. + """ + log.info(f'actions order is: {actionorder}') + urp_tester.reset() + # Run the actions on specified server with specified order + for inst_idx,action in actionorder: + urp_tester.insts[inst_idx-1].run_action(action) + # Wait until replication is in sync + urp_tester.resync_all_agmts() + # Check that all replica have the sames entries + urp_tester.get_entries() + assert urp_tester.insts[0].entry_info == urp_tester.insts[1].entry_info + assert urp_tester.insts[0].entry_info == urp_tester.insts[2].entry_info + + +def gen_ldif_file(first, second, result): + with open(result, 'w') as output: + for file in [ first, second ]: + with open(file, 'r') as input: + for line in input: + output.write(line) + # ldif sould be readable by dirsrv + os.chmod(result, 0o755) + + [email protected](SKIP_THE_TESTS, reason="This test is meant to execute in specific test environment") [email protected](reason="URP does not properly handle this case") +def test_urp_with_crossed_entries(topology_m2): + """Test urp behaviour if entry conflict entries are crossed + + :id: 6b4adfc2-fd9b-11ee-a6f0-482ae39447e5 + :setup: Two suppliers + :steps: + 1. 
Generate ldif files with crossed conflict entries + 2. Import the ldif files + 3. Remove a conflict entry + 4. Wait until le replication is in sync + 5. Check that entries have the same type (tombstone/conflict/regular) on both replica. + 6. Import the ldif files + 7. Remove a regular entry + 8. Wait until le replication is in sync + 9. Check that entries have the same type (tombstone/conflict/regular) on both replica. + :expectedresults: + 1. Should Success. + 2. Should Success. + 3. Should Success. + 4. Should Success. + """ + # Note in theory crossed entries should not happen + # But this test shows that we are in trouble if that is the case + s1 = topology_m2.ms["supplier1"] + s2 = topology_m2.ms["supplier2"] + datadir = os.path.join(os.path.dirname(__file__), '../../data/urp') + # 1. Generate ldif files with crossed conflict entries + # Export the ldif to be sure that the replication credentials are ok + export_ldif = f'{s1.get_ldif_dir()}/db.ldif' + import_ldif1 = f'{s1.get_ldif_dir()}/db1.ldif' + import_ldif2 = f'{s1.get_ldif_dir()}/db2.ldif' + assert Tasks(s1).exportLDIF(DEFAULT_SUFFIX, output_file=export_ldif, + args={EXPORT_REPL_INFO: True, TASK_WAIT: True}) == 0 + gen_ldif_file(export_ldif, f'{datadir}/db1.ldif', import_ldif1) + gen_ldif_file(export_ldif, f'{datadir}/db2.ldif', import_ldif2) + # 2. import the ldif files + assert Tasks(s1).importLDIF(DEFAULT_SUFFIX, input_file=import_ldif1, args={TASK_WAIT: True}) == 0 + assert Tasks(s2).importLDIF(DEFAULT_SUFFIX, input_file=import_ldif2, args={TASK_WAIT: True}) == 0 + # 3. Remove a conflict entry + ConflictEntry(s1, 'cn=u22449+nsUniqueId=c6654281-f11b11ee-ad93a02d-7ba2db25,dc=example,dc=com').delete() + # 4. Wait until le replication is in sync + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(s1, s2) + # 5. Check that entries have the same type (tombstone/conflict/regular) on both replica. + u1 = search_entries(s1, 'cn=u22449') + u2 = search_entries(s2, 'cn=u22449') + assert u1 == u2 + # 6. import the ldif files + Tasks(s1).importLDIF(DEFAULT_SUFFIX, input_file=import_ldif1, args={TASK_WAIT: True}) + Tasks(s2).importLDIF(DEFAULT_SUFFIX, input_file=import_ldif2, args={TASK_WAIT: True}) + # 7. Remove a regular entry + UserAccount(s2, 'cn=u22449,dc=example,dc=com').delete() + # 8. Wait until le replication is in sync + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(s1, s2) + # 9. Check that entries have the same type (tombstone/conflict/regular) on both replica. + u1 = search_entries(s1, 'cn=u22449') + u2 = search_entries(s2, 'cn=u22449') + assert u1 == u2 + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE)
0
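The core of list_order_combinations() in the test above is enumerating every interleaving of the per-supplier action sequences while keeping each supplier's own actions in order. A small self-contained sketch of that idea follows; it omits the extra pruning the test applies to drop symmetric orderings, so its counts differ from the 5770 quoted in the commit message.

# Enumerate interleavings of several action sequences, preserving the
# internal order of each sequence (action i on a supplier is always
# played after action i-1 on the same supplier).
def interleavings(seqs):
    seqs = [tuple(s) for s in seqs]

    def rec(prefix, remaining):
        if all(not s for s in remaining):
            yield tuple(prefix)
            return
        for i, s in enumerate(remaining):
            if s:
                rest = list(remaining)
                rest[i] = s[1:]
                yield from rec(prefix + [s[0]], rest)

    yield from rec([], seqs)

# Example: 2 suppliers with 2 actions each -> 6 order-preserving orders.
actions = [[("s1", "add"), ("s1", "del")], [("s2", "add"), ("s2", "del")]]
for order in interleavings(actions):
    print(order)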
942c1af6f0c963f01671dadca8571995f332aaf3
389ds/389-ds-base
Bump version to 1.3.6.0
commit 942c1af6f0c963f01671dadca8571995f332aaf3 Author: Mark Reynolds <[email protected]> Date: Wed Aug 31 12:48:19 2016 -0400 Bump version to 1.3.6.0 diff --git a/VERSION.sh b/VERSION.sh index f83c1ec6f..1ae820fa8 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -10,7 +10,7 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=1 VERSION_MINOR=3 -VERSION_MAINT=5.13 +VERSION_MAINT=6.0 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=$(date -u +%Y%m%d)
0
0550cead9f7e62ffe47c64a853d200f75b93ff5d
389ds/389-ds-base
Ticket 48864 - Cleanup memory detection before we add cgroup support Bug Description: Our old memory detection code was really spaghetti-like and not very nice. Make a cleaner, better interface. Fix Description: Add a new slapi platform abstraction. We will start to move pieces of abstraction in here as needed. This contains a struct of system information, such as memory information. Due to this design we can now test not only the memory detection but, because we pass mi as a parameter to util_cachesize_issane, also our cache checking function. https://pagure.io/389-ds-base/issue/48864 Author: wibrown Review by: mreynolds (Thanks!)
commit 0550cead9f7e62ffe47c64a853d200f75b93ff5d Author: William Brown <[email protected]> Date: Mon Apr 3 12:02:36 2017 +1000 Ticket 48864 - Cleanup memory detection before we add cgroup support Bug Description: Our old memory detection code was really spaghetti like, and not very nice. Make a cleaner better interface. Fix Description: Add a new slapi platform abstraction. We will start to move pieces of abstraction in here as needed. This contains a struct of system information such an memory infomation. due to the design, we can now test not only the memory detection, but because we pass mi as a parameter to util_cachesize_issane, we can now test our cache checking function too. https://pagure.io/389-ds-base/issue/48864 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index 739a8b876..26a96f684 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -1391,7 +1391,7 @@ dblayer_start(struct ldbminfo *li, int dbmode) /* Oops---looks like the admin misconfigured, let's warn them */ slapi_log_err(SLAPI_LOG_WARNING,"dblayer_start", "Likely CONFIGURATION ERROR -" "dbcachesize is configured to use more than the available " - "physical memory, decreased to the largest available size (%lu bytes).\n", + "physical memory, decreased to the largest available size (%"PRIu64" bytes).\n", priv->dblayer_cachesize); li->li_dbcachesize = priv->dblayer_cachesize; } @@ -1692,9 +1692,6 @@ dblayer_start(struct ldbminfo *li, int dbmode) * nsslapd-import-cache-autosize: 0 * get the nsslapd-import-cachesize. * Calculate the memory size left after allocating the import cache size. - * If the size is less than the hard limit, it issues an error and quit. - * If the size is greater than the hard limit and less than the soft limit, - * it issues a warning, but continues the import task. * * Note: this function is called only if the import is executed as a stand * alone command line (ldif2db). @@ -1702,27 +1699,17 @@ dblayer_start(struct ldbminfo *li, int dbmode) int check_and_set_import_cache(struct ldbminfo *li) { - size_t import_pages = 0; - size_t pagesize, pages, procpages, availpages; - size_t soft_limit = 0; - size_t hard_limit = 0; - size_t page_delta = 0; + uint64_t import_cache = 0; char s[64]; /* big enough to hold %ld */ + /* Get our platform memory values. 
*/ + slapi_pal_meminfo *mi = spal_meminfo_get(); - if (util_info_sys_pages(&pagesize, &pages, &procpages, &availpages) != 0 || 0 == pagesize || 0 == pages) { - slapi_log_err(SLAPI_LOG_ERR, "check_and_set_import_cache", - "Failed to get pagesize: %ld or pages: %ld\n", - pagesize, pages); + if (mi == NULL) { + slapi_log_err(SLAPI_LOG_ERR, "check_and_set_import_cache", "Failed to get system memory infomation\n"); return ENOENT; } - slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", - "pagesize: %ld, pages: %ld, procpages: %ld\n", - pagesize, pages, procpages); + slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", "pagesize: %"PRIu64", available bytes %"PRIu64", process usage %"PRIu64" \n", mi->pagesize_bytes, mi->system_available_bytes, mi->process_consumed_bytes); - /* Soft limit: pages equivalent to 1GB (defined in dblayer.h) */ - soft_limit = (DBLAYER_IMPORTCACHESIZE_SL*1024) / (pagesize/1024); - /* Hard limit: pages equivalent to 100MB (defined in dblayer.h) */ - hard_limit = (DBLAYER_IMPORTCACHESIZE_HL*1024) / (pagesize/1024); /* * default behavior for ldif2db import cache, * nsslapd-import-cache-autosize==-1, @@ -1743,48 +1730,29 @@ check_and_set_import_cache(struct ldbminfo *li) if (li->li_import_cache_autosize == 0) { /* user specified importCache */ - import_pages = li->li_import_cachesize / pagesize; + import_cache = li->li_import_cachesize; } else { /* autosizing importCache */ /* ./125 instead of ./100 is for adjusting the BDB overhead. */ -#ifdef LINUX - /* On linux, availpages is correct so we should use it! */ - import_pages = (li->li_import_cache_autosize * availpages) / 125; -#else - import_pages = (li->li_import_cache_autosize * pages) / 125; -#endif + import_cache = (li->li_import_cache_autosize * mi->system_available_bytes) / 125; } - page_delta = pages - import_pages; - if (page_delta < hard_limit) { - slapi_log_err(SLAPI_LOG_ERR, - "check_and_set_import_cache", "After allocating import cache %ldKB, " - "the available memory is %ldKB, " - "which is less than the hard limit %ldKB. " - "Please decrease the import cache size and rerun import.\n", - import_pages*(pagesize/1024), page_delta*(pagesize/1024), - hard_limit*(pagesize/1024)); + if (util_is_cachesize_sane(mi, &import_cache) == UTIL_CACHESIZE_ERROR) { + + slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", "Import failed to run: unable to validate system memory limits.\n"); + spal_meminfo_destroy(mi); return ENOMEM; } - if (page_delta < soft_limit) { - slapi_log_err(SLAPI_LOG_WARNING, - "check_and_set_import_cache", "After allocating import cache %ldKB, " - "the available memory is %ldKB, " - "which is less than the soft limit %ldKB. 
" - "You may want to decrease the import cache size and " - "rerun import.\n", - import_pages*(pagesize/1024), page_delta*(pagesize/1024), - soft_limit*(pagesize/1024)); - } - slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", "Import allocates %ldKB import cache.\n", - import_pages*(pagesize/1024)); - if (li->li_import_cache_autosize > 0) { /* import cache autosizing */ + slapi_log_err(SLAPI_LOG_INFO, "check_and_set_import_cache", "Import allocates %"PRIu64"KB import cache.\n", import_cache / 1024); + if (li->li_import_cache_autosize > 0) { + /* import cache autosizing */ /* set the calculated import cache size to the config */ - sprintf(s, "%lu", (unsigned long)(import_pages * pagesize)); + sprintf(s, "%"PRIu64, import_cache); ldbm_config_internal_set(li, CONFIG_IMPORT_CACHESIZE, s); } + spal_meminfo_destroy(mi); return 0; } diff --git a/ldap/servers/slapd/back-ldbm/dblayer.h b/ldap/servers/slapd/back-ldbm/dblayer.h index e4307fcec..816c943c3 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.h +++ b/ldap/servers/slapd/back-ldbm/dblayer.h @@ -68,14 +68,6 @@ #define DB_REGION_NAME 25 /* DB: named regions, no backing file. */ #endif -/* Used in check_and_set_import_cache */ -/* After allocating the import cache, free memory must be left more than - * the hard limit to run import. */ -/* If the free memory size left is greater than hard limit and less than - * soft limit, the import utility issues a warning, but it runs */ -#define DBLAYER_IMPORTCACHESIZE_HL 100 /* import cache hard limit 100MB */ -#define DBLAYER_IMPORTCACHESIZE_SL 1024 /* import cache soft limit 1GB */ - struct dblayer_private_env { DB_ENV *dblayer_DB_ENV; Slapi_RWLock * dblayer_env_lock; diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h index 6349c8fab..0c765809a 100644 --- a/ldap/servers/slapd/slapi-private.h +++ b/ldap/servers/slapd/slapi-private.h @@ -1365,19 +1365,6 @@ time_t slapi_parse_duration(const char *value); long long slapi_parse_duration_longlong(const char *value); int slapi_is_duration_valid(const char *value); -/** - * Populate the pointers with the system memory information. - * At this time, Linux is the only "reliable" system for returning these values - * - * \param pagesize Will return the system page size in bytes. - * \param pages The total number of memory pages on the system. May include swap pages depending on OS. - * \param procpages Number of memory pages our current process is consuming. May not be accurate on all platforms as this could be the VMSize rather than the actual number of consumed pages. - * \param availpages Number of available pages of memory on the system. Not all operating systems set this correctly. - * - * \return 0 on success, non-zero on failure to determine memory sizings. - */ -int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size_t *availpages); - /** * Possible results of a cachesize check */ diff --git a/ldap/servers/slapd/slapi_pal.c b/ldap/servers/slapd/slapi_pal.c new file mode 100644 index 000000000..4bafd4bca --- /dev/null +++ b/ldap/servers/slapd/slapi_pal.c @@ -0,0 +1,250 @@ +/** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2017 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). + * See LICENSE for details. + * END COPYRIGHT BLOCK **/ + +/* + * Implementation of functions to abstract from platform + * specific issues. + */ + +/* Provide ch_malloc etc. 
*/ +#include <slapi-plugin.h> +/* Provide slapi_log_err macro wrapper */ +#include <slapi-private.h> +#include <slapi_pal.h> + +/* Assert macros */ +#include <assert.h> +/* Access errno */ +#include <errno.h> + +/* For getpagesize */ +#include <unistd.h> + +/* For rlimit */ +#include <sys/time.h> +#include <sys/resource.h> + +#ifdef OS_solaris +#include <sys/procfs.h> +#endif + +#if defined ( hpux ) +#include <sys/pstat.h> +#endif + +static int_fast32_t +_spal_rlimit_get(int resource, uint64_t *soft_limit, uint64_t *hard_limit) { + struct rlimit rl = {0}; + + if (getrlimit(resource, &rl) != 0) { + int errsrv = errno; + slapi_log_err(SLAPI_LOG_ERR, "_spal_rlimit_mem_get", "Failed to access system resource limits %d\n", errsrv); + return 1; + } + + if (rl.rlim_cur != RLIM_INFINITY) { + *soft_limit = (uint64_t)rl.rlim_cur; + } + if (rl.rlim_max != RLIM_INFINITY) { + *hard_limit = (uint64_t)rl.rlim_max; + } + + return 0; +} + + +#ifdef LINUX +static int_fast32_t +_spal_uint64_t_file_get(char *name, char *prefix, uint64_t *dest) { + FILE *f; + char s[40] = {0}; + size_t prefix_len = strlen(prefix); + + /* Make sure we can fit into our buffer */ + assert((prefix_len + 20) < 39); + + f = fopen(name, "r"); + if (!f) { /* fopen failed */ + int errsrv = errno; + slapi_log_err(SLAPI_LOG_ERR,"_spal_get_uint64_t_file", "Unable to open file \"%s\". errno=%d\n", name, errsrv); + return 1; + } + + int_fast32_t retval = 0; + while (! feof(f)) { + if (!fgets(s, 39, f)) { + retval = 1; + break; /* error or eof */ + } + if (feof(f)) { + retval = 1; + break; + } + if (strncmp(s, prefix, prefix_len) == 0) { + sscanf(s + prefix_len, "%"SCNu64, dest); + break; + } + } + fclose(f); + return retval; +} + + + +slapi_pal_meminfo * +spal_meminfo_get() { + slapi_pal_meminfo *mi = (slapi_pal_meminfo *)slapi_ch_calloc(1, sizeof(slapi_pal_meminfo)); + + mi->pagesize_bytes = getpagesize(); + + /* + * We have to compare values from a number of sources to ensure we have + * the correct result. 
+ */ + + char f_proc_status[30] = {0}; + sprintf(f_proc_status, "/proc/%d/status", getpid()); + char *p_vmrss = "VmRSS:"; + uint64_t vmrss = 0; + + if (_spal_uint64_t_file_get(f_proc_status, p_vmrss, &vmrss)) { + slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve vmrss\n"); + } + + /* vmrss is in kb, so convert to bytes */ + vmrss = vmrss * 1024; + + uint64_t rl_mem_soft = 0; + uint64_t rl_mem_hard = 0; + uint64_t rl_mem_soft_avail = 0; + + if (_spal_rlimit_get(RLIMIT_AS, &rl_mem_soft, &rl_mem_hard)) { + slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve memory rlimit\n"); + } + + if (rl_mem_soft != 0 && rl_mem_soft > vmrss) { + rl_mem_soft_avail = rl_mem_soft - vmrss; + } + + char *f_meminfo = "/proc/meminfo"; + char *p_memtotal = "MemTotal:"; + char *p_memavail = "MemAvailable:"; + + uint64_t memtotal = 0; + uint64_t memavail = 0; + + if (_spal_uint64_t_file_get(f_meminfo, p_memtotal, &memtotal)) { + slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve %s : %s\n", f_meminfo, p_memtotal); + } + + if (_spal_uint64_t_file_get(f_meminfo, p_memavail, &memavail)) { + slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve %s : %s\n", f_meminfo, p_memavail); + } + + /* Both memtotal and memavail are in kb */ + memtotal = memtotal * 1024; + memavail = memavail * 1024; + + /* Now, compare the values and make a choice to which is provided */ + + /* Process consumed memory */ + mi->process_consumed_bytes = vmrss; + mi->process_consumed_pages = vmrss / mi->pagesize_bytes; + + /* System Total memory */ + /* If we have a memtotal, OR if no memtotal but rlimit */ + if (rl_mem_hard != 0 && ((memtotal != 0 && rl_mem_hard < memtotal) || memtotal == 0)) { + mi->system_total_bytes = rl_mem_hard; + mi->system_total_pages = rl_mem_hard / mi->pagesize_bytes; + } else if (memtotal != 0) { + mi->system_total_bytes = memtotal; + mi->system_total_pages = memtotal / mi->pagesize_bytes; + } else { + slapi_log_err(SLAPI_LOG_CRIT, "spal_meminfo_get", "Unable to determine system total memory!\n"); + spal_meminfo_destroy(mi); + return NULL; + } + + /* System Available memory */ + + if (rl_mem_soft_avail != 0 && ((memavail != 0 && (rl_mem_soft_avail) < memavail) || memavail == 0)) { + mi->system_available_bytes = rl_mem_soft_avail; + mi->system_available_pages = rl_mem_soft_avail / mi->pagesize_bytes; + } else if (rl_mem_soft != 0 && ((memavail != 0 && (rl_mem_soft) < memavail) || memavail == 0)) { + mi->system_available_bytes = rl_mem_soft; + mi->system_available_pages = rl_mem_soft / mi->pagesize_bytes; + } else if (memavail != 0) { + mi->system_available_bytes = memavail; + mi->system_available_pages = memavail / mi->pagesize_bytes; + } else { + slapi_log_err(SLAPI_LOG_CRIT, "spal_meminfo_get", "Unable to determine system available memory!\n"); + spal_meminfo_destroy(mi); + return NULL; + } + + slapi_log_err(SLAPI_LOG_TRACE, "spal_meminfo_get", "{pagesize_bytes = %"PRIu64", system_total_pages = %"PRIu64", system_total_bytes = %"PRIu64", process_consumed_pages = %"PRIu64", process_consumed_bytes = %"PRIu64", system_available_pages = %"PRIu64", system_available_bytes = %"PRIu64"},\n", + mi->pagesize_bytes, mi->system_total_pages, mi->system_total_bytes, mi->process_consumed_pages, mi->process_consumed_bytes, mi->system_available_pages, mi->system_available_bytes); + + return mi; +} + + +#endif + +#ifdef OS_solaris +uint64_t +_spal_solaris_resident_pages_get() { + uint64_t procpages = 0; + struct prpsinfo psi = {0}; + char fn[40]; + int fd; + + sprintf(fn, 
"/proc/%d", getpid()); + fd = open(fn, O_RDONLY); + if (fd >= 0) { + if (ioctl(fd, PIOCPSINFO, (void *)&psi) == 0) { + procpages = (uint64_t)psi.pr_size; + } + close(fd); + } + return procpages; +} + +slapi_pal_meminfo * +spal_meminfo_get() { + slapi_pal_meminfo *mi = (slapi_pal_meminfo *)slapi_ch_calloc(1, sizeof(slapi_pal_meminfo)); + + uint64_t rl_mem_soft = 0; + uint64_t rl_mem_hard = 0; + + if (_spal_rlimit_get(RLIMIT_AS, &rl_mem_soft, &rl_mem_hard)) { + slapi_log_err(SLAPI_LOG_ERR, "spal_meminfo_get", "Unable to retrieve memory rlimit\n"); + } + + mi->pagesize_bytes = sysconf(_SC_PAGESIZE); + mi->system_total_pages = sysconf(_SC_PHYS_PAGES); + mi->system_total_bytes = mi->system_total_pages * mi->pagesize_bytes; + mi->system_available_bytes = rl_mem_soft; + if (rl_mem_soft != 0) { + mi->system_available_pages = rl_mem_soft / mi->pagesize_bytes; + } + mi->process_consumed_pages = _spal_solaris_resident_pages_get(); + mi->process_consumed_bytes = mi->process_consumed_pages * mi->pagesize_bytes; + + return mi; + +} +#endif + +#ifdef HPUX +#endif + +void +spal_meminfo_destroy(slapi_pal_meminfo *mi) { + slapi_ch_free((void **)&mi); +} diff --git a/ldap/servers/slapd/slapi_pal.h b/ldap/servers/slapd/slapi_pal.h new file mode 100644 index 000000000..cb61d848a --- /dev/null +++ b/ldap/servers/slapd/slapi_pal.h @@ -0,0 +1,62 @@ +/** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2017 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). + * See LICENSE for details. + * END COPYRIGHT BLOCK **/ + +/* + * Header for the slapi platform abstraction layer. + * + * This implements a number of functions that help to provide vendor + * neutral requests. Candidates for this are memory, thread, disk size + * and other operations. + * + * Basically anywhere you see a "ifdef PLATFORM" is a candidate + * for this. + */ + +#pragma once + +#include <config.h> + +#ifdef HAVE_INTTYPES_H +#include <inttypes.h> +#else +#error Need to define portable format macros such as PRIu64 +#endif /* HAVE_INTTYPES_H */ + +/** + * Structure that contains our system memory information in bytes and pages. + * + */ +typedef struct _slapi_pal_meminfo { + uint64_t pagesize_bytes; + uint64_t system_total_pages; + uint64_t system_total_bytes; + uint64_t process_consumed_pages; + uint64_t process_consumed_bytes; + /* This value may be limited by cgroup or others. */ + uint64_t system_available_pages; + uint64_t system_available_bytes; +} slapi_pal_meminfo; + +/** + * Allocate and returne a populated memory info structure. This will be NULL + * on error, or contain a structure populated with platform information on + * success. You should free this with spal_meminfo_destroy. + * + * \return slapi_pal_meminfo * pointer to structure containing data, or NULL. + */ +slapi_pal_meminfo * spal_meminfo_get(); + +/** + * Destroy an allocated memory info structure. The caller is responsible for + * ensuring this is called. 
+ * + * \param mi the allocated slapi_pal_meminfo structure from spal_meminfo_get(); + */ +void spal_meminfo_destroy(slapi_pal_meminfo *mi); + + diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index 48fa3c48b..a0753c004 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -40,20 +40,8 @@ #define FILTER_BUF 128 /* initial buffer size for attr value */ #define BUF_INCR 16 /* the amount to increase the FILTER_BUF once it fills up */ -/* Used by our util_info_sys_pages function - * - * platforms supported so far: - * Solaris, Linux, Windows - */ -#ifdef OS_solaris -#include <sys/procfs.h> -#endif -#ifdef LINUX -#include <linux/kernel.h> -#endif -#if defined ( hpux ) -#include <sys/pstat.h> -#endif +/* slapi-private contains the pal. */ +#include <slapi-private.h> static int special_filename(unsigned char c) { @@ -1496,336 +1484,25 @@ static size_t util_getvirtualmemsize(void) return rl.rlim_cur; } -/* pages = number of pages of physical ram on the machine (corrected for 32-bit build on 64-bit machine). - * procpages = pages currently used by this process (or working set size, sometimes) - * availpages = some notion of the number of pages 'free'. Typically this number is not useful. - */ -int util_info_sys_pages(size_t *pagesize, size_t *pages, size_t *procpages, size_t *availpages) +util_cachesize_result +util_is_cachesize_sane(slapi_pal_meminfo *mi, uint64_t *cachesize) { - if ((NULL == pagesize) || (NULL == pages) || (NULL == procpages) || (NULL == availpages)) { - slapi_log_err(SLAPI_LOG_ERR, "util_info_sys_pages", - "Null return variables are passed. Skip getting the system info.\n"); - return 1; - } - *pagesize = 0; - *pages = 0; - *availpages = 0; - *procpages = 0; - -#ifdef OS_solaris - *pagesize = (int)sysconf(_SC_PAGESIZE); - *pages = (int)sysconf(_SC_PHYS_PAGES); - *availpages = util_getvirtualmemsize() / *pagesize; - /* solaris has THE most annoying way to get this info */ - { - struct prpsinfo psi = {0}; - char fn[40]; - int fd; - - sprintf(fn, "/proc/%d", getpid()); - fd = open(fn, O_RDONLY); - if (fd >= 0) { - if (ioctl(fd, PIOCPSINFO, (void *)&psi) == 0) { - *procpages = psi.pr_size; - } - close(fd); - } - } -#endif - -#ifdef LINUX - { - /* - * On linux because of the way that the virtual memory system works, we - * don't really need to think about other processes, or fighting them. - * But that's not without quirks. - * - * We are given a virtual memory space, represented by vsize (man 5 proc) - * This space is a "funny number". It's a best effort based system - * where linux instead of telling us how much memory *actually* exists - * for us to use, gives us a virtual memory allocation which is the - * value of ram + swap.... sometimes. Depends on platform. - * - * But none of these pages even exist or belong to us on the real system - * until will malloc them AND write a non-zero to them. - * - * The biggest issue with this is that vsize does NOT consider the - * effect other processes have on the system. So a process can malloc - * 2 Gig from the host, and our vsize doesn't reflect that until we - * suddenly can't malloc anything. - * - * We can see exactly what we are using inside of the vmm by - * looking at rss (man 5 proc). This shows us the current actual - * allocation of memory we are using. This is a good thing. - * - * We obviously don't want to have any pages in swap, but sometimes we - * can't help that: And there is also no guarantee that while we have - * X bytes in vsize, that we can even allocate any of them. 
Plus, we - * don't know if we are about to allocate to swap or not .... or get us - * killed in a blaze of oom glory. - * - * So there are now two strategies avaliable in this function. - * The first is to blindly accept what the VMM tells us about vsize - * while we hope and pray that we don't get nailed because we used - * too much. - * - * The other is a more conservative approach: We check vsize from - * proc/pid/status, and we check /proc/meminfo for freemem - * Which ever value is "lower" is the upper bound on pages we could - * potentially allocate: generally, this will be MemAvailable. - */ - - size_t freesize = 0; - size_t rlimsize = 0; - - *pagesize = getpagesize(); - - /* Get the amount of freeram, rss */ - - FILE *f; - char fn[40], s[80]; - - sprintf(fn, "/proc/%d/status", getpid()); - f = fopen(fn, "r"); - if (!f) { /* fopen failed */ - /* We should probably make noise here! */ - int errsrv = errno; - slapi_log_err(SLAPI_LOG_ERR,"util_info_sys_pages", "Unable to open file /proc/%d/status. errno=%u\n", getpid(), errsrv); - return 1; - } - while (! feof(f)) { - if (!fgets(s, 79, f)) { - break; /* error or eof */ - } - if (feof(f)) { - break; - } - /* VmRSS shows us what we are ACTUALLY using for proc pages - * Rather than "funny" pages. - */ - if (strncmp(s, "VmRSS:", 6) == 0) { - sscanf(s+6, "%lu", (long unsigned int *)procpages); - } - } - fclose(f); - - FILE *fm; - char *fmn = "/proc/meminfo"; - fm = fopen(fmn, "r"); - if (!fm) { - int errsrv = errno; - slapi_log_err(SLAPI_LOG_ERR,"util_info_sys_pages", "Unable to open file /proc/meminfo. errno=%u\n", errsrv); - return 1; - } - while (! feof(fm)) { - if (!fgets(s, 79, fm)) { - break; /* error or eof */ - } - if (feof(fm)) { - break; - } - if (strncmp(s, "MemTotal:", 9) == 0) { - sscanf(s+9, "%lu", (long unsigned int *)pages); - } - if (strncmp(s, "MemAvailable:", 13) == 0) { - sscanf(s+13, "%lu", (long unsigned int *)&freesize); - } - } - fclose(fm); - - - *pages /= (*pagesize / 1024); - freesize /= (*pagesize / 1024); - /* procpages is now in kb not pages... */ - *procpages /= (*pagesize / 1024); - - rlimsize = util_getvirtualmemsize(); - /* On a 64 bit system, this is uint64 max, but on 32 it's -1 */ - /* Either way, we should be ignoring it at this point if it's infinite */ - if (rlimsize != RLIM_INFINITY) { - /* This is in bytes, make it pages */ - rlimsize = rlimsize / *pagesize; - } - - /* Pages is the total ram on the system. */ - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "pages=%lu, \n", - (unsigned long) *pages); - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "using pages for pages \n"); - - /* Availpages is how much we *could* alloc. 
We should take the smallest: - * - pages - * - getrlimit (availpages) - * - freesize - */ - if (rlimsize == RLIM_INFINITY) { - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "pages=%lu, getrlim=RLIM_INFINITY, freesize=%lu\n", - (unsigned long)*pages, (unsigned long)freesize); - } else { - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "pages=%lu, getrlim=%lu, freesize=%lu\n", - (unsigned long)*pages, (unsigned long)*availpages, (unsigned long)freesize); - } - - if (rlimsize != RLIM_INFINITY && rlimsize < freesize && rlimsize < *pages && rlimsize > 0) { - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "using getrlim for availpages \n"); - *availpages = rlimsize; - } else if (freesize < *pages && freesize > 0) { - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "using freesize for availpages \n"); - *availpages = freesize; - } else { - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "using pages for availpages \n"); - *availpages = *pages; - } - + /* Check we have a valid meminfo struct */ + if (mi->system_available_bytes == 0) { + slapi_log_err(SLAPI_LOG_CRIT, "util_is_cachesize_sane", ""); + return UTIL_CACHESIZE_ERROR; } -#endif /* linux */ - - -#if defined ( hpux ) - { - struct pst_static pst; - int rval = pstat_getstatic(&pst, sizeof(pst), (size_t)1, 0); - if (rval < 0) { /* pstat_getstatic failed */ - return 1; - } - *pagesize = pst.page_size; - *pages = pst.physical_memory; - *availpages = util_getvirtualmemsize() / *pagesize; - if (procpages) - { -#define BURST (size_t)32 /* get BURST proc info at one time... */ - struct pst_status psts[BURST]; - int i, count; - int idx = 0; /* index within the context */ - int mypid = getpid(); - - *procpages = 0; - /* loop until count == 0, will occur all have been returned */ - while ((count = pstat_getproc(psts, sizeof(psts[0]), BURST, idx)) > 0) { - /* got count (max of BURST) this time. process them */ - for (i = 0; i < count; i++) { - if (psts[i].pst_pid == mypid) - { - *procpages = (size_t)(psts[i].pst_dsize + psts[i].pst_tsize + psts[i].pst_ssize); - break; - } - } - if (i < count) - break; - - /* - * now go back and do it again, using the next index after - * the current 'burst' - */ - idx = psts[count-1].pst_idx + 1; - } - } - } -#endif - /* If this is a 32-bit build, it might be running on a 64-bit machine, - * in which case, if the box has tons of ram, we can end up telling - * the auto cache code to use more memory than the process can address. - * so we cap the number returned here. - */ -#if defined(__LP64__) || defined (_LP64) -#else - { -#define GIGABYTE (1024*1024*1024) - size_t one_gig_pages = GIGABYTE / *pagesize; - if (*pages > (2 * one_gig_pages) ) { - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", - "More than 2Gbytes physical memory detected. Since this is a 32-bit process, truncating memory size used for auto cache calculations to 2Gbytes\n"); - *pages = (2 * one_gig_pages); - } - } -#endif - - /* This is stupid. If you set %u to %zu to print a size_t, you get literal %zu in your logs - * So do the filthy cast instead. 
- */ - slapi_log_err(SLAPI_LOG_TRACE,"util_info_sys_pages", "USING pages=%lu, procpages=%lu, availpages=%lu \n", - (unsigned long)*pages, (unsigned long)*procpages, (unsigned long)*availpages); - return 0; - -} - -int util_is_cachesize_sane(size_t *cachesize) -{ - size_t pages = 0; - size_t pagesize = 0; - size_t procpages = 0; - size_t availpages = 0; - - size_t cachepages = 0; - - int issane = 1; - - if (util_info_sys_pages(&pagesize, &pages, &procpages, &availpages) != 0) { - goto out; - } -#ifdef LINUX - /* Linux we calculate availpages correctly, so USE IT */ - if (!pagesize || !availpages) { - goto out; - } -#else - if (!pagesize || !pages) { - goto out; - } -#endif - /* do nothing when we can't get the avail mem */ - - - /* If the requested cache size is larger than the remaining physical memory - * after the current working set size for this process has been subtracted, - * then we say that's insane and try to correct. - */ - - cachepages = *cachesize / pagesize; - slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "cachesize=%lu / pagesize=%lu \n", - (unsigned long)*cachesize,(unsigned long)pagesize); - -#ifdef LINUX - /* Linux we calculate availpages correctly, so USE IT */ - issane = (int)(cachepages <= availpages); - slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "cachepages=%lu <= availpages=%lu\n", - (unsigned long)cachepages,(unsigned long)availpages); - - if (!issane) { + slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "Available bytes %"PRIu64", requested bytes %"PRIu64"\n", mi->system_available_bytes, *cachesize); + if (*cachesize > mi->system_available_bytes) { /* Since we are ask for more than what's available, we give 3/4 of the remaining. * the remaining system mem to the cachesize instead, and log a warning */ - *cachesize = (size_t)((availpages * 0.75 ) * pagesize); - /* These are now trace warnings, because it was to confusing to log this *then* kill the request anyway. - * Instead, we will let the caller worry about the notification, and we'll just use this in debugging and tracing. 
- */ - slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", - "Available pages %lu, requested pages %lu, pagesize %lu\n", (unsigned long)availpages, (unsigned long)cachepages, (unsigned long)pagesize); - slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", - "WARNING adjusted cachesize to %lu\n", (unsigned long)*cachesize); + *cachesize = (mi->system_available_bytes * 0.75); + slapi_log_err(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "Adjusted cachesize to %"PRIu64"\n", *cachesize); + return UTIL_CACHESIZE_REDUCED; } -#else - size_t freepages = 0; - freepages = pages - procpages; - slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "pages=%lu - procpages=%lu\n", - (unsigned long)pages,(unsigned long)procpages); - - issane = (int)(cachepages <= freepages); - slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "cachepages=%lu <= freepages=%lu\n", - (unsigned long)cachepages,(unsigned long)freepages); - - if (!issane) { - *cachesize = (size_t)((pages - procpages) * pagesize); - slapi_log_err(SLAPI_LOG_WARNING, "util_is_cachesize_sane", "WARNING adjusted cachesize to %lu\n", - (unsigned long )*cachesize); - } -#endif -out: - if (!issane) { - slapi_log_err(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "WARNING: Cachesize not sane \n"); - } - - return issane; + return UTIL_CACHESIZE_VALID; } long diff --git a/test/libslapd/spal/meminfo.c b/test/libslapd/spal/meminfo.c new file mode 100644 index 000000000..776141a3b --- /dev/null +++ b/test/libslapd/spal/meminfo.c @@ -0,0 +1,54 @@ +/** BEGIN COPYRIGHT BLOCK + * Copyright (C) 2017 Red Hat, Inc. + * All rights reserved. + * + * License: GPL (version 3 or any later version). + * See LICENSE for details. + * END COPYRIGHT BLOCK **/ + +#include "../../test_slapd.h" + +#include <slapi_pal.h> +#include <slapi-private.h> + +/* + * Assert that our meminfo interface in slapi_pal works. 
+ */ + +void +test_libslapd_pal_meminfo(void **state __attribute__((unused))) { + slapi_pal_meminfo *mi = spal_meminfo_get(); + assert_true(mi->pagesize_bytes > 0); + assert_true(mi->system_total_pages > 0); + assert_true(mi->system_total_bytes > 0); + assert_true(mi->process_consumed_pages > 0); + assert_true(mi->process_consumed_bytes > 0); + assert_true(mi->system_available_pages > 0); + assert_true(mi->system_available_bytes > 0); + spal_meminfo_destroy(mi); +} + +void +test_libslapd_util_cachesane(void **state __attribute__((unused))) { + slapi_pal_meminfo *mi = spal_meminfo_get(); + uint64_t request = 0; + mi->system_available_bytes = 0; + assert_true(util_is_cachesize_sane(mi, &request) == UTIL_CACHESIZE_ERROR); + + // Set the values to known quantities + request = 50000; + mi->system_available_bytes = 99999; + assert_true(util_is_cachesize_sane(mi, &request) == UTIL_CACHESIZE_VALID); + + request = 99999; + assert_true(util_is_cachesize_sane(mi, &request) == UTIL_CACHESIZE_VALID); + + request = 100000; + assert_true(util_is_cachesize_sane(mi, &request) == UTIL_CACHESIZE_REDUCED); + assert_true(request <= 75000); + + spal_meminfo_destroy(mi); +} + + + diff --git a/test/libslapd/test.c b/test/libslapd/test.c index 6e1171a2c..6fa7996c9 100644 --- a/test/libslapd/test.c +++ b/test/libslapd/test.c @@ -26,6 +26,8 @@ run_libslapd_tests (void) { cmocka_unit_test(test_libslapd_operation_v3c_target_spec), cmocka_unit_test(test_libslapd_counters_atomic_usage), cmocka_unit_test(test_libslapd_counters_atomic_overflow), + cmocka_unit_test(test_libslapd_pal_meminfo), + cmocka_unit_test(test_libslapd_util_cachesane), }; return cmocka_run_group_tests(tests, NULL, NULL); } diff --git a/test/test_slapd.h b/test/test_slapd.h index b8f1aba42..50de11b7e 100644 --- a/test/test_slapd.h +++ b/test/test_slapd.h @@ -42,3 +42,8 @@ void test_libslapd_operation_v3c_target_spec(void **state); void test_libslapd_counters_atomic_usage(void **state); void test_libslapd_counters_atomic_overflow(void **state); +/* libslapd-pal-meminfo */ + +void test_libslapd_pal_meminfo(void **state); +void test_libslapd_util_cachesane(void **state); +
0
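The new spal_meminfo_get()/util_is_cachesize_sane() pair above essentially picks the smaller of MemAvailable and the RLIMIT_AS soft limit as the available memory, then shrinks an over-sized cache request to 75% of it. A rough, Linux-only Python rendering of that decision logic for illustration; it skips details the C code handles, such as subtracting the process RSS from the rlimit.

import resource

def available_bytes():
    # Read MemAvailable from /proc/meminfo (the value is reported in kB).
    memavail = 0
    with open("/proc/meminfo") as f:
        for line in f:
            if line.startswith("MemAvailable:"):
                memavail = int(line.split()[1]) * 1024
                break
    # Take the RLIMIT_AS soft limit instead when one is set and is lower.
    soft, _hard = resource.getrlimit(resource.RLIMIT_AS)
    if soft != resource.RLIM_INFINITY and 0 < soft < memavail:
        return soft
    return memavail

def is_cachesize_sane(requested, avail):
    # Mirror util_is_cachesize_sane(): reduce an over-sized request to
    # three quarters of what is available instead of failing outright.
    if avail == 0:
        return ("error", requested)
    if requested > avail:
        return ("reduced", int(avail * 0.75))
    return ("valid", requested)

print(is_cachesize_sane(2 * 1024**3, available_bytes()))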
58902a28c077309cf0e942bfb03762d96eb9c8af
389ds/389-ds-base
Bug 693455 - nsMatchingRule does not work with multiple values https://bugzilla.redhat.com/show_bug.cgi?id=693455 Resolves: bug 693455 Bug Description: nsMatchingRule does not work with multiple values Reviewed by: nkinder (Thanks!) Branch: master Fix Description: The variable isFirst was not being used correctly. Instead of using it, just use tmpBuf[0] to see if this is the first iteration or not. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit 58902a28c077309cf0e942bfb03762d96eb9c8af Author: Rich Megginson <[email protected]> Date: Mon Apr 4 11:55:30 2011 -0600 Bug 693455 - nsMatchingRule does not work with multiple values https://bugzilla.redhat.com/show_bug.cgi?id=693455 Resolves: bug 693455 Bug Description: nsMatchingRule does not work with multiple values Reviewed by: nkinder (Thanks!) Branch: master Fix Description: The variable isFirst was not being used correctly. Instead of using it, just use tmpBuf[0] to see if this is the first iteration or not. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c index 5575ef277..aa47584a8 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c @@ -202,12 +202,11 @@ static int ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, tmpBuf[0] = 0; /* Get the list of matching rules from the entry. */ if (0 == slapi_entry_attr_find(e, "nsMatchingRule", &attr)) { - isFirst = 1; for (i = slapi_attr_first_value(attr, &sval); i != -1; i = slapi_attr_next_value(attr, i, &sval)) { attrValue = slapi_value_get_berval(sval); if (NULL != attrValue->bv_val && strlen(attrValue->bv_val) > 0) { - if (isFirst) { + if (0 == tmpBuf[0]) { ZCAT_SAFE(tmpBuf, "", attrValue->bv_val); } else { ZCAT_SAFE(tmpBuf, ",", attrValue->bv_val);
0
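The bug above is a "first value" flag that was set once and never cleared, so the separator logic for multiple nsMatchingRule values misfired; the fix decides from the state of the buffer itself. A tiny Python sketch of the same idea, with illustrative matching-rule names.

# Build the comma-separated matching-rule list the way the fixed C code
# does: choose the separator from whether the buffer is still empty,
# not from a flag that might never be updated.
values = ["caseIgnoreOrderingMatch", "caseExactOrderingMatch"]  # illustrative

buf = ""
for v in values:
    if not v:
        continue
    buf += v if not buf else "," + v

print(buf)  # caseIgnoreOrderingMatch,caseExactOrderingMatch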
f771ff7e6ea97641d1abfc5816e9631b699454b3
389ds/389-ds-base
Issue 51165 - add more logconv stats for the new access log keywords Description: Add "average" stats for wtime, optime, and etime relates: https://pagure.io/389-ds-base/issue/51165 Reviewed by: firstyear(Thanks!)
commit f771ff7e6ea97641d1abfc5816e9631b699454b3 Author: Mark Reynolds <[email protected]> Date: Sun Jun 28 15:46:59 2020 -0400 Issue 51165 - add more logconv stats for the new access log keywords Description: Add "average" stats for wtime, optime, and etime relates: https://pagure.io/389-ds-base/issue/51165 Reviewed by: firstyear(Thanks!) diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl index 1ed44a888..d23bfb3e8 100755 --- a/ldap/admin/src/logconv.pl +++ b/ldap/admin/src/logconv.pl @@ -1,5 +1,4 @@ #!/usr/bin/env perl - # # BEGIN COPYRIGHT BLOCK # Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. @@ -249,6 +248,12 @@ my $deleteStat; my $modrdnStat; my $compareStat; my $bindCountStat; +my $totalEtime = 0.0; +my $totalWtime = 0.0; +my $totalOpTime = 0.0; +my $etimeCount = 0; +my $wtimeCount = 0; +my $opTimeCount = 0; my %cipher = (); my @removefiles = (); @@ -807,7 +812,7 @@ if ($totalTimeInNsecs == 0){ # # Continue with standard report # -print "Restarts: $serverRestartCount\n"; +print "Restarts: $serverRestartCount\n"; if(%cipher){ print "Secure Protocol Versions:\n"; @@ -817,42 +822,42 @@ if(%cipher){ print "\n"; } -print "Peak Concurrent Connections: $maxsimConnection\n"; -print "Total Operations: $allOps\n"; -print "Total Results: $allResults\n"; +print "Peak Concurrent Connections: $maxsimConnection\n"; +print "Total Operations: $allOps\n"; +print "Total Results: $allResults\n"; my ($perf, $tmp); if ($allOps ne "0"){ - print sprintf "Overall Performance: %.1f%%\n\n" , ($perf = ($tmp = ($allResults / $allOps)*100) > 100 ? 100.0 : $tmp) ; + print sprintf "Overall Performance: %.1f%%\n\n" , ($perf = ($tmp = ($allResults / $allOps)*100) > 100 ? 100.0 : $tmp) ; } else { - print "Overall Performance: No Operations to evaluate\n\n"; + print "Overall Performance: No Operations to evaluate\n\n"; } format STDOUT = -Total Connections: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $connectionCount, $connStat, - - LDAP Connections: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - ($connectionCount - $sslCount - $ldapiCount), $ldapConnStat - - LDAPI Connections: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $ldapiCount, $ldapiConnStat - - LDAPS Connections: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $sslCount, $sslConnStat - - StartTLS Extended Ops: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $startTLSCount, $tlsConnStat - -Searches: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $srchCount, $searchStat, -Modifications: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $modCount, $modStat, -Adds: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $addCount, $addStat, -Deletes: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $delCount, $deleteStat, -Mod RDNs: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $modrdnCount, $modrdnStat, -Compares: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $cmpCount, $compareStat, -Binds: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< - $bindCount, $bindCountStat, +Total Connections: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $connectionCount, $connStat, + - LDAP Connections: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + ($connectionCount - $sslCount - $ldapiCount), $ldapConnStat + - LDAPI Connections: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $ldapiCount, $ldapiConnStat + - LDAPS Connections: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $sslCount, $sslConnStat + - StartTLS Extended Ops: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + 
$startTLSCount, $tlsConnStat + +Searches: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $srchCount, $searchStat, +Modifications: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $modCount, $modStat, +Adds: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $addCount, $addStat, +Deletes: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $delCount, $deleteStat, +Mod RDNs: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $modrdnCount, $modrdnStat, +Compares: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $cmpCount, $compareStat, +Binds: @<<<<<<<<<<<< @<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + $bindCount, $bindCountStat, . write STDOUT; @@ -863,23 +868,32 @@ sub dummy { } print "\n"; -print "Proxied Auth Operations: $proxiedAuthCount\n"; -print "Persistent Searches: $persistentSrchCount\n"; -print "Internal Operations: $internalOpCount\n"; -print "Entry Operations: $entryOpCount\n"; -print "Extended Operations: $extopCount\n"; -print "Abandoned Requests: $abandonCount\n"; -print "Smart Referrals Received: $referralCount\n"; +if ($wtimeCount ne "0") { + print sprintf "Average wtime (wait time): %.9f\n", $totalWtime / $wtimeCount; +} +if ($opTimeCount ne "0") { + print sprintf "Average optime (op time): %.9f\n", $totalOpTime / $opTimeCount; +} +print sprintf "Average etime (elapsed time): %.9f\n", $totalEtime / $etimeCount; + +print "\n"; +print "Proxied Auth Operations: $proxiedAuthCount\n"; +print "Persistent Searches: $persistentSrchCount\n"; +print "Internal Operations: $internalOpCount\n"; +print "Entry Operations: $entryOpCount\n"; +print "Extended Operations: $extopCount\n"; +print "Abandoned Requests: $abandonCount\n"; +print "Smart Referrals Received: $referralCount\n"; print "\n"; -print "VLV Operations: $vlvCount\n"; -print "VLV Unindexed Searches: $vlvNotesACount\n"; -print "VLV Unindexed Components: $vlvNotesUCount\n"; -print "SORT Operations: $vlvSortCount\n"; +print "VLV Operations: $vlvCount\n"; +print "VLV Unindexed Searches: $vlvNotesACount\n"; +print "VLV Unindexed Components: $vlvNotesUCount\n"; +print "SORT Operations: $vlvSortCount\n"; print "\n"; -print "Entire Search Base Queries: $objectclassTopCount\n"; -print "Paged Searches: $pagedSearchCount\n"; -print "Unindexed Searches: $unindexedSrchCountNotesA\n"; -print "Unindexed Components: $unindexedSrchCountNotesU\n"; +print "Entire Search Base Queries: $objectclassTopCount\n"; +print "Paged Searches: $pagedSearchCount\n"; +print "Unindexed Searches: $unindexedSrchCountNotesA\n"; +print "Unindexed Components: $unindexedSrchCountNotesU\n"; if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){ if ($unindexedSrchCountNotesA > 0){ @@ -1053,7 +1067,7 @@ if ($verb eq "yes" || $usage =~ /u/ || $usage =~ /U/){ print "\n"; } -print "Invalid Attribute Filters: $invalidFilterCount\n"; +print "Invalid Attribute Filters: $invalidFilterCount\n"; if ($invalidFilterCount > 0 && $verb eq "yes"){ my $conn_hash = $hashes->{conn_hash}; my $notesf_conn_op = $hashes->{notesf_conn_op}; @@ -1104,10 +1118,10 @@ if ($invalidFilterCount > 0 && $verb eq "yes"){ print "\n"; } -print "FDs Taken: $fdTaken\n"; -print "FDs Returned: $fdReturned\n"; -print "Highest FD Taken: $highestFdTaken\n\n"; -print "Broken Pipes: $brokenPipeCount\n"; +print "FDs Taken: $fdTaken\n"; +print "FDs Returned: $fdReturned\n"; +print "Highest FD Taken: $highestFdTaken\n\n"; +print "Broken Pipes: $brokenPipeCount\n"; if ($brokenPipeCount > 0){ my $rc = $hashes->{rc}; my @etext; @@ -1121,7 +1135,7 @@ if ($brokenPipeCount > 0){ print "\n"; } -print "Connections 
Reset By Peer: $connResetByPeerCount\n"; +print "Connections Reset By Peer: $connResetByPeerCount\n"; if ($connResetByPeerCount > 0){ my $src = $hashes->{src}; my @retext; @@ -1135,7 +1149,7 @@ if ($connResetByPeerCount > 0){ print "\n"; } -print "Resource Unavailable: $resourceUnavailCount\n"; +print "Resource Unavailable: $resourceUnavailCount\n"; if ($resourceUnavailCount > 0){ my $rsrc = $hashes->{rsrc}; my @rtext; @@ -1147,27 +1161,27 @@ if ($resourceUnavailCount > 0){ } print @rtext; } -print "Max BER Size Exceeded: $maxBerSizeCount\n"; +print "Max BER Size Exceeded: $maxBerSizeCount\n"; print "\n"; -print "Binds: $bindCount\n"; -print "Unbinds: $unbindCount\n"; -print "------------------------------"; +print "Binds: $bindCount\n"; +print "Unbinds: $unbindCount\n"; +print "-------------------------------"; print "-" x length $bindCount; print "\n"; -print " - LDAP v2 Binds: $v2BindCount\n"; -print " - LDAP v3 Binds: $v3BindCount\n"; -print " - AUTOBINDs(LDAPI): $autobindCount\n"; -print " - SSL Client Binds: $sslClientBindCount\n"; -print " - Failed SSL Client Binds: $sslClientFailedCount\n"; -print " - SASL Binds: $saslBindCount\n"; +print " - LDAP v2 Binds: $v2BindCount\n"; +print " - LDAP v3 Binds: $v3BindCount\n"; +print " - AUTOBINDs(LDAPI): $autobindCount\n"; +print " - SSL Client Binds: $sslClientBindCount\n"; +print " - Failed SSL Client Binds: $sslClientFailedCount\n"; +print " - SASL Binds: $saslBindCount\n"; if ($saslBindCount > 0){ my $saslmech = $hashes->{saslmech}; foreach my $saslb ( sort {$saslmech->{$b} <=> $saslmech->{$a} } (keys %{$saslmech}) ){ printf " - %-4s: %s\n",$saslb, $saslmech->{$saslb}; } } -print " - Directory Manager Binds: $rootDNBindCount\n"; -print " - Anonymous Binds: $anonymousBindCount\n"; +print " - Directory Manager Binds: $rootDNBindCount\n"; +print " - Anonymous Binds: $anonymousBindCount\n"; ########################################################################## # Verbose Logging Section # @@ -2572,16 +2586,22 @@ sub parseLineNormal } if ($_ =~ /etime= *([0-9.]+)/ ) { my $etime_val = $1; + $totalEtime = $totalEtime + $1; + $etimeCount++; if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{etime}->{$etime_val}++; } if ($reportStats){ inc_stats_val('etime',$etime_val,$s_stats,$m_stats); } } if ($_ =~ /wtime= *([0-9.]+)/ ) { my $wtime_val = $1; + $totalWtime = $totalWtime + $1; + $wtimeCount++; if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{wtime}->{$wtime_val}++; } if ($reportStats){ inc_stats_val('wtime',$wtime_val,$s_stats,$m_stats); } } if ($_ =~ /optime= *([0-9.]+)/ ) { my $optime_val = $1; + $totalOpTime = $totalOpTime + $1; + $opTimeCount++; if ($usage =~ /t/i || $verb eq "yes"){ $hashes->{optime}->{$optime_val}++; } if ($reportStats){ inc_stats_val('optime',$optime_val,$s_stats,$m_stats); } }
0
d7e641286759990ffa6be102f3502587e02c48b9
389ds/389-ds-base
Issue 5671 - covscan - clang warning (#5672) Description: covscan reported CLANG_WARNING relates: https://github.com/389ds/389-ds-base/issues/5671 Reviewed by: @progier389 (Thank you)
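The warning concerned passing a possibly NULL saslmech into strcasecmp(); the fix compares only once the pointer is known to be non-NULL. A minimal sketch of that guard, using a stand-in formatter rather than the real slapi_log_security() context (the SIMPLE fallback below is hypothetical):

```c
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Guarded SASL mechanism formatting: dereference saslmech only when it is
 * non-NULL, mirroring the shape of the covscan fix. */
static void format_mech(char *out, size_t outlen, const char *saslmech)
{
    if (saslmech &&
        (strcasecmp(saslmech, "GSSAPI") == 0 || strcasecmp(saslmech, "DIGEST-MD5") == 0)) {
        snprintf(out, outlen, "SASL/%s", saslmech);
    } else {
        snprintf(out, outlen, "SIMPLE"); /* hypothetical fallback label */
    }
}

int main(void)
{
    char buf[64];
    format_mech(buf, sizeof(buf), "GSSAPI");
    printf("%s\n", buf); /* SASL/GSSAPI */
    format_mech(buf, sizeof(buf), NULL);
    printf("%s\n", buf); /* SIMPLE - no NULL dereference */
    return 0;
}
```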
commit d7e641286759990ffa6be102f3502587e02c48b9 Author: James Chapman <[email protected]> Date: Tue Feb 21 15:14:08 2023 +0000 Issue 5671 - covscan - clang warning (#5672) Description: covscan reported CLANG_WARNING relates: https://github.com/389ds/389-ds-base/issues/5671 Reviewed by: @progier389 (Thank you) diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index cef14a2f3..cf7babfb0 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -3973,9 +3973,11 @@ slapi_log_security(Slapi_PBlock *pb, const char *event_type, const char *msg) } else if (external_bind && pb_conn->c_unix_local) { /* LDAPI */ PR_snprintf(method_and_mech, sizeof(method_and_mech), "LDAPI"); - } else if (!strcasecmp(saslmech, "GSSAPI") || !strcasecmp(saslmech, "DIGEST-MD5")) { - /* SASL */ - PR_snprintf(method_and_mech, sizeof(method_and_mech), "SASL/%s", saslmech); + } else if (saslmech) { + if (!strcasecmp(saslmech, "GSSAPI") || !strcasecmp(saslmech, "DIGEST-MD5")) { + /* SASL */ + PR_snprintf(method_and_mech, sizeof(method_and_mech), "SASL/%s", saslmech); + } } break; default:
0
68d53e39a919560741e47668b8a46b59f4ced524
389ds/389-ds-base
Bug 668619 - slapd stops responding https://bugzilla.redhat.com/show_bug.cgi?id=668619 Description: The server uses the c_timelimit field in the connection table element to detect a stale Simple Paged Results request. The field is supposed to be cleaned up when the stale Simple Paged Results connection is disconnected, but the cleanup was sometimes incomplete. As a result, subsequent requests that happened to acquire the same connection table element timed out. This patch forces c_timelimit to be cleaned up every time the connection table element is acquired and renewed. Also, the timeout check is now done only for Simple Paged Results connections.
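A minimal sketch of the two ideas described above - unconditionally resetting the paged-results bookkeeping when a connection slot is released, and applying the time limit only while a paged search is actually active - using a simplified stand-in for the connection structure (field names follow the diff, but this is not the server code):

```c
#include <stdio.h>
#include <time.h>

/* Simplified stand-in for the relevant Connection fields. */
struct conn {
    void  *c_current_be;          /* non-NULL while a paged search is active */
    int    c_search_result_count;
    time_t c_timelimit;           /* deadline for the paged-results request */
};

/* Always reset the paged-results bookkeeping, so a recycled connection slot
 * never inherits a stale deadline. */
static void paged_results_cleanup(struct conn *c)
{
    c->c_current_be = NULL;
    c->c_search_result_count = 0;
    c->c_timelimit = 0;
}

/* Treat the connection as timed out only when a paged search is in progress
 * and its deadline has passed. */
static int paged_results_timed_out(const struct conn *c, time_t now)
{
    return (c->c_current_be != NULL) && (c->c_timelimit > 0) && (now > c->c_timelimit);
}

int main(void)
{
    time_t now = time(NULL);
    struct conn c = { (void *)&c, 5, now - 10 };  /* paged search whose deadline passed */
    printf("timed out before cleanup: %d\n", paged_results_timed_out(&c, now)); /* 1 */
    paged_results_cleanup(&c);                    /* slot recycled: no stale deadline left */
    printf("timed out after cleanup:  %d\n", paged_results_timed_out(&c, now)); /* 0 */
    return 0;
}
```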
commit 68d53e39a919560741e47668b8a46b59f4ced524 Author: Noriko Hosoi <[email protected]> Date: Mon Jan 31 09:26:07 2011 -0800 Bug 668619 - slapd stops responding https://bugzilla.redhat.com/show_bug.cgi?id=668619 Description: The server uses c_timelimit field in the connection table element to check the staled Simple Paged Results request. The field is supposed to be cleaned up when the staled Simple Paged Results connection is disconnected. But the cleanup was sometimes incomplete. It causes the following requests which happens to acquire the same connection table element timed out. This patch forces to clean up the c_timelimit every time the connection table is acquired and renewed. Also, the timeout check is done only for the Simple Paged Results connection. diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 519546b81..7b00a21c1 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -2721,6 +2721,8 @@ disconnect_server_nomutex( Connection *conn, PRUint64 opconnid, int opid, PRErro if ( ( conn->c_sd != SLAPD_INVALID_SOCKET && conn->c_connid == opconnid ) && !(conn->c_flags & CONN_FLAG_CLOSING) ) { + pagedresults_cleanup(conn); /* In case the connection is on pagedresult */ + /* * PR_Close must be called before anything else is done because * of NSPR problem on NT which requires that the socket on which diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 0242902eb..667826949 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -1221,7 +1221,8 @@ setup_pr_read_pds(Connection_Table *ct, PRFileDesc **n_tcps, PRFileDesc **s_tcps c->c_fdi = SLAPD_INVALID_SOCKET_INDEX; } } - if (c->c_timelimit > 0) /* check timeout for PAGED RESULTS */ + /* check timeout for PAGED RESULTS */ + if (c->c_current_be && (c->c_timelimit > 0)) { time_t ctime = current_time(); if (ctime > c->c_timelimit) diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c index 47b3490c9..a1b0333e6 100644 --- a/ldap/servers/slapd/pagedresults.c +++ b/ldap/servers/slapd/pagedresults.c @@ -381,9 +381,9 @@ pagedresults_cleanup(Connection *conn) conn->c_search_result_set = NULL; } conn->c_current_be = 0; - conn->c_search_result_count = 0; - conn->c_timelimit = 0; rc = 1; } + conn->c_search_result_count = 0; + conn->c_timelimit = 0; return rc; }
0
ed89524bd7499419ebd31531d6a91c0284f60f81
389ds/389-ds-base
Ticket #47448 - Segfault in 389-ds-base-1.3.1.4-1.fc19 when setting up FreeIPA replication https://fedorahosted.org/389/ticket/47448 Reviewed by: lkrispenz (Thanks!) Branch: master Fix Description: valueset_add_valueset() sets the values in the vs1 destination valueset and expects vs1 to be empty, in particular the sorted array. If the source valueset's vs2->sorted is NULL, it assumes vs1->sorted is already NULL, and neither frees it nor sets it to NULL. The fix is to free both vs1->sorted and vs1->va before copying. NOTE: this fixes the crash, but does not address the larger issue that the semantics of valueset_add_valueset are not correct - it should add the values from vs2 to vs1, rather than replace vs1 with vs2. Also added post-condition assertions. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
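A minimal sketch of the "release the destination first, then copy" pattern the fix applies, on a simplified value-set structure rather than the slapd Slapi_ValueSet (names and layout here are illustrative):

```c
#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for a value set: a NULL-terminated value array plus an
 * optional sorted index over it (not the real slapd types). */
struct vset {
    char **va;
    int   *sorted;
    int    num;
};

static void vset_done(struct vset *vs)
{
    if (vs->va) {
        for (int i = 0; i < vs->num; i++)
            free(vs->va[i]);
        free(vs->va);
        vs->va = NULL;
    }
    free(vs->sorted);
    vs->sorted = NULL;
    vs->num = 0;
}

/* Replace dst with a copy of src.  Releasing dst->va and dst->sorted first is
 * the point of the fix: even when src->sorted is NULL, a stale dst->sorted
 * must not be left behind. */
static void vset_copy(struct vset *dst, const struct vset *src)
{
    vset_done(dst);
    if (src->va) {
        dst->va = calloc(src->num + 1, sizeof(char *));
        for (int i = 0; i < src->num; i++)
            dst->va[i] = strdup(src->va[i]);
        dst->num = src->num;
        if (src->sorted) {
            dst->sorted = malloc(src->num * sizeof(int));
            memcpy(dst->sorted, src->sorted, src->num * sizeof(int));
        }
    }
    /* post-condition in the spirit of the PR_ASSERTs added by the patch */
    assert(dst->sorted == NULL || dst->num == 0 ||
           (dst->sorted[0] >= 0 && dst->sorted[0] < dst->num));
}

int main(void)
{
    char *vals[] = { strdup("a"), strdup("b"), NULL };
    int order[] = { 1, 0 };
    struct vset src = { vals, order, 2 };      /* sorted source */
    struct vset dst = { NULL, NULL, 0 };

    vset_copy(&dst, &src);                     /* dst now owns copies of "a", "b" */
    struct vset unsorted = { vals, NULL, 2 };  /* source without a sorted index */
    vset_copy(&dst, &unsorted);                /* old dst->sorted is freed, not left stale */

    vset_done(&dst);
    free(vals[0]);
    free(vals[1]);
    return 0;
}
```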
commit ed89524bd7499419ebd31531d6a91c0284f60f81 Author: Rich Megginson <[email protected]> Date: Mon Jul 29 12:36:02 2013 -0600 Ticket #47448 - Segfault in 389-ds-base-1.3.1.4-1.fc19 when setting up FreeIPA replication https://fedorahosted.org/389/ticket/47448 Reviewed by: lkrispenz (Thanks!) Branch: master Fix Description: valueset_add_valueset() sets the values in the vs1 destination valueset. It expects that vs1 is empty. Particularly, the sorted array. If the source valueset vs2->sorted is NULL, it assumes vs1->sorted is NULL already, and does not free it and set it to NULL. The fix is to free both vs1->sorted and vs1->va. NOTE: this fixes the crash, but does not address the larger issue that the semantics of valueset_add_valueset are not correct - valueset_add_valueset should add the values from vs2 to vs1, rather than replace vs1 with vs2. Also added post-condition assertions. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c index 416a838f8..de576ec58 100644 --- a/ldap/servers/slapd/valueset.c +++ b/ldap/servers/slapd/valueset.c @@ -605,6 +605,7 @@ slapi_valueset_done(Slapi_ValueSet *vs) { if(vs!=NULL) { + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); if(vs->va!=NULL) { valuearray_free(&vs->va); @@ -636,6 +637,7 @@ slapi_valueset_set_from_smod(Slapi_ValueSet *vs, Slapi_Mod *smod) Slapi_Value **va= NULL; valuearray_init_bervalarray(slapi_mod_get_ldapmod_byref(smod)->mod_bvalues, &va); valueset_set_valuearray_passin(vs, va); + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } void @@ -656,7 +658,7 @@ valueset_set_valuearray_byval(Slapi_ValueSet *vs, Slapi_Value **addvals) } } vs->va[j] = NULL; - + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } void @@ -666,6 +668,7 @@ valueset_set_valuearray_passin(Slapi_ValueSet *vs, Slapi_Value **addvals) vs->va= addvals; vs->num = valuearray_count(addvals); vs->max = vs->num + 1; + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } void @@ -765,24 +768,27 @@ valueset_remove_value_sorted(const Slapi_Attr *a, Slapi_ValueSet *vs, const Slap for (i=0; i < vs->num; i++) { if (vs->sorted[i] > index) vs->sorted[i]--; } + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } return r; } + Slapi_Value * valueset_remove_value(const Slapi_Attr *a, Slapi_ValueSet *vs, const Slapi_Value *v) { + Slapi_Value *r = NULL; if (vs->sorted) { - return (valueset_remove_value_sorted(a, vs, v)); + r = valueset_remove_value_sorted(a, vs, v); } else { - Slapi_Value *r= NULL; if(!valuearray_isempty(vs->va)) { r= valuearray_remove_value(a, vs->va, v); if (r) vs->num--; } - return r; } + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); + return r; } /* @@ -809,6 +815,7 @@ valueset_purge(Slapi_ValueSet *vs, const CSN *csn) slapi_ch_free ((void **)&vs->sorted); vs->sorted = NULL; } + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } return 0; } @@ -999,6 +1006,7 @@ valueset_array_to_sorted (const Slapi_Attr *a, Slapi_ValueSet *vs) } vs->sorted[j+1] = swap; } + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } /* insert a value into a sorted 
array, if dupcheck is set no duplicate values will be accepted * (is there a reason to allow duplicates ? LK @@ -1014,10 +1022,12 @@ valueset_insert_value_to_sorted(const Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_V if (vs->num == 0) { vs->sorted[0] = 0; vs->num++; + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); return(0); } else if (valueset_value_cmp (a, vi, vs->va[vs->sorted[vs->num-1]]) > 0 ) { vs->sorted[vs->num] = vs->num; vs->num++; + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); return (vs->num); } v = valueset_find_sorted (a, vs, vi, &index); @@ -1028,6 +1038,7 @@ valueset_insert_value_to_sorted(const Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_V memmove(&vs->sorted[index+1],&vs->sorted[index],(vs->num - index)* sizeof(int)); vs->sorted[index] = vs->num; vs->num++; + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); return(index); } @@ -1116,6 +1127,7 @@ slapi_valueset_add_attr_valuearray_ext(const Slapi_Attr *a, Slapi_ValueSet *vs, } (vs->va)[vs->num] = NULL; + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); return (rc); } @@ -1153,6 +1165,8 @@ valueset_add_valueset(Slapi_ValueSet *vs1, const Slapi_ValueSet *vs2) int i; if (vs1 && vs2) { + valuearray_free(&vs1->va); + slapi_ch_free((void **)&vs1->sorted); if (vs2->va) { /* need to copy valuearray */ if (vs2->max == 0) { @@ -1173,6 +1187,7 @@ valueset_add_valueset(Slapi_ValueSet *vs1, const Slapi_ValueSet *vs2) vs1->sorted = (int *) slapi_ch_malloc( vs1->max* sizeof(int)); memcpy(&vs1->sorted[0],&vs2->sorted[0],vs1->num* sizeof(int)); } + PR_ASSERT((vs1->sorted == NULL) || (vs1->num == 0) || ((vs1->sorted[0] >= 0) && (vs1->sorted[0] < vs1->num))); } } @@ -1327,6 +1342,7 @@ valueset_replace_valuearray_ext(Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value * vs->va = valstoreplace; vs->num = vals_count; vs->max = vals_count + 1; + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } else { /* verify the given values are not duplicated. */ Slapi_ValueSet *vs_new = slapi_valueset_new(); @@ -1347,6 +1363,7 @@ valueset_replace_valuearray_ext(Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value * vs->num = vs_new->num; vs->max = vs_new->max; slapi_valueset_free (vs_new); + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } else { @@ -1354,6 +1371,7 @@ valueset_replace_valuearray_ext(Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value * use them, just delete them */ slapi_valueset_free(vs_new); valuearray_free(&valstoreplace); + PR_ASSERT((vs->sorted == NULL) || (vs->num == 0) || ((vs->sorted[0] >= 0) && (vs->sorted[0] < vs->num))); } } return rc;
0
4934b57afbe68cb7a2d792608ef37b34adc2308b
389ds/389-ds-base
Ticket 50431 - Fix covscan warnings Description: Most of the coverity errors occur in failure and error-handling paths. https://pagure.io/389-ds-base/issue/50431 Reviewed by: firstyear & spichugi (Thanks!)
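One pattern that recurs throughout this diff is freeing a returned directory path only when the helper actually heap-allocated it instead of returning the caller's stack buffer (the inst_dirp != inst_dir checks). A minimal sketch of that convention with a hypothetical helper, not the real dblayer_get_full_inst_dir() signature:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PATH_BUF 256

/* Hypothetical helper modelled on the "return the caller's buffer if the path
 * fits, otherwise heap-allocate" convention.  The caller must free the result
 * only when it differs from the buffer it passed in. */
static char *get_full_dir(const char *parent, const char *name, char *buf, size_t buflen)
{
    size_t need = strlen(parent) + 1 + strlen(name) + 1;
    if (need <= buflen) {
        snprintf(buf, buflen, "%s/%s", parent, name);
        return buf;
    }
    char *heap = malloc(need);
    if (heap) {
        snprintf(heap, need, "%s/%s", parent, name);
    }
    return heap;
}

int main(void)
{
    char inst_dir[PATH_BUF];
    char *inst_dirp = get_full_dir("/var/lib/dirsrv", "slapd-example/db",
                                   inst_dir, sizeof(inst_dir));
    if (!inst_dirp || !*inst_dirp) {
        /* error path: still free only heap-allocated results */
        if (inst_dirp != inst_dir) {
            free(inst_dirp);
        }
        return 1;
    }
    printf("%s\n", inst_dirp);
    if (inst_dirp != inst_dir) {   /* the recurring covscan fix */
        free(inst_dirp);
    }
    return 0;
}
```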
commit 4934b57afbe68cb7a2d792608ef37b34adc2308b Author: Mark Reynolds <[email protected]> Date: Wed Jun 5 14:09:52 2019 -0400 Ticket 50431 - Fix covscan warnings Description: Most coverity errors happen when something fails. https://pagure.io/389-ds-base/issue/50431 Reviewed by: firstyear & spichugi(Thanks!) diff --git a/autogen.sh b/autogen.sh index 8bb628b25..06a5facd1 100755 --- a/autogen.sh +++ b/autogen.sh @@ -35,12 +35,16 @@ checkvers() { vers="$1"; shift needmaj="$1"; shift needmin="$1"; shift - needrev="$1"; shift + if [ "$#" != "0" ]; then + needrev="$1"; shift + fi verslist=`echo $vers | tr '.' ' '` set $verslist maj=$1; shift min=$1; shift - rev=$1; shift + if [ "$#" != "0" ]; then + rev=$1; shift + fi if [ "$maj" -gt "$needmaj" ] ; then return 0; fi if [ "$maj" -lt "$needmaj" ] ; then return 1; fi # if we got here, maj == needmaj diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c index 6d105f4fa..5680de669 100644 --- a/ldap/servers/plugins/acl/acl.c +++ b/ldap/servers/plugins/acl/acl.c @@ -644,7 +644,8 @@ cleanup_and_ret: if (aclpb) aclpb->aclpb_curr_attrEval = NULL; - print_access_control_summary("main", ret_val, clientDn, aclpb, right, + print_access_control_summary("main", ret_val, clientDn, aclpb, + (right ? right : "NULL"), (attr ? attr : "NULL"), n_edn, &decision_reason); TNF_PROBE_0_DEBUG(acl_cleanup_end, "ACL", ""); @@ -2590,12 +2591,9 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a * that applies to the current attribute. * Then the (attribute,value) pair being added/deleted better * match that filter. - * - * */ - Targetattrfilter **attrFilterArray = NULL; - Targetattrfilter *attrFilter; + Targetattrfilter *attrFilter = NULL; int found = 0; if ((aclpb->aclpb_access & ACLPB_SLAPI_ACL_WRITE_ADD) && @@ -2606,15 +2604,13 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a attrFilterArray = aci->targetAttrDelFilters; } - /* * Scan this filter list for an applicable filter. */ - found = 0; num_attrs = 0; - while (attrFilterArray[num_attrs] && !found) { + while (attrFilterArray && attrFilterArray[num_attrs] && !found) { attrFilter = attrFilterArray[num_attrs]; /* If this filter applies to the attribute, stop. */ @@ -2630,8 +2626,7 @@ acl__resource_match_aci(Acl_PBlock *aclpb, aci_t *aci, int skip_attrEval, int *a * Here, if found an applicable filter, then apply the filter to the * (attr,val) pair. * Otherwise, ignore the targetattrfilters. - */ - + */ if (found) { if (acl__make_filter_test_entry(&aclpb->aclpb_filter_test_entry, diff --git a/ldap/servers/plugins/acl/acleffectiverights.c b/ldap/servers/plugins/acl/acleffectiverights.c index 8a0cb9122..5dd46a064 100644 --- a/ldap/servers/plugins/acl/acleffectiverights.c +++ b/ldap/servers/plugins/acl/acleffectiverights.c @@ -869,14 +869,14 @@ _ger_generate_template_entry( if (dntype) { siz += strlen(dntype) + 30 + strlen(object) + strlen(dn); } else { - siz += strlen(attrs[0]) + 30 + strlen(object) + strlen(dn); + siz += strlen(attrs[0] ? attrs[0] : "") + 30 + strlen(object) + strlen(dn); } } else { /* dn: <attr>=<template_name>\n\0 */ if (dntype) { siz += strlen(dntype) + 30 + strlen(object); } else { - siz += strlen(attrs[0]) + 30 + strlen(object); + siz += strlen(attrs[0] ? attrs[0] : "") + 30 + strlen(object); } } templateentry = (char *)slapi_ch_malloc(siz); @@ -1030,7 +1030,9 @@ bailout: * slapi_pblock_set() will free any previous data, and * pblock_done() will free SLAPI_PB_RESULT_TEXT. 
*/ - slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, gerstr); + if (gerstr) { + slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, gerstr); + } if (!iscritical) { /* @@ -1040,7 +1042,7 @@ bailout: rc = LDAP_SUCCESS; } - slapi_ch_free((void **)&subjectndn); - slapi_ch_free((void **)&gerstr); + slapi_ch_free_string(&subjectndn); + slapi_ch_free_string(&gerstr); return rc; } diff --git a/ldap/servers/plugins/acl/acllist.c b/ldap/servers/plugins/acl/acllist.c index 79786b723..e80c567c3 100644 --- a/ldap/servers/plugins/acl/acllist.c +++ b/ldap/servers/plugins/acl/acllist.c @@ -255,7 +255,9 @@ __acllist_add_aci(aci_t *aci) t_aci = t_aci->aci_next; /* Now add the new one to the end of the list */ - t_aci->aci_next = aci; + if (t_aci) { + t_aci->aci_next = aci; + } slapi_log_err(SLAPI_LOG_ACL, plugin_name, "__acllist_add_aci - Added the ACL:%s to existing container:[%d]%s\n", aci->aclName, head->acic_index, slapi_sdn_get_ndn(head->acic_sdn)); diff --git a/ldap/servers/plugins/linkedattrs/linked_attrs.c b/ldap/servers/plugins/linkedattrs/linked_attrs.c index f6eee1957..4f9fb102b 100644 --- a/ldap/servers/plugins/linkedattrs/linked_attrs.c +++ b/ldap/servers/plugins/linkedattrs/linked_attrs.c @@ -1256,7 +1256,9 @@ linked_attrs_del_backpointers(Slapi_PBlock *pb, char *linkdn, struct configEntry slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &pre_e); slapi_entry_attr_find(pre_e, config->linktype, &pre_attr); - slapi_attr_get_valueset(pre_attr, &vals); + if (pre_attr) { + slapi_attr_get_valueset(pre_attr, &vals); + } } else { vals = slapi_valueset_new(); slapi_valueset_set_from_smod(vals, smod); diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c index 89fd012e7..124217ee0 100644 --- a/ldap/servers/plugins/memberof/memberof_config.c +++ b/ldap/servers/plugins/memberof/memberof_config.c @@ -550,13 +550,17 @@ memberof_apply_config(Slapi_PBlock *pb __attribute__((unused)), } /* Build the new list */ - for (i = 0; theConfig.group_slapiattrs && theConfig.groupattrs && theConfig.groupattrs[i]; i++) { + for (i = 0; theConfig.group_slapiattrs && theConfig.group_slapiattrs[i] && + theConfig.groupattrs && theConfig.groupattrs[i]; i++) + { theConfig.group_slapiattrs[i] = slapi_attr_new(); slapi_attr_init(theConfig.group_slapiattrs[i], theConfig.groupattrs[i]); } /* Terminate the list. */ - theConfig.group_slapiattrs[i] = NULL; + if (theConfig.group_slapiattrs) { + theConfig.group_slapiattrs[i] = NULL; + } /* The filter is based off of the groupattr, so we update it here too. */ slapi_filter_free(theConfig.group_filter, 1); @@ -736,7 +740,9 @@ memberof_copy_config(MemberOfConfig *dest, MemberOfConfig *src) } /* Terminate the array. 
*/ - dest->group_slapiattrs[i] = NULL; + if (dest->group_slapiattrs) { + dest->group_slapiattrs[i] = NULL; + } } if (src->memberof_attr) { diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c index 8e5ca4fff..c56e55f49 100644 --- a/ldap/servers/plugins/posix-winsync/posix-winsync.c +++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c @@ -1114,7 +1114,7 @@ posix_winsync_pre_ds_mod_group_cb(void *cbdata __attribute__((unused)), slapi_value_init_string(voc, "posixGroup"); slapi_entry_attr_find(ds_entry, "objectClass", &oc_attr); - if (slapi_attr_value_find(oc_attr, slapi_value_get_berval(voc)) != 0) { + if (oc_attr && slapi_attr_value_find(oc_attr, slapi_value_get_berval(voc)) != 0) { Slapi_ValueSet *oc_vs = NULL; Slapi_Value *oc_nv = slapi_value_new(); diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c index c035db290..6b5b28b0b 100644 --- a/ldap/servers/plugins/replication/cl5_api.c +++ b/ldap/servers/plugins/replication/cl5_api.c @@ -2749,6 +2749,7 @@ _cl5UpgradeMajor(char *fromVersion, char *toVersion) if (rc != CL5_SUCCESS) { slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name_cl, "_cl5UpgradeMajor - Failed to open the db env\n"); + s_cl5Desc.dbOpenMode = backup; return rc; } s_cl5Desc.dbOpenMode = backup; diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c index b3d619862..6a5363e43 100644 --- a/ldap/servers/plugins/replication/repl5_replica.c +++ b/ldap/servers/plugins/replication/repl5_replica.c @@ -2317,7 +2317,7 @@ replica_check_for_tasks(time_t when __attribute__((unused)), void *arg) "missing original task flag. Aborting abort task!\n", clean_vals[i]); replica_delete_task_config(e, (char *)type_replicaAbortCleanRUV, orig_val); - goto done; + goto done2; } if (!is_cleaned_rid(rid)) { diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c index 02b645f41..7649aa14e 100644 --- a/ldap/servers/plugins/replication/repl5_replica_config.c +++ b/ldap/servers/plugins/replication/repl5_replica_config.c @@ -223,6 +223,7 @@ replica_config_add(Slapi_PBlock *pb __attribute__((unused)), } slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_add - "MSG_NOREPLICANORMRDN); slapi_rdn_free(&replicardn); + slapi_ch_free_string(&replica_root); *returncode = LDAP_UNWILLING_TO_PERFORM; return SLAPI_DSE_CALLBACK_ERROR; } else { @@ -232,6 +233,7 @@ replica_config_add(Slapi_PBlock *pb __attribute__((unused)), } slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,"replica_config_add - "MSG_CNREPLICA, nrdn, REPLICA_RDN); slapi_rdn_free(&replicardn); + slapi_ch_free_string(&replica_root); *returncode = LDAP_UNWILLING_TO_PERFORM; return SLAPI_DSE_CALLBACK_ERROR; } @@ -242,6 +244,7 @@ replica_config_add(Slapi_PBlock *pb __attribute__((unused)), strcpy(errortext, MSG_NOREPLICARDN); } slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_add - "MSG_NOREPLICARDN); + slapi_ch_free_string(&replica_root); *returncode = LDAP_UNWILLING_TO_PERFORM; return SLAPI_DSE_CALLBACK_ERROR; } @@ -287,7 +290,7 @@ done: PR_Unlock(s_configLock); /* slapi_ch_free accepts NULL pointer */ - slapi_ch_free((void **)&replica_root); + slapi_ch_free_string(&replica_root); if (*returncode != LDAP_SUCCESS) { if (mtnode_ext->replica) @@ -2083,7 +2086,6 @@ check_replicas_are_done_cleaning(cleanruv_data *data) while (not_all_cleaned && !is_task_aborted(data->rid) && !slapi_is_shutting_down()) { 
agmt_obj = agmtlist_get_first_agreement_for_replica(data->replica); if (agmt_obj == NULL) { - not_all_cleaned = 0; break; } while (agmt_obj && !slapi_is_shutting_down()) { @@ -2196,7 +2198,6 @@ check_replicas_are_done_aborting(cleanruv_data *data) while (not_all_aborted && !slapi_is_shutting_down()) { agmt_obj = agmtlist_get_first_agreement_for_replica(data->replica); if (agmt_obj == NULL) { - not_all_aborted = 0; break; } while (agmt_obj && !slapi_is_shutting_down()) { @@ -2803,6 +2804,7 @@ delete_cleaned_rid_config(cleanruv_data *clean_data) cleanruv_log(clean_data->task, clean_data->rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "delete_cleaned_rid_config - Failed to remove task data from (%s) error (%d), rid (%d)", edn, rc, clean_data->rid); + slapi_ch_array_free(attr_val); goto bail; } } diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c index 37fe77379..b134409e4 100644 --- a/ldap/servers/plugins/replication/urp.c +++ b/ldap/servers/plugins/replication/urp.c @@ -1602,6 +1602,7 @@ urp_find_tombstone_for_glue (Slapi_PBlock *pb, char *sessionid, const Slapi_Entr for (int i = 0; entries && (entries[i] != NULL); i++) { char *tombstone_csn_value = slapi_entry_attr_get_charptr(entries[i], "nstombstonecsn"); if (tombstone_csn_value) { + csn_free(&tombstone_csn); tombstone_csn = csn_new_by_string(tombstone_csn_value); slapi_ch_free_string(&tombstone_csn_value); if( csn_compare(tombstone_csn, conflict_csn) > 0 ) { diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c index 1e5865af8..4e3b0af54 100644 --- a/ldap/servers/plugins/roles/roles_cache.c +++ b/ldap/servers/plugins/roles/roles_cache.c @@ -1276,6 +1276,8 @@ roles_cache_create_object_from_entry(Slapi_Entry *role_entry, role_object **resu if (rc == 0) { *result = this_role; + } else { + slapi_ch_free((void **)&this_role); } slapi_log_err(SLAPI_LOG_PLUGIN, ROLES_PLUGIN_SUBSYSTEM, diff --git a/ldap/servers/plugins/views/views.c b/ldap/servers/plugins/views/views.c index 6f784f599..5d8464761 100644 --- a/ldap/servers/plugins/views/views.c +++ b/ldap/servers/plugins/views/views.c @@ -783,10 +783,12 @@ views_cache_create_applied_filter(viewEntry *pView) "views_cache_create_applied_filter - View filter [%s] in entry [%s] is not valid\n", buf, current->pDn); } - if (pBuiltFilter && pCurrentFilter) + if (pBuiltFilter && pCurrentFilter) { pBuiltFilter = slapi_filter_join_ex(LDAP_FILTER_AND, pBuiltFilter, pCurrentFilter, 0); - else + } else { + slapi_filter_free(pBuiltFilter, 1); pBuiltFilter = pCurrentFilter; + } slapi_ch_free((void **)&buf); @@ -952,10 +954,12 @@ views_cache_create_descendent_filter(viewEntry *ancestor, PRBool useEntryID) "views_cache_create_descendent_filter - View filter [%s] in entry [%s] is invalid\n", buf, currentChild->pDn); } - if (pOrSubFilter && pCurrentFilter) + if (pOrSubFilter && pCurrentFilter) { pOrSubFilter = slapi_filter_join_ex(LDAP_FILTER_OR, pOrSubFilter, pCurrentFilter, 0); - else + } else { + slapi_filter_free(pOrSubFilter, 1); pOrSubFilter = pCurrentFilter; + } PR_smprintf_free(buf); } @@ -1756,7 +1760,9 @@ view_search_rewrite_callback(Slapi_PBlock *pb) #endif /* make it happen */ - slapi_pblock_set(pb, SLAPI_SEARCH_FILTER, outFilter); + if (outFilter) { + slapi_pblock_set(pb, SLAPI_SEARCH_FILTER, outFilter); + } ret = -2; diff --git a/ldap/servers/slapd/agtmmap.c b/ldap/servers/slapd/agtmmap.c index 352ccefda..bc5fe1ee1 100644 --- a/ldap/servers/slapd/agtmmap.c +++ b/ldap/servers/slapd/agtmmap.c @@ -243,7 +243,7 @@ agt_mread_stats(int 
hdl, struct hdr_stats_t *pHdrInfo, struct ops_stats_t *pDsOp return (EINVAL); /* Inavlid handle */ } - if (mmap_tbl[hdl].fp <= (caddr_t)0) { + if (mmap_tbl[hdl].fp <= 0) { return (EFAULT); /* Something got corrupted */ } diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index f813447b6..1fee8050a 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -3009,7 +3009,7 @@ dblayer_erase_index_file_ex(backend *be, struct attrinfo *a, PRBool use_lock, in ldbm_instance *inst = NULL; dblayer_handle *handle = NULL; char dbName[MAXPATHLEN] = {0}; - char *dbNamep; + char *dbNamep = NULL; char *p; int dbbasenamelen, dbnamelen; int rc = 0; @@ -3102,11 +3102,12 @@ dblayer_erase_index_file_ex(backend *be, struct attrinfo *a, PRBool use_lock, in sprintf(p, "%c%s%s", get_sep(dbNamep), a->ai_type, LDBM_FILENAME_SUFFIX); rc = dblayer_db_remove_ex(pEnv, dbNamep, 0, 0); a->ai_dblayer = NULL; - if (dbNamep != dbName) - slapi_ch_free_string(&dbNamep); } else { rc = -1; } + if (dbNamep != dbName) { + slapi_ch_free_string(&dbNamep); + } slapi_ch_free((void **)&handle); } else { /* no handle to close */ @@ -5661,7 +5662,9 @@ dblayer_copy_directory(struct ldbminfo *li, inst_dir, MAXPATHLEN); if (!inst_dirp || !*inst_dirp) { slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_directory", "Instance dir is NULL.\n"); - slapi_ch_free_string(&inst_dirp); + if (inst_dirp != inst_dir) { + slapi_ch_free_string(&inst_dirp); + } return return_value; } len = strlen(inst_dirp); @@ -5975,7 +5978,9 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task) slapi_task_log_notice(task, "Backup: Instance dir is empty\n"); } - slapi_ch_free_string(&inst_dirp); + if (inst_dirp != inst_dir) { + slapi_ch_free_string(&inst_dirp); + } return_value = -1; goto bail; } @@ -5993,8 +5998,9 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task) "(%s -> %s): err=%d\n", inst_dirp, dest_dir, return_value); } - if (inst_dirp != inst_dir) + if (inst_dirp != inst_dir) { slapi_ch_free_string(&inst_dirp); + } goto bail; } if (inst_dirp != inst_dir) @@ -6292,7 +6298,6 @@ dblayer_copy_dirand_contents(char *src_dir, char *dst_dir, int mode, Slapi_Task break; } - PR_snprintf(filename1, MAXPATHLEN, "%s/%s", src_dir, direntry->name); PR_snprintf(filename2, MAXPATHLEN, "%s/%s", dst_dir, direntry->name); slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_dirand_contents", "Moving file %s\n", @@ -6305,8 +6310,7 @@ dblayer_copy_dirand_contents(char *src_dir, char *dst_dir, int mode, Slapi_Task mode, task); if (return_value) { if (task) { - slapi_task_log_notice(task, - "Failed to copy directory %s", filename1); + slapi_task_log_notice(task, "Failed to copy directory %s", filename1); } break; } @@ -6523,13 +6527,13 @@ dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task, char *bena return LDAP_UNWILLING_TO_PERFORM; } if (!dbversion_exists(li, src_dir)) { - slapi_log_err(SLAPI_LOG_ERR, "dblayer_restore", "Backup directory %s does not " - "contain a complete backup\n", + slapi_log_err(SLAPI_LOG_ERR, "dblayer_restore", + "Backup directory %s does not contain a complete backup\n", src_dir); if (task) { - slapi_task_log_notice(task, "Restore: backup directory %s does not " - "contain a complete backup", - src_dir); + slapi_task_log_notice(task, + "Restore: backup directory %s does not contain a complete backup", + src_dir); } return LDAP_UNWILLING_TO_PERFORM; } @@ -6585,13 +6589,10 @@ dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task 
*task, char *bena } if (slapd_comp_path(src_dir, inst->inst_parent_dir_name) == 0) { slapi_log_err(SLAPI_LOG_ERR, - "dblayer_restore", "Backup dir %s and target dir %s " - "are identical\n", + "dblayer_restore", "Backup dir %s and target dir %s are identical\n", src_dir, inst->inst_parent_dir_name); if (task) { slapi_task_log_notice(task, - "Restore: backup dir %s and target dir %s " - "are identical\n", src_dir, inst->inst_parent_dir_name); } PR_CloseDir(dirhandle); @@ -7060,8 +7061,12 @@ dblayer_get_instance_data_dir(backend *be) full_namep = dblayer_get_full_inst_dir(inst->inst_li, inst, full_name, MAXPATHLEN); if (!full_namep || !*full_namep) { + if (full_namep != full_name) { + slapi_ch_free_string(&full_namep); + } return ret; } + /* Does this directory already exist? */ if ((db_dir = PR_OpenDir(full_namep)) != NULL) { /* yep. */ @@ -7072,8 +7077,9 @@ dblayer_get_instance_data_dir(backend *be) ret = mkdir_p(full_namep, 0700); } - if (full_name != full_namep) + if (full_name != full_namep) { slapi_ch_free_string(&full_namep); + } return ret; } @@ -7097,7 +7103,6 @@ dblayer_in_import(ldbm_instance *inst) inst_dirp = dblayer_get_full_inst_dir(inst->inst_li, inst, inst_dir, MAXPATHLEN); if (!inst_dirp || !*inst_dirp) { - slapi_ch_free_string(&inst_dirp); rval = -1; goto done; } @@ -7117,8 +7122,9 @@ dblayer_in_import(ldbm_instance *inst) } PR_CloseDir(dirhandle); done: - if (inst_dirp != inst_dir) + if (inst_dirp != inst_dir) { slapi_ch_free_string(&inst_dirp); + } return rval; } @@ -7149,7 +7155,9 @@ dblayer_update_db_ext(ldbm_instance *inst, char *oldext, char *newext) if (NULL == inst_dirp || '\0' == *inst_dirp) { slapi_log_err(SLAPI_LOG_ERR, "dblayer_update_db_ext", "Instance dir is NULL\n"); - slapi_ch_free_string(&inst_dirp); + if (inst_dirp != inst_dir) { + slapi_ch_free_string(&inst_dirp); + } return -1; /* non zero */ } for (a = (struct attrinfo *)avl_getfirst(inst->inst_attrs); @@ -7210,8 +7218,9 @@ dblayer_update_db_ext(ldbm_instance *inst, char *oldext, char *newext) done: slapi_ch_free_string(&ofile); slapi_ch_free_string(&nfile); - if (inst_dirp != inst_dir) + if (inst_dirp != inst_dir) { slapi_ch_free_string(&inst_dirp); + } return rval; } diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c index f0b969ff4..2a7529b81 100644 --- a/ldap/servers/slapd/back-ldbm/index.c +++ b/ldap/servers/slapd/back-ldbm/index.c @@ -1776,13 +1776,14 @@ index_range_read_ext( } #endif error: + slapi_log_err(SLAPI_LOG_TRACE, "index_range_read_ext", "(%s,%s) %lu candidates\n", + type, prefix ? 
prefix : "", (u_long)IDL_NIDS(idl)); + index_free_prefix(prefix); DBT_FREE_PAYLOAD(cur_key); DBT_FREE_PAYLOAD(upperkey); - dblayer_release_index_file(be, ai, db); - slapi_log_err(SLAPI_LOG_TRACE, "index_range_read_ext", "(%s,%s) %lu candidates\n", - type, prefix, (u_long)IDL_NIDS(idl)); + return (idl); } @@ -2406,7 +2407,7 @@ index_free_prefix(char *prefix) prefix == prefix_SUB) { /* do nothing */ } else { - slapi_ch_free((void **)&prefix); + slapi_ch_free_string(&prefix); } } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c index 0d82ae92b..a2585575f 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_add.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c @@ -162,7 +162,8 @@ ldbm_back_add(Slapi_PBlock *pb) txn.back_txn_txn = parent_txn; } else { parent_txn = txn.back_txn_txn; - slapi_pblock_set(pb, SLAPI_TXN, parent_txn); + if (parent_txn) + slapi_pblock_set(pb, SLAPI_TXN, parent_txn); } /* The dblock serializes writes to the database, diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c index 9ecb09903..fd2c7dbc8 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c @@ -142,7 +142,7 @@ ldbm_instance_attrcrypt_config_add_callback(Slapi_PBlock *pb __attribute__((unus /* If the cipher was invalid, return unwilling to perform */ if (0 == cipher) { - returntext = "invalid cipher"; + PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "invalid cipher"); *returncode = LDAP_UNWILLING_TO_PERFORM; ret = SLAPI_DSE_CALLBACK_ERROR; } else { @@ -167,7 +167,7 @@ ldbm_instance_attrcrypt_config_add_callback(Slapi_PBlock *pb __attribute__((unus slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_attrcrypt_config_add_callback - " "Attempt to encryption on a non-existent attribute: %s\n", attribute_name, 0, 0); - returntext = "attribute does not exist"; + PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "attribute does not exist"); *returncode = LDAP_UNWILLING_TO_PERFORM; ret = SLAPI_DSE_CALLBACK_ERROR; } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index e9f3e32cc..76e37ae14 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -113,7 +113,8 @@ ldbm_back_delete(Slapi_PBlock *pb) txn.back_txn_txn = parent_txn; } else { parent_txn = txn.back_txn_txn; - slapi_pblock_set(pb, SLAPI_TXN, parent_txn); + if (parent_txn) + slapi_pblock_set(pb, SLAPI_TXN, parent_txn); } if (pb_conn) { diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c index 60437f116..fb0fc5d1e 100644 --- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c @@ -1154,7 +1154,7 @@ ldbm_back_ldbm2ldif(Slapi_PBlock *pb) int task_flags; Slapi_Task *task; int run_from_cmdline = 0; - char *instance_name; + char *instance_name = NULL; ldbm_instance *inst = NULL; int str2entry_options = 0; int retry; @@ -1203,11 +1203,11 @@ ldbm_back_ldbm2ldif(Slapi_PBlock *pb) goto bye; } + slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_name); if (run_from_cmdline) { /* Now that we have processed the config information, we look for * the be that should do the db2ldif. 
*/ - slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_name); inst = ldbm_instance_find_by_name(li, instance_name); if (NULL == inst) { slapi_task_log_notice(task, "Unknown backend instance: %s", instance_name); diff --git a/ldap/servers/slapd/back-ldbm/vlv_srch.c b/ldap/servers/slapd/back-ldbm/vlv_srch.c index 368417483..1ac3e009e 100644 --- a/ldap/servers/slapd/back-ldbm/vlv_srch.c +++ b/ldap/servers/slapd/back-ldbm/vlv_srch.c @@ -168,7 +168,8 @@ vlvSearch_init(struct vlvSearch *p, Slapi_PBlock *pb, const Slapi_Entry *e, ldbm /* switch context back to the DSE backend */ slapi_pblock_set(pb, SLAPI_BACKEND, oldbe); - slapi_pblock_set(pb, SLAPI_PLUGIN, oldbe ? oldbe->be_database: NULL); + if (oldbe) + slapi_pblock_set(pb, SLAPI_PLUGIN, oldbe->be_database); } /* make (&(parentid=idofbase)(|(originalfilter)(objectclass=referral))) */ diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 1dc53434c..d502dff4b 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -1860,6 +1860,7 @@ connection_threadmain() signal_listner(); } } else if (1 == is_timedout) { + /* covscan reports this code is unreachable (2019/6/4) */ connection_make_readable_nolock(conn); signal_listner(); } diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c index 1f1f51630..125684329 100644 --- a/ldap/servers/slapd/dse.c +++ b/ldap/servers/slapd/dse.c @@ -2530,7 +2530,8 @@ dse_delete(Slapi_PBlock *pb) /* JCM There should only be one exit point from thi dse_call_callback(pdse, pb, SLAPI_OPERATION_DELETE, DSE_FLAG_POSTOP, ec, NULL, &returncode, returntext); done: slapi_pblock_get(pb, SLAPI_DELETE_BEPOSTOP_ENTRY, &orig_entry); - slapi_pblock_set(pb, SLAPI_DELETE_BEPOSTOP_ENTRY, ec); + if (ec) + slapi_pblock_set(pb, SLAPI_DELETE_BEPOSTOP_ENTRY, ec); /* make sure OPRETURN and RESULT_CODE are set */ slapi_pblock_get(pb, SLAPI_PLUGIN_OPRETURN, &rc); if (returncode || rc) { @@ -2571,8 +2572,8 @@ done: rc = LDAP_UNWILLING_TO_PERFORM; } } - - slapi_pblock_set(pb, SLAPI_DELETE_BEPOSTOP_ENTRY, orig_entry); + if (orig_entry) + slapi_pblock_set(pb, SLAPI_DELETE_BEPOSTOP_ENTRY, orig_entry); slapi_send_ldap_result(pb, returncode, NULL, returntext, 0, NULL); return dse_delete_return(returncode, ec); } diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c index 080eb15aa..5d1d7238a 100644 --- a/ldap/servers/slapd/entrywsi.c +++ b/ldap/servers/slapd/entrywsi.c @@ -359,7 +359,6 @@ entry_add_present_attribute_wsi(Slapi_Entry *e, Slapi_Attr *a) * Preserves LDAP Information Model constraints, * returning an LDAP result code. 
*/ -static void entry_dump_stateinfo(char *msg, Slapi_Entry* e); static Slapi_Value *attr_most_recent_deleted_value(Slapi_Attr *a); static void resolve_single_valued_two_values(Slapi_Entry *e, Slapi_Attr *a, int attribute_state, Slapi_Value *current_value, Slapi_Value *second_current_value); static void resolve_single_valued_check_restore_deleted_value(Slapi_Entry *e, Slapi_Attr *a); @@ -397,6 +396,7 @@ entry_add_present_values_wsi(Slapi_Entry *e, const char *type, struct berval **b /* Used for debug purpose, it dumps into the error log the * entry with the replication stateinfo */ +#if 0 static void entry_dump_stateinfo(char *msg, Slapi_Entry* e) { @@ -407,6 +407,7 @@ entry_dump_stateinfo(char *msg, Slapi_Entry* e) slapi_log_err(SLAPI_LOG_ERR, msg, "%s\n", s); slapi_ch_free((void **)&s); } +#endif static int entry_add_present_values_wsi_single_valued(Slapi_Entry *e, const char *type, struct berval **bervals, const CSN *csn, int urp, long flags) @@ -1270,7 +1271,7 @@ attr_most_recent_deleted_value(Slapi_Attr *a) most_recent_v = v; while (i != -1) { - vdcsn = value_get_csn(v, CSN_TYPE_VALUE_DELETED); + vdcsn = (CSN *)value_get_csn(v, CSN_TYPE_VALUE_DELETED); if (csn_compare((const CSN *)most_recent_vdcsn, (const CSN *)vdcsn) < 0) { most_recent_v = v; @@ -1289,20 +1290,20 @@ static void resolve_single_valued_two_values(Slapi_Entry *e, Slapi_Attr *a, int attribute_state, Slapi_Value *current_value, Slapi_Value *second_current_value) { - CSN *current_value_vucsn; - CSN *second_current_value_vucsn; + const CSN *current_value_vucsn; + const CSN *second_current_value_vucsn; Slapi_Value *value_to_zap; current_value_vucsn = value_get_csn(current_value, CSN_TYPE_VALUE_UPDATED); second_current_value_vucsn = value_get_csn(second_current_value, CSN_TYPE_VALUE_UPDATED); /* First determine which present value will be zapped */ - if (csn_compare((const CSN *)second_current_value_vucsn, (const CSN *)current_value_vucsn) < 0) { + if (csn_compare(second_current_value_vucsn, current_value_vucsn) < 0) { /* * The second value is older but was distinguished at the time the current value was added * then the second value should become current */ - if (value_distinguished_at_csn(e, a, second_current_value, (const CSN *)current_value_vucsn)) { + if (value_distinguished_at_csn(e, a, second_current_value, current_value_vucsn)) { value_to_zap = current_value; } else { /* The second value being not distinguished, zap it as it is a single valued attribute */ @@ -1311,7 +1312,7 @@ resolve_single_valued_two_values(Slapi_Entry *e, Slapi_Attr *a, int attribute_st } else { /* Here the current_value is older than the second_current_value */ - if (value_distinguished_at_csn(e, a, current_value, (const CSN *)second_current_value_vucsn)) { + if (value_distinguished_at_csn(e, a, current_value, second_current_value_vucsn)) { /* current_value was distinguished at the time the second value was added * then the current_value should become the current */ value_to_zap = second_current_value; @@ -1348,17 +1349,17 @@ resolve_single_valued_check_restore_deleted_value(Slapi_Entry *e, Slapi_Attr *a) /* An attribute needs a present value */ entry_deleted_value_to_present_value(a, deleted_value); } else { - CSN *current_value_vucsn; - CSN *deleted_value_vucsn; - CSN *deleted_value_vdcsn; + const CSN *current_value_vucsn; + const CSN *deleted_value_vucsn; + const CSN *deleted_value_vdcsn; deleted_value_vucsn = value_get_csn(deleted_value, CSN_TYPE_VALUE_UPDATED); deleted_value_vdcsn = value_get_csn(deleted_value, CSN_TYPE_VALUE_DELETED); 
current_value_vucsn = value_get_csn(current_value, CSN_TYPE_VALUE_UPDATED); if (deleted_value_vucsn && - !value_distinguished_at_csn(e, a, current_value, (const CSN *)deleted_value_vucsn) && - (csn_compare((const CSN *)current_value_vucsn, (const CSN *)deleted_value_vucsn) < 0) && - (csn_compare((const CSN *)deleted_value_vdcsn, (const CSN *)current_value_vucsn) < 0)) { + !value_distinguished_at_csn(e, a, current_value, deleted_value_vucsn) && + (csn_compare((const CSN *)current_value_vucsn, deleted_value_vucsn) < 0) && + (csn_compare((const CSN *)deleted_value_vdcsn, current_value_vucsn) < 0)) { /* the condition to resurrect the deleted value is * - it is more recent than the current value * - its value was deleted before the current value @@ -1376,8 +1377,8 @@ static void resolve_single_valued_zap_current(Slapi_Entry *e, Slapi_Attr *a) { Slapi_Value *current_value = NULL; - CSN *current_value_vucsn; - CSN *adcsn; + const CSN *current_value_vucsn; + const CSN *adcsn; /* check if the current value should be deleted because * older than adcsn and not distinguished @@ -1386,7 +1387,7 @@ resolve_single_valued_zap_current(Slapi_Entry *e, Slapi_Attr *a) current_value_vucsn = value_get_csn(current_value, CSN_TYPE_VALUE_UPDATED); adcsn = attr_get_deletion_csn(a); if (current_value != NULL) { - if (csn_compare((const CSN *)adcsn, (const CSN *) current_value_vucsn) > 0) { + if (csn_compare(adcsn, (const CSN *) current_value_vucsn) > 0) { /* the attribute was deleted after the value was last updated */ if (!value_distinguished_at_csn(e, a, current_value, (const CSN *) current_value_vucsn)) { entry_present_value_to_zapped_value(a, current_value); @@ -1404,17 +1405,17 @@ resolve_single_valued_set_adcsn(Slapi_Attr *a) { Slapi_Value *deleted_value = NULL; Slapi_Value *current_value = NULL; - CSN *current_value_vucsn; - CSN *deleted_value_vucsn; - CSN *adcsn; + const CSN *current_value_vucsn; + const CSN *deleted_value_vucsn; + const CSN *adcsn; slapi_attr_first_value(a, &current_value); current_value_vucsn = value_get_csn(current_value, CSN_TYPE_VALUE_UPDATED); deleted_value = attr_most_recent_deleted_value(a); deleted_value_vucsn = value_get_csn(deleted_value, CSN_TYPE_VALUE_UPDATED); adcsn = attr_get_deletion_csn(a); - if ((deleted_value != NULL && (csn_compare(adcsn, (const CSN *) deleted_value_vucsn) < 0)) || - (deleted_value == NULL && (csn_compare(adcsn, (const CSN *) current_value_vucsn) < 0))) { + if ((deleted_value != NULL && (csn_compare(adcsn, deleted_value_vucsn) < 0)) || + (deleted_value == NULL && (csn_compare(adcsn, current_value_vucsn) < 0))) { attr_set_deletion_csn(a, NULL); } } @@ -1430,10 +1431,10 @@ resolve_single_valued_zap_deleted(Slapi_Attr *a) { Slapi_Value *deleted_value = NULL; Slapi_Value *current_value = NULL; - CSN *current_value_vucsn; - CSN *deleted_value_vucsn; - CSN *deleted_value_vdcsn; - CSN *deleted_value_csn; + const CSN *current_value_vucsn; + const CSN *deleted_value_vucsn; + const CSN *deleted_value_vdcsn; + const CSN *deleted_value_csn; PRBool deleted_on_mod_del = PR_FALSE; /* flag if a value was deleted specifically */ /* Now determine if the deleted value worth to be kept */ @@ -1445,16 +1446,16 @@ resolve_single_valued_zap_deleted(Slapi_Attr *a) deleted_value_vdcsn = value_get_csn(deleted_value, CSN_TYPE_VALUE_DELETED); /* get the appropriate csn to take into consideration: either from MOD_REPL or from MOD_DEL_specific */ - if (csn_compare((const CSN *) deleted_value_vdcsn, (const CSN *) deleted_value_vucsn) <= 0) { + if (csn_compare(deleted_value_vdcsn, 
deleted_value_vucsn) <= 0) { deleted_value_csn = deleted_value_vucsn; } else { deleted_value_csn = deleted_value_vdcsn; - if (0 == csn_compare_ext((const CSN *) current_value_vucsn, (const CSN *) deleted_value_vdcsn, CSN_COMPARE_SKIP_SUBSEQ)) { + if (0 == csn_compare_ext(current_value_vucsn, deleted_value_vdcsn, CSN_COMPARE_SKIP_SUBSEQ)) { /* the deleted value was specifically delete in the same operation that set the current value */ deleted_on_mod_del = PR_TRUE; } } - if ((csn_compare((const CSN *) deleted_value_csn, (const CSN *) current_value_vucsn) < 0) || deleted_on_mod_del) { + if ((csn_compare(deleted_value_csn, current_value_vucsn) < 0) || deleted_on_mod_del) { entry_deleted_value_to_zapped_value(a, deleted_value); } } diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 33ca9ce1d..2c7b53214 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -1220,6 +1220,7 @@ main(int argc, char **argv) vattr_cleanup(); sasl_map_done(); cleanup: + slapi_ch_free_string(&(mcfg.myname)); compute_terminate(); SSL_ShutdownServerSessionIDCache(); SSL_ClearSessionCache(); @@ -2194,7 +2195,6 @@ slapd_exemode_ldif2db(struct main_config *mcfg) return_value = -1; } slapi_pblock_destroy(pb); - slapi_ch_free((void **)&(mcfg->myname)); charray_free(instances); charray_free(mcfg->cmd_line_instance_names); charray_free(mcfg->db2ldif_include); @@ -2377,7 +2377,6 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg) slapi_ch_free((void **)&my_ldiffile); } } - slapi_ch_free((void **)&(mcfg->myname)); charray_free(mcfg->cmd_line_instance_names); charray_free(mcfg->db2ldif_include); if (mcfg->db2ldif_dump_replica) { @@ -2505,8 +2504,6 @@ slapd_exemode_db2index(struct main_config *mcfg) slapi_pblock_destroy(pb); charray_free(mcfg->db2index_attrs); - slapi_ch_free((void **)&(mcfg->myname)); - charray_free(mcfg->db2ldif_include); /* This frees mcfg->cmd_line_instance_name */ charray_free(instances); @@ -2557,7 +2554,6 @@ slapd_exemode_db2archive(struct main_config *mcfg) int32_t task_flags = SLAPI_TASK_RUNNING_FROM_COMMANDLINE; slapi_pblock_set(pb, SLAPI_TASK_FLAGS, &task_flags); return_value = (backend_plugin->plg_db2archive)(pb); - slapi_ch_free((void **)&(mcfg->myname)); slapi_pblock_destroy(pb); return return_value; } @@ -2605,7 +2601,6 @@ slapd_exemode_archive2db(struct main_config *mcfg) slapi_pblock_set(pb, SLAPI_TASK_FLAGS, &task_flags); slapi_pblock_set(pb, SLAPI_BACKEND_INSTANCE_NAME, mcfg->cmd_line_instance_name); return_value = (backend_plugin->plg_archive2db)(pb); - slapi_ch_free((void **)&(mcfg->myname)); slapi_pblock_destroy(pb); return return_value; } @@ -2674,7 +2669,6 @@ slapd_exemode_upgradedb(struct main_config *mcfg) return_value = -1; } slapi_pblock_destroy(pb); - slapi_ch_free((void **)&(mcfg->myname)); return (return_value); } @@ -2747,7 +2741,6 @@ slapd_exemode_upgradednformat(struct main_config *mcfg) } slapi_pblock_destroy(pb); bail: - slapi_ch_free((void **)&(mcfg->myname)); return (rc); } diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c index 834949a67..b90424985 100644 --- a/ldap/servers/slapd/mapping_tree.c +++ b/ldap/servers/slapd/mapping_tree.c @@ -539,9 +539,6 @@ free_mapping_tree_node_arrays(backend ***be_list, char ***be_names, int **be_sta { int i; - /* sanity check */ - PR_ASSERT(be_list != NULL && be_names != NULL && be_states != NULL && be_list_count != NULL); - if (*be_names != NULL) for (i = 0; i < *be_list_count; ++i) { slapi_ch_free((void **)&((*be_names)[i])); @@ -647,9 +644,7 @@ 
mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) if (get_backends_from_attr(attr, &be_list, &be_names, &be_states, &be_list_count, &be_list_size, NULL)) { - free_mapping_tree_node_arrays(&be_list, &be_names, &be_states, &be_list_count); - slapi_sdn_free(&subtree); - return lderr; + goto free_and_return; } if (NULL == be_list) { @@ -660,6 +655,7 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) } } else if (!strcasecmp(type, "nsslapd-referral")) { + slapi_ch_array_free(referral); referral = mtn_get_referral_from_entry(entry); } else if (!strcasecmp(type, "nsslapd-state")) { @@ -684,6 +680,7 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) slapi_entry_get_dn(entry)); continue; } + slapi_ch_free_string(&plugin_lib); plugin_lib = slapi_ch_strdup(slapi_value_get_string(val)); } else if (!strcasecmp(type, "nsslapd-distribution-funct")) { slapi_attr_first_value(attr, &val); @@ -693,6 +690,7 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) slapi_entry_get_dn(entry)); continue; } + slapi_ch_free_string(&plugin_funct); plugin_funct = slapi_ch_strdup(slapi_value_get_string(val)); } else if (!strcasecmp(type, "nsslapd-distribution-root-update")) { const char *sval; @@ -737,13 +735,16 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) if (be == NULL) { slapi_log_err(SLAPI_LOG_ERR, "mapping_tree_entry_add", "Default container has not been created for the NULL SUFFIX node.\n"); - slapi_sdn_free(&subtree); - return -1; + lderr = -1; + goto free_and_return; } be_list_size = 1; be_list_count = 0; + /* We're in a loop and potentially overwriting these pointers so free them first */ + free_mapping_tree_node_arrays(&be_list, &be_names, &be_states, &be_list_count); + be_list = (backend **)slapi_ch_calloc(1, sizeof(backend *)); be_names = (char **)slapi_ch_calloc(1, sizeof(char *)); be_states = (int *)slapi_ch_calloc(1, sizeof(int)); @@ -767,17 +768,13 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) slapi_log_err(SLAPI_LOG_ERR, "Node %s must define a backend\n", slapi_entry_get_dn(entry), 0, 0); - slapi_sdn_free(&subtree); - free_mapping_tree_node_arrays(&be_list, &be_names, &be_states, &be_list_count); - return lderr; + goto free_and_return; } if (((state == MTN_REFERRAL) || (state == MTN_REFERRAL_ON_UPDATE)) && (referral == NULL)) { slapi_log_err(SLAPI_LOG_ERR, "Node %s must define referrals to be in referral state\n", slapi_entry_get_dn(entry), 0, 0); - slapi_sdn_free(&subtree); - free_mapping_tree_node_arrays(&be_list, &be_names, &be_states, &be_list_count); - return lderr; + goto free_and_return; } if (plugin_lib && plugin_funct) { @@ -787,11 +784,7 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) slapi_log_err(SLAPI_LOG_ERR, "mapping_tree_entry_add", "Node %s cannot find distribution plugin. 
" SLAPI_COMPONENT_NAME_NSPR " %d (%s)\n", slapi_entry_get_dn(entry), PR_GetError(), slapd_pr_strerror(PR_GetError())); - slapi_sdn_free(&subtree); - slapi_ch_free((void **)&plugin_funct); - slapi_ch_free((void **)&plugin_lib); - free_mapping_tree_node_arrays(&be_list, &be_names, &be_states, &be_list_count); - return lderr; + goto free_and_return; } } else if ((plugin_lib == NULL) && (plugin_funct == NULL)) { /* nothing configured -> OK */ @@ -801,11 +794,7 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) slapi_log_err(SLAPI_LOG_ERR, "mapping_tree_entry_add", "Node %s must define both lib and funct for distribution plugin\n", slapi_entry_get_dn(entry)); - slapi_sdn_free(&subtree); - slapi_ch_free((void **)&plugin_funct); - slapi_ch_free((void **)&plugin_lib); - free_mapping_tree_node_arrays(&be_list, &be_names, &be_states, &be_list_count); - return lderr; + goto free_and_return; } /* Now we can create the node for this mapping tree entry. */ @@ -838,6 +827,15 @@ mapping_tree_entry_add(Slapi_Entry *entry, mapping_tree_node **newnodep) *newnodep = node; } + return lderr; + +free_and_return: + slapi_sdn_free(&subtree); + slapi_ch_array_free(referral); + slapi_ch_free_string(&plugin_funct); + slapi_ch_free_string(&plugin_lib); + free_mapping_tree_node_arrays(&be_list, &be_names, &be_states, &be_list_count); + return lderr; } diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 896cccfb9..bbc0ab71a 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -1444,9 +1444,11 @@ optimize_mods(Slapi_Mods *smods) prev_mod = slapi_mods_get_first_mod(smods); while ((mod = slapi_mods_get_next_mod(smods))) { - if ((SLAPI_IS_MOD_ADD(prev_mod->mod_op) || SLAPI_IS_MOD_DELETE(prev_mod->mod_op)) && + if (prev_mod && + (SLAPI_IS_MOD_ADD(prev_mod->mod_op) || SLAPI_IS_MOD_DELETE(prev_mod->mod_op)) && (prev_mod->mod_op == mod->mod_op) && - (!strcasecmp(prev_mod->mod_type, mod->mod_type))) { + (!strcasecmp(prev_mod->mod_type, mod->mod_type))) + { /* Get the current number of mod values from the previous mod. 
Do it once per attr */ if (mod_count == 0) { for (; prev_mod->mod_bvalues != NULL && prev_mod->mod_bvalues[mod_count] != NULL; mod_count++) diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c index dd6917363..dac42eb13 100644 --- a/ldap/servers/slapd/opshared.c +++ b/ldap/servers/slapd/opshared.c @@ -998,7 +998,8 @@ free_and_return_nolock: slapi_sdn_free(&sdn); } slapi_sdn_free(&basesdn); - slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, orig_sdn); + if (orig_sdn) + slapi_pblock_set(pb, SLAPI_SEARCH_TARGET_SDN, orig_sdn); slapi_ch_free_string(&proxydn); slapi_ch_free_string(&proxystr); diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c index 9da266b61..622daffdb 100644 --- a/ldap/servers/slapd/plugin_internal_op.c +++ b/ldap/servers/slapd/plugin_internal_op.c @@ -368,7 +368,8 @@ seq_internal_callback_pb(Slapi_PBlock *pb, void *callback_data, plugin_result_ca slapi_pblock_set(pb, SLAPI_BACKEND, be); slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database); slapi_pblock_set(pb, SLAPI_SEQ_ATTRNAME, attrname); - slapi_pblock_set(pb, SLAPI_SEQ_VAL, val); + if (val) + slapi_pblock_set(pb, SLAPI_SEQ_VAL, val); slapi_pblock_set(pb, SLAPI_REQCONTROLS, controls); /* set actions taken to process the operation */ diff --git a/ldap/servers/slapd/plugin_syntax.c b/ldap/servers/slapd/plugin_syntax.c index e208442d5..dc7106da5 100644 --- a/ldap/servers/slapd/plugin_syntax.c +++ b/ldap/servers/slapd/plugin_syntax.c @@ -247,7 +247,9 @@ plugin_call_syntax_filter_sub_sv( Operation *op = NULL; /* to pass SLAPI_SEARCH_TIMELIMIT & SLAPI_OPINITATED_TIME */ slapi_pblock_get(pb, SLAPI_OPERATION, &op); - slapi_pblock_set(pipb, SLAPI_OPERATION, op); + if (op) { + slapi_pblock_set(pipb, SLAPI_OPERATION, op); + } } rc = (*sub_fn)(pipb, fsub->sf_initial, fsub->sf_any, fsub->sf_final, va); } else { diff --git a/ldap/servers/slapd/rdn.c b/ldap/servers/slapd/rdn.c index 41c075036..fdb6b1773 100644 --- a/ldap/servers/slapd/rdn.c +++ b/ldap/servers/slapd/rdn.c @@ -934,10 +934,7 @@ slapi_rdn_get_dn(Slapi_RDN *srdn, char **dn) if (NULL == srdn || NULL == srdn->all_rdns || NULL == dn) { return -1; } - for (rdnp = srdn->all_rdns; rdnp && *rdnp; rdnp++) { - len += strlen(*rdnp) + 1; /* 1 for ',' */ - } - len += 1; + len = slapi_rdn_get_dn_len(srdn); *dn = (char *)slapi_ch_malloc(len); enddn = *dn + len - 1; diff --git a/ldap/servers/slapd/sasl_map.c b/ldap/servers/slapd/sasl_map.c index 72bd01079..9593fd075 100644 --- a/ldap/servers/slapd/sasl_map.c +++ b/ldap/servers/slapd/sasl_map.c @@ -336,7 +336,9 @@ _sasl_unescape_parenthesis(char *input) *d++ = *s; } } - *d = '\0'; + if (d) { + *d = '\0'; + } return input; } diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index 379fe208b..f609c220d 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -2363,9 +2363,9 @@ schema_delete_attributes(Slapi_Entry *entryBefore __attribute__((unused)), LDAPM attr_ldif[k] = tolower(attr_ldif[k]); } - sizedbuffer_allocate(psbAttrName, strlen(attr_ldif)); - sizedbuffer_allocate(psbAttrOid, strlen(attr_ldif)); - sizedbuffer_allocate(psbAttrSyntax, strlen(attr_ldif)); + sizedbuffer_allocate(psbAttrName, strlen(attr_ldif) + 1); + sizedbuffer_allocate(psbAttrOid, strlen(attr_ldif) + 1); + sizedbuffer_allocate(psbAttrSyntax, strlen(attr_ldif) + 1); sscanf(attr_ldif, "%s name %s syntax %s", psbAttrOid->buffer, psbAttrName->buffer, psbAttrSyntax->buffer); diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c index 2a9979552..75accbba6 
100644 --- a/ldap/servers/slapd/search.c +++ b/ldap/servers/slapd/search.c @@ -37,7 +37,7 @@ do_search(Slapi_PBlock *pb) { Slapi_Operation *operation; BerElement *ber; - int i, err, attrsonly; + int i, err = 0, attrsonly; ber_int_t scope, deref, sizelimit, timelimit; char *rawbase = NULL; int rawbase_set_in_pb = 0; /* was rawbase set in pb? */ @@ -233,6 +233,7 @@ do_search(Slapi_PBlock *pb) log_search_access(pb, base, scope, fstr, "invalid attribute request"); send_ldap_result(pb, LDAP_PROTOCOL_ERROR, NULL, NULL, 0, NULL); slapi_ch_free_string(&normaci); + err = 1; /* Make sure we free everything */ goto free_and_return; } } @@ -358,8 +359,8 @@ do_search(Slapi_PBlock *pb) ps_add(pb, changetypes, send_entchg_controls); } -free_and_return:; - if (!psearch || rc != 0) { +free_and_return: + if (!psearch || rc != 0 || err != 0) { slapi_ch_free_string(&fstr); slapi_filter_free(filter, 1); slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs); diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c index 53cdb8985..c74d4823c 100644 --- a/ldap/servers/slapd/tools/dbscan.c +++ b/ldap/servers/slapd/tools/dbscan.c @@ -532,7 +532,7 @@ print_changelog(unsigned char *data, int len __attribute__((unused))) replgen = ntohl(thetime32); pos += sizeof(uint32_t); thetime = (time_t)replgen; - db_printf("\treplgen: %ld %s", replgen, ctime((time_t *)&thetime)); + db_printf("\treplgen: %u %s", replgen, ctime((time_t *)&thetime)); /* read csn */ print_attr("csn", &pos); @@ -717,12 +717,15 @@ display_item(DBC *cursor, DBT *key, DBT *data) tmpbuflen = (key->size > data->size ? key->size : data->size) + 1024; } if (buflen < tmpbuflen) { + unsigned char *tmp = NULL; buflen = tmpbuflen; - buf = (unsigned char *)realloc(buf, buflen); - if (NULL == buf) { + tmp = (unsigned char *)realloc(buf, buflen); + if (NULL == tmp) { + free(buf); printf("\t(malloc failed -- %d bytes)\n", buflen); return; } + buf = tmp; } if (display_mode & RAWDATA) { diff --git a/ldap/servers/slapd/tools/ldclt/ldapfct.c b/ldap/servers/slapd/tools/ldclt/ldapfct.c index 373076500..ca0912d6c 100644 --- a/ldap/servers/slapd/tools/ldclt/ldapfct.c +++ b/ldap/servers/slapd/tools/ldclt/ldapfct.c @@ -986,9 +986,9 @@ buildVersatileAttribute( break; default: /* - * Should not happen, unless new variant parsed and not - * integrated here, or "jardinage".... - */ + * Should not happen, unless new variant parsed and not + * integrated here, or "jardinage".... + */ field = NULL; field->how = 22; /* Crash !!! */ break; @@ -3231,7 +3231,7 @@ doExactSearch( case LDAP_RES_SEARCH_ENTRY: nentries++; /* get dereferenced value into resctrls: deref parsing */ - parse_rc = ldap_get_entry_controls(tttctx->ldapCtx, e, &resctrls); + ldap_get_entry_controls(tttctx->ldapCtx, e, &resctrls); if (resctrls != NULL) { /* parse it only when we have return saved in server control */ /* get dn */ if ((dn = ldap_get_dn(tttctx->ldapCtx, e)) != NULL) { diff --git a/ldap/servers/slapd/tools/ldclt/ldclt.c b/ldap/servers/slapd/tools/ldclt/ldclt.c index e72b775e2..586a14713 100644 --- a/ldap/servers/slapd/tools/ldclt/ldclt.c +++ b/ldap/servers/slapd/tools/ldclt/ldclt.c @@ -816,7 +816,7 @@ trapVector( int initMainThread(void) { - struct sigaction act; + struct sigaction act = {0}; /* * Trap SIGINT. 
diff --git a/ldap/servers/slapd/tools/ldif.c b/ldap/servers/slapd/tools/ldif.c index 3548c7d49..a13e99f89 100644 --- a/ldap/servers/slapd/tools/ldif.c +++ b/ldap/servers/slapd/tools/ldif.c @@ -92,11 +92,13 @@ main(int argc, char **argv) } if (nread + cur > max) { max += BUFSIZ; - if ((val = (char *)realloc(val, max)) == - NULL) { + char *tmp = NULL; + if ((tmp = (char *)realloc(val, max)) == NULL) { + free(val); perror("realloc"); return (1); } + val = tmp; } memcpy(val + cur, buf, nread); cur += nread; @@ -125,12 +127,14 @@ main(int argc, char **argv) /* if buffer was filled, expand and keep reading unless last char is linefeed, in which case it is OK for buffer to be full */ while (((curlen = strlen(buf)) == (maxlen - 1)) && buf[curlen - 1] != '\n') { + char *tmp = NULL; maxlen *= 2; - if ((buf = (char *)realloc(buf, maxlen)) == NULL) { + if ((tmp = (char *)realloc(buf, maxlen)) == NULL) { perror("realloc"); free(buf); return (1); } + buf = tmp; if (NULL == fgets(buf + curlen, maxlen / 2 + 1, stdin)) { /* no more input to read. */ break; diff --git a/ldap/servers/slapd/tools/mmldif.c b/ldap/servers/slapd/tools/mmldif.c index cdcf4b16d..96b606a29 100644 --- a/ldap/servers/slapd/tools/mmldif.c +++ b/ldap/servers/slapd/tools/mmldif.c @@ -700,7 +700,6 @@ readrec(edfFILE *edf1, attrib1_t **attrib) attrib1_t *freelist = *attrib; attrib1_t *newlist = NULL; attrib1_t *a; - int ignore_rec = FALSE; int free_it = 0; *attrib = NULL; @@ -758,7 +757,7 @@ readrec(edfFILE *edf1, attrib1_t **attrib) if (!stricmp(line, "authoritative")) continue; if (!freelist) { - att = (attrib1_t *)malloc(sizeof(attrib1_t)); + att = (attrib1_t *)calloc(1, sizeof(struct attrib1_t *)); free_it = 1; } else { att = freelist; @@ -881,8 +880,6 @@ readrec(edfFILE *edf1, attrib1_t **attrib) } *attrib = newlist; freefreelist(freelist); - if (ignore_rec) - return IDDS_MM_ABSENT; return IDDS_MM_OK; } diff --git a/ldap/servers/slapd/tools/pwenc.c b/ldap/servers/slapd/tools/pwenc.c index d92e94e17..87ca17488 100644 --- a/ldap/servers/slapd/tools/pwenc.c +++ b/ldap/servers/slapd/tools/pwenc.c @@ -362,6 +362,7 @@ slapd_config(const char *configdir, const char *givenconfigfile) } } else if (slapi_sdn_compare(&config_dn, slapi_entry_get_sdn_const(e)) == 0) { /* Get the root scheme out and initialise it (if it exists) */ + slapi_ch_free_string(&rootschemename); rootschemename = slapi_entry_attr_get_charptr(e, CONFIG_ROOTPWSTORAGESCHEME_ATTRIBUTE); } diff --git a/ldap/servers/slapd/tools/rsearch/infadd.c b/ldap/servers/slapd/tools/rsearch/infadd.c index db6fb23ae..6fe84f9e1 100644 --- a/ldap/servers/slapd/tools/rsearch/infadd.c +++ b/ldap/servers/slapd/tools/rsearch/infadd.c @@ -309,7 +309,7 @@ main(int argc, char **argv) at_getThread(threads[x], NULL), min, max, count, ntot); } - if (!quiet && (numThreads > 1 || !verbose)) { + if (numThreads > 1 && !quiet && !verbose) { double val = 1000.0 * (double)total / (double)sampleInterval; fprintf(stdout, "Rate: %7.2f/thr (%6.2f/sec =%7.4fms/op), " "total: %u (%d thr)\n", diff --git a/ldap/servers/slapd/tools/rsearch/rsearch.c b/ldap/servers/slapd/tools/rsearch/rsearch.c index 7602b16d8..347c84d2a 100644 --- a/ldap/servers/slapd/tools/rsearch/rsearch.c +++ b/ldap/servers/slapd/tools/rsearch/rsearch.c @@ -467,7 +467,10 @@ main(int argc, char **argv) printf("T%d min=%4ums, max=%4ums, count = %u\n", st_getThread(threads[x], NULL), min, max, count); } - rate = (double)total / (double)numThreads; + rate = 0.0; + if (numThreads) { + rate = (double)total / (double)numThreads; + } val = 1000.0 * 
(double)total / (double)sampleInterval; cumrate += rate; if ((numThreads > 1) || (!verbose)) { diff --git a/lib/ldaputil/certmap.c b/lib/ldaputil/certmap.c index efe531a51..dc2fdde43 100644 --- a/lib/ldaputil/certmap.c +++ b/lib/ldaputil/certmap.c @@ -374,6 +374,7 @@ dbinfo_to_certinfo(DBConfDBInfo_t *db_info, rv = ldapu_list_add_info(propval_list, propval); if (rv != LDAPU_SUCCESS) { + ldapu_propval_free((void *)propval, (void *)propval); goto error; } @@ -700,15 +701,14 @@ certmap_read_certconfig_file(const char *file) while (curdb) { nextdb = curdb->next; rv = dbinfo_to_certinfo(curdb, &certinfo); - if (rv != LDAPU_SUCCESS) { dbconf_free_confinfo(conf_info); return rv; } rv = process_certinfo(certinfo); - if (rv != LDAPU_SUCCESS) { + ldapu_certinfo_free(certinfo); dbconf_free_confinfo(conf_info); return rv; } @@ -1330,8 +1330,11 @@ ldapu_cert_to_ldap_entry(void *cert, LDAP *ld, const char *basedn, LDAPMessage * rv = (*mapfn)(cert, ld, certmap_info, &ldapDN, &filter); - if (rv != LDAPU_SUCCESS) + if (rv != LDAPU_SUCCESS) { + free(ldapDN); + free(filter); return rv; + } /* Get the search function from the certmap_info - certinfo maybe NULL */ searchfn = ldapu_get_cert_searchfn_sub(certmap_info); @@ -1339,10 +1342,8 @@ ldapu_cert_to_ldap_entry(void *cert, LDAP *ld, const char *basedn, LDAPMessage * rv = (*searchfn)(cert, ld, certmap_info, basedn, ldapDN, filter, certmap_attrs, &res_array); - if (ldapDN) - free(ldapDN); - if (filter) - free(filter); + free(ldapDN); + free(filter); /* * Get the verify cert function & call it. diff --git a/lib/libaccess/usrcache.cpp b/lib/libaccess/usrcache.cpp index 5ea8259fe..b6ac58d3c 100644 --- a/lib/libaccess/usrcache.cpp +++ b/lib/libaccess/usrcache.cpp @@ -157,11 +157,11 @@ int acl_usr_cache_init () } else { singleDbTable = 0; - databaseUserCacheTable = PR_NewHashTable(0, + databaseUserCacheTable = PR_NewHashTable(0, PR_HashCaseString, PR_CompareCaseStrings, PR_CompareValues, - &ACLPermAllocOps, + &ACLPermAllocOps, usrcache_pool); } @@ -176,7 +176,7 @@ int acl_usr_cache_init () for(i = 0; i < num_usrobj; i++){ usrobj = (UserCacheObj *)pool_malloc(usrcache_pool, sizeof(UserCacheObj)); - + if (!usrobj) return -1; memset((void *)usrobj, 0, sizeof(UserCacheObj)); PR_INSERT_AFTER(&usrobj->list, usrobj_list); diff --git a/src/cockpit/389-console/src/lib/database/chaining.jsx b/src/cockpit/389-console/src/lib/database/chaining.jsx index 3dd3ec48e..c169e7bd3 100644 --- a/src/cockpit/389-console/src/lib/database/chaining.jsx +++ b/src/cockpit/389-console/src/lib/database/chaining.jsx @@ -916,7 +916,7 @@ export class ChainingConfig extends React.Component { </Checkbox>; } else { proxiedAuth = - <Checkbox id="nsproxiedauthorization" onChange={this.handleChange} defaultChecked + <Checkbox id="nsproxiedauthorization" onChange={this.handleChange} title="Allow proxied authentication to the remote server. 
(nsproxiedauthorization)."> Allow Proxied Authentication </Checkbox>; diff --git a/src/cockpit/389-console/src/lib/database/suffix.jsx b/src/cockpit/389-console/src/lib/database/suffix.jsx index 9cfb95b92..3f3bc82ec 100644 --- a/src/cockpit/389-console/src/lib/database/suffix.jsx +++ b/src/cockpit/389-console/src/lib/database/suffix.jsx @@ -254,10 +254,10 @@ export class Suffix extends React.Component { // Do import let export_cmd = [ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", - "backend", "export", this.props.suffix, "--ldif=" + this.state.ldifLocation, "--encrypted" + "backend", "export", this.props.suffix, "--ldif=" + this.state.ldifLocation ]; - if (this.state.attrEncrpytion) { + if (this.state.attrEncryption) { export_cmd.push("--encrypted"); } diff --git a/src/cockpit/389-console/src/replication.js b/src/cockpit/389-console/src/replication.js index 6ef363523..2fe3a6e48 100644 --- a/src/cockpit/389-console/src/replication.js +++ b/src/cockpit/389-console/src/replication.js @@ -1185,7 +1185,7 @@ $(document).ready( function() { } } else { if ( !('nsds5replicatedattributelisttotal' in repl_agmt_values) || - agmt_tot_exclude != repl_agmt_values['nsds5replicatedattributelisttotal'].replace(frac_prefix, "")); + agmt_tot_exclude != repl_agmt_values['nsds5replicatedattributelisttotal'].replace(frac_prefix, "")) { cmd_args.push('--frac-list-total=' + frac_prefix + ' ' + agmt_tot_exclude); } diff --git a/src/lib389/lib389/agreement.py b/src/lib389/lib389/agreement.py index dcab900b8..84e2f8c61 100644 --- a/src/lib389/lib389/agreement.py +++ b/src/lib389/lib389/agreement.py @@ -266,6 +266,7 @@ class Agreement(DSLdapObject): # Extract the csn timstamps and compare them agmt_time = 0 + con_time = 0 match = Agreement.csnre.match(agmt_maxcsn) if match: agmt_time = int(match.group(1), 16) diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py index 64a40c15a..353a3e117 100644 --- a/src/lib389/lib389/backend.py +++ b/src/lib389/lib389/backend.py @@ -480,6 +480,9 @@ class Backend(DSLdapObject): :returns: DSLdapObject of the created entry """ + sample_entries = False + parent_suffix = False + # normalize suffix (remove spaces between comps) if dn is not None: dn_comps = ldap.dn.explode_dn(dn.lower()) @@ -490,9 +493,8 @@ class Backend(DSLdapObject): dn_comps = ldap.dn.explode_dn(suffix_dn) ndn = ",".join(dn_comps) properties['nsslapd-suffix'] = ndn - - sample_entries = properties.pop(BACKEND_SAMPLE_ENTRIES, False) - parent_suffix = properties.pop('parent', False) + sample_entries = properties.pop(BACKEND_SAMPLE_ENTRIES, False) + parent_suffix = properties.pop('parent', False) # Okay, now try to make the backend. super(Backend, self).create(dn, properties, basedn) diff --git a/src/lib389/lib389/cli_conf/backend.py b/src/lib389/lib389/cli_conf/backend.py index 77ce3ddec..36e32ec48 100644 --- a/src/lib389/lib389/cli_conf/backend.py +++ b/src/lib389/lib389/cli_conf/backend.py @@ -422,7 +422,7 @@ def backend_set(inst, basedn, log, args): if args.add_referral: be.add('nsslapd-referral', args.add_referral) if args.del_referral: - be.remove('nsslapd-referral', args.add_referral) + be.remove('nsslapd-referral', args.del_referral) if args.cache_size: be.set('nsslapd-cachesize', args.cache_size) if args.cache_memsize:
0
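The lib389 backend.py hunk in the diff above normalizes a suffix by exploding the DN into its components and re-joining them. A minimal sketch of that normalization step with plain python-ldap, using a made-up input DN for illustration:
import ldap.dn
dn = "dc=Example, dc=Com"                       # hypothetical input with mixed case and spaces
ndn = ",".join(ldap.dn.explode_dn(dn.lower()))  # explode/re-join as in the backend.py hunk above
print(ndn)                                      # -> "dc=example,dc=com"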
8309fd046c0e3ca9385fdb93f54515eb6004e69f
389ds/389-ds-base
Ticket 47966 - CI test: added test cases for ticket 47966 Summary: slapd crashes during Dogtag clone reinstallation Testing bulk import when the backend with VLV was recreated.
commit 8309fd046c0e3ca9385fdb93f54515eb6004e69f Author: Noriko Hosoi <[email protected]> Date: Fri Apr 10 14:10:40 2015 -0700 Ticket 47966 - CI test: added test cases for ticket 47966 Summary: slapd crashes during Dogtag clone reinstallation Testing bulk import when the backend with VLV was recreated. diff --git a/dirsrvtests/tickets/ticket47966_test.py b/dirsrvtests/tickets/ticket47966_test.py new file mode 100644 index 000000000..0e784af03 --- /dev/null +++ b/dirsrvtests/tickets/ticket47966_test.py @@ -0,0 +1,219 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +m1_m2_agmt = "" + +class TopologyReplication(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + + [email protected](scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating master 1... + master1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + + # Creating master 2... 
+ master2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + global m1_m2_agmt + m1_m2_agmt = master1.agreement.create(suffix=DEFAULT_SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... + if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Clear out the tmp dir + master1.clearTmpDir(__file__) + + return TopologyReplication(master1, master2) + + +def test_ticket47966(topology): + ''' + Testing bulk import when the backend with VLV was recreated. + If the test passes without the server crash, 47966 is verified. + ''' + log.info('Testing Ticket 47966 - [VLV] slapd crashes during Dogtag clone reinstallation') + M1 = topology.master1 + M2 = topology.master2 + + log.info('0. Create a VLV index on Master 2.') + # get the backend entry + be = M2.replica.conn.backend.list(suffix=DEFAULT_SUFFIX) + if not be: + log.fatal("ticket47966: enable to retrieve the backend for %s" % DEFAULT_SUFFIX) + raise ValueError("no backend for suffix %s" % DEFAULT_SUFFIX) + bent = be[0] + beName = bent.getValue('cn') + beDn = "cn=%s,cn=ldbm database,cn=plugins,cn=config" % beName + + # generate vlvSearch entry + vlvSrchDn = "cn=vlvSrch,%s" % beDn + log.info('0-1. 
vlvSearch dn: %s' % vlvSrchDn) + vlvSrchEntry = Entry(vlvSrchDn) + vlvSrchEntry.setValues('objectclass', 'top', 'vlvSearch') + vlvSrchEntry.setValues('cn', 'vlvSrch') + vlvSrchEntry.setValues('vlvBase', DEFAULT_SUFFIX) + vlvSrchEntry.setValues('vlvFilter', '(|(objectclass=*)(objectclass=ldapsubentry))') + vlvSrchEntry.setValues('vlvScope', '2') + M2.add_s(vlvSrchEntry) + + # generate vlvIndex entry + vlvIndexDn = "cn=vlvIdx,%s" % vlvSrchDn + log.info('0-2. vlvIndex dn: %s' % vlvIndexDn) + vlvIndexEntry = Entry(vlvIndexDn) + vlvIndexEntry.setValues('objectclass', 'top', 'vlvIndex') + vlvIndexEntry.setValues('cn', 'vlvIdx') + vlvIndexEntry.setValues('vlvSort', 'cn ou sn') + M2.add_s(vlvIndexEntry) + + log.info('1. Initialize Master 2 from Master 1.') + M1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + M1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... + if M1.testReplication(DEFAULT_SUFFIX, M2): + log.info('1-1. Replication is working.') + else: + log.fatal('1-1. Replication is not working.') + assert False + + log.info('2. Delete the backend instance on Master 2.') + M2.delete_s(vlvIndexDn) + M2.delete_s(vlvSrchDn) + # delete the agreement, replica, and mapping tree, too. + M2.replica.disableReplication(DEFAULT_SUFFIX) + mappingTree = 'cn="%s",cn=mapping tree,cn=config' % DEFAULT_SUFFIX + M2.mappingtree.delete(DEFAULT_SUFFIX, beName, mappingTree) + M2.backend.delete(DEFAULT_SUFFIX, beDn, beName) + + log.info('3. Recreate the backend and the VLV index on Master 2.') + M2.mappingtree.create(DEFAULT_SUFFIX, beName) + M2.backend.create(DEFAULT_SUFFIX, {BACKEND_NAME: beName}) + log.info('3-1. Recreating %s and %s on Master 2.' % (vlvSrchDn, vlvIndexDn)) + M2.add_s(vlvSrchEntry) + M2.add_s(vlvIndexEntry) + M2.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + # agreement m2_m1_agmt is not needed... :p + + log.info('4. Initialize Master 2 from Master 1 again.') + M1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + M1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... + if M1.testReplication(DEFAULT_SUFFIX, M2): + log.info('4-1. Replication is working.') + else: + log.fatal('4-1. Replication is not working.') + assert False + + log.info('5. Check Master 2 is up.') + entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') + assert len(entries) > 0 + log.info('5-1. %s entries are returned from M2.' % len(entries)) + + log.info('Test complete') + + +def test_ticket47966_final(topology): + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + + +def run_isolated(): + global installation1_prefix + installation1_prefix = None + + topo = topology(True) + test_ticket47966(topo) + test_ticket47966_final(topo) + + +if __name__ == '__main__': + run_isolated() +
0
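A minimal sketch of the VLV setup step exercised by the test in the commit above, written against plain python-ldap instead of the lib389 Entry helpers the test uses. The connection URL and credentials, the backend name userRoot, and the suffix dc=example,dc=com are assumptions for illustration; the attribute names and values mirror the vlvSrch/vlvIdx entries in the diff.
import ldap
import ldap.modlist

conn = ldap.initialize("ldap://localhost:389")             # assumed instance URL
conn.simple_bind_s("cn=Directory Manager", "password")     # assumed credentials

be_dn = "cn=userRoot,cn=ldbm database,cn=plugins,cn=config"   # assumed backend entry
vlv_srch_dn = "cn=vlvSrch,%s" % be_dn
vlv_idx_dn = "cn=vlvIdx,%s" % vlv_srch_dn

# vlvSearch entry: base, filter and scope of the browsing index
conn.add_s(vlv_srch_dn, ldap.modlist.addModlist({
    "objectclass": [b"top", b"vlvSearch"],
    "cn": [b"vlvSrch"],
    "vlvBase": [b"dc=example,dc=com"],
    "vlvFilter": [b"(|(objectclass=*)(objectclass=ldapsubentry))"],
    "vlvScope": [b"2"],
}))

# vlvIndex entry: sort order used by the index
conn.add_s(vlv_idx_dn, ldap.modlist.addModlist({
    "objectclass": [b"top", b"vlvIndex"],
    "cn": [b"vlvIdx"],
    "vlvSort": [b"cn ou sn"],
}))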
24d458afbd93d1fcd79d732b15197a830c3e49f9
389ds/389-ds-base
Issue 4848 - Force to require an nss version greater than or equal to the version available at build time Description: In our spec file we require nss >= 3.34, but not the exact (or greater, as they are backward compatible) version available at build time. Fix Description: We should record the nss version available at build time and require it at runtime. Adapt a macro from the samba spec file. Fixes: https://github.com/389ds/389-ds-base/issues/4848 Reviewed by: @mreynolds389, @Firstyear, @droideck (Thank you!)
commit 24d458afbd93d1fcd79d732b15197a830c3e49f9 Author: Viktor Ashirov <[email protected]> Date: Tue Jul 27 15:24:01 2021 +0200 Issue 4848 - Force to require nss version greater or equal as the version available at the build time Description: In our spec file we require nss >= 3.34, but not the exact (or greater, as they are backward compatible) version available at the build time. Fix Description: We should record nss version available at the build time and require it at the runtime. Adapt a macro from samba spec file. Fixes: https://github.com/389ds/389-ds-base/issues/4848 Reviewed by: @mreynolds389, @Firstyear, @droideck (Thank you!) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 4c910bc51..3bba6e045 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -28,9 +28,6 @@ # This enables rust in the build. %global use_rust __RUST_ON__ -%define nspr_version 4.6 -%define nss_version 3.11 - %if %{use_asan} || %{use_msan} || %{use_tsan} || %{use_ubsan} %global variant base-xsan %endif @@ -59,6 +56,10 @@ # Filter argparse-manpage from autogenerated package Requires %global __requires_exclude ^python.*argparse-manpage +# Force to require nss version greater or equal as the version available at the build time +# See bz1986327 +%define dirsrv_requires_ge() %(LC_ALL="C" echo '%*' | xargs -r rpm -q --qf 'Requires: %%{name} >= %%{epoch}:%%{version}\\n' | sed -e 's/ (none):/ /' -e 's/ 0:/ /' | grep -v "is not") + Summary: 389 Directory Server (%{variant}) Name: 389-ds-base Version: __VERSION__ @@ -165,7 +166,7 @@ Requires: openldap-clients # this is needed to setup SSL if you are not using the # administration server package Requires: nss-tools -Requires: nss >= 3.34 +%dirsrv_requires_ge nss # these are not found by the auto-dependency method # they are required to support the mandatory LDAP SASL mechs Requires: cyrus-sasl-gssapi
0
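The %dirsrv_requires_ge macro added in the commit above shells out to rpm at build time and turns the installed package version into a Requires: line. Below is a hedged re-expression of that pipeline in Python, so the intent is easier to follow; it assumes rpm is available on the build host, and the query format plus the sed-style cleanup are taken from the macro itself.
import os
import subprocess

def requires_ge(pkg):
    """Return 'Requires: <pkg> >= <build-time version>' or '' if the package is absent."""
    out = subprocess.run(
        ["rpm", "-q", "--qf", "Requires: %{name} >= %{epoch}:%{version}\n", pkg],
        capture_output=True, universal_newlines=True,
        env={**os.environ, "LC_ALL": "C"}).stdout
    # mirror the sed cleanup: strip "(none):" and "0:" epochs, skip "is not installed"
    lines = [line.replace(" (none):", " ").replace(" 0:", " ")
             for line in out.splitlines() if "is not" not in line]
    return "\n".join(lines)

print(requires_ge("nss"))   # e.g. "Requires: nss >= <version installed at build time>"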
bc22cfa6184f51b8492c692f1c95e721d538ab5e
389ds/389-ds-base
Ignore replica busy condition in healthcheck (#6630) A replica busy condition is expected when there are more than 2 suppliers, so healthcheck should not report an error for it. Fixed issues in the CI tests: test_healthcheck_replication_out_of_sync_not_broken was unstable and redundant with test_healthcheck_replica_busy, so I moved it back into health_repl_test.py and rewrote it to test working replication whose replicas are not in sync (healthcheck should not report anything). Some tests (not always the same ones) were randomly failing with an empty log, which is unexpected because healthcheck output should never be empty; I suspect the log capture mechanism, so health_repl_test.py now runs dsctl instance healthcheck through the subprocess module and captures the output. So far I have not changed the other files because I have not noticed any failure in them. Issue: #6626 Reviewed by: @tbordaz (Thanks!)
commit bc22cfa6184f51b8492c692f1c95e721d538ab5e Author: progier389 <[email protected]> Date: Wed Mar 19 19:04:30 2025 +0100 Ignore replica busy condition in healthcheck (#6630) Replica Busy condition is expected when there is more than 2 suppliers so healthcheck should not report any error for such condition. Fixed issue in CI tests: test_healthcheck_replication_out_of_sync_not_broken was unstable and redundant with test_healthcheck_replica_busy so I moved it back in health_repl_test.py and rewrite it to test working replication whose replica are not in sync (healthcheck should not report anything) some tests (not always the same) were randomly failing with an empty log (which is unexpected because healthcheck output should never be empty. I suspect the log capture mechanism so health_repl_test.py now run dsctl instance healthcheck using subprocess module and capture the output. So far I have not changed the other files because I have not noticed any failure. Issue: #6626 Reviewed by: @tbordaz (Thanks!) diff --git a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py index a8d94dfcb..729cc3d6b 100644 --- a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py +++ b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py @@ -9,56 +9,124 @@ import pytest import os -from contextlib import suppress +import random +import re +import string +import subprocess +import threading +import time +from contextlib import suppress, AbstractContextManager from lib389.backend import Backend, Backends from lib389.idm.user import UserAccounts from lib389.replica import Changelog, ReplicationManager, Replicas from lib389.utils import * from lib389._constants import * from lib389.cli_base import FakeArgs -from lib389.topologies import topology_m2, topology_m3 from lib389.cli_ctl.health import health_check_run +from lib389.topologies import topology_m2, topology_m3 from lib389.paths import Paths CMD_OUTPUT = 'No issues found.' 
JSON_OUTPUT = '[]' +LOGIC_DICT = { + False: ( "not ", "", lambda x: x ), + True: ( "", "not ", lambda x: not x ) + } + ds_paths = Paths() log = logging.getLogger(__name__) -def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): - args = FakeArgs() - args.instance = instance.serverid - args.verbose = instance.verbose - args.list_errors = False - args.list_checks = False - args.check = ['replication', 'backends:userroot:cl_trimming'] - args.dry_run = False - +class LoadInstance(AbstractContextManager): + @staticmethod + def create_test_user(inst): + users = UserAccounts(inst, DEFAULT_SUFFIX) + uid = str(20000 + int(inst.serverid[8:])) + properties = { + 'uid': f'testuser_{inst.serverid}', + 'cn' : f'testuser_{inst.serverid}', + 'sn' : 'user_{inst.serverid}', + 'uidNumber' : uid, + 'gidNumber' : uid, + 'homeDirectory' : f'/home/testuser_{inst.serverid}' + } + return users.ensure_state(properties=properties) + + def __init__(self, inst): + self.inst = inst + self.stop = threading.Event() + self.thread = threading.Thread(target=self.loader) + self.user = LoadInstance.create_test_user(inst) + + def loader(self): + while not self.stop.is_set(): + value = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10)) + self.user.replace('description', value) + #log.info(f'Modified {self.user.dn} description with {value} on {self.inst.serverid}') + time.sleep(0.001) + + def __exit__(self, *args): + self.stop.set() + self.thread.join() + self.user.delete() + + def __enter__(self): + self.thread.start() + return self + + +class BreakReplication(AbstractContextManager): + def __init__(self, inst): + self.replica = Replicas(inst).list()[0] + self.oldval = None + + def __exit__(self, *args): + self.replica.replace('nsds5ReplicaBindDNGroup', self.oldval) + + def __enter__(self): + self.oldval = self.replica.get_attr_val_utf8('nsds5ReplicaBindDNGroup') + self.replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl') + return self + + +def assert_is_in_result(result, searched_code, isnot=False): + # Assert if searched_code is not in logcap + if searched_code is None: + return + + # Handle positive and negative tests: + nomatch, match, f = LOGIC_DICT[bool(isnot)] + try: + assert f(re.search(re.escape(searched_code), result)) + log.info(f'Searched code {searched_code} is {match}in healthcheck output') + except AssertionError as exc: + log.error(f'{searched_code} is {nomatch}in healthcheck output: {result}') + raise + + +def run_healthcheck_and_check_result(topology, instance, searched_code, json, searched_code2=None, isnot=False): + cmd = [ 'dsctl', ] if json: - log.info('Use healthcheck with --json option') - args.json = json - health_check_run(instance, topology.logcap.log, args) - assert topology.logcap.contains(searched_code) - log.info('Healthcheck returned searched code: %s' % searched_code) - - if searched_code2 is not None: - assert topology.logcap.contains(searched_code2) - log.info('Healthcheck returned searched code: %s' % searched_code2) - else: - log.info('Use healthcheck without --json option') - args.json = json - health_check_run(instance, topology.logcap.log, args) - assert topology.logcap.contains(searched_code) - log.info('Healthcheck returned searched code: %s' % searched_code) - - if searched_code2 is not None: - assert topology.logcap.contains(searched_code2) - log.info('Healthcheck returned searched code: %s' % searched_code2) - - log.info('Clear the log') - topology.logcap.flush() + cmd.append('--json') + if searched_code == CMD_OUTPUT: + 
searched_code = JSON_OUTPUT + cmd.append(instance.serverid) + cmd.extend(['healthcheck', '--check', 'replication' , 'backends:userroot:cl_trimming']) + + result = subprocess.run(cmd, capture_output=True, universal_newlines=True) + log.info(f'Running: {cmd}') + log.info(f'Stdout: {result.stdout}') + log.info(f'Stderr: {result.stdout}') + log.info(f'Return code: {result.returncode}') + stdout = result.stdout + + # stdout should not be empty + assert stdout is not None + assert len(stdout) > 0 + assert_is_in_result(stdout, searched_code, isnot=isnot) + assert_is_in_result(stdout, searched_code2, isnot=isnot) + def set_changelog_trimming(instance): @@ -112,15 +180,15 @@ def test_healthcheck_replication_replica_not_reachable(topology_m2): with suppress(Exception): repl.wait_for_replication(M1, M2, timeout=5) - run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) - run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) + run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=False) + run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=True) log.info('Set nsds5replicaport for the replication agreement to a reachable port') agmt_m1.replace('nsDS5ReplicaPort', '{}'.format(M2.port)) repl.wait_for_replication(M1, M2) - run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) - run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) + run_healthcheck_and_check_result(topology_m2, M1, CMD_OUTPUT, json=False) + run_healthcheck_and_check_result(topology_m2, M1, JSON_OUTPUT, json=True) @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") @@ -160,13 +228,13 @@ def test_healthcheck_changelog_trimming_not_configured(topology_m2): time.sleep(3) - run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) - run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) + run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=False) + run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=True) set_changelog_trimming(M1) - run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) - run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) + run_healthcheck_and_check_result(topology_m2, M1, CMD_OUTPUT, json=False) + run_healthcheck_and_check_result(topology_m2, M1, JSON_OUTPUT, json=True) @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") @@ -208,8 +276,8 @@ def test_healthcheck_replication_presence_of_conflict_entries(topology_m2): repl.test_replication_topology(topology_m2) - run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) - run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) + run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=False) + run_healthcheck_and_check_result(topology_m2, M1, RET_CODE, json=True) def test_healthcheck_non_replicated_suffixes(topology_m2): @@ -245,6 +313,44 @@ def test_healthcheck_non_replicated_suffixes(topology_m2): health_check_run(inst, topology_m2.logcap.log, args) +def test_healthcheck_replica_busy(topology_m3): + """Check that HealthCheck does not returns DSREPLLE0003 code when a replicva is busy + + :id: b7c4a5aa-ef98-11ef-87f5-482ae39447e5 + :setup: 3 MMR topology + :steps: + 1. Create a 3 suppliers full-mesh topology + 2. Generate constant modify load on S1 and S2 + 3. Wait a bit to ensure stable replication flow + 4. Perform a modify on S3 + 5. Use HealthCheck on S3 without --json option + 6. 
Use HealthCheck on S3 with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Healthcheck should not reports DSREPLLE0003 code and related details + 6. Healthcheck should not reports DSREPLLE0003 code and related details + """ + + RET_CODE = 'DSREPLLE0003' + # Is DSREPLLE0003 ignored if replica is busy ? + ignored = not ds_is_older("2.7") + + S1 = topology_m3.ms['supplier1'] + S2 = topology_m3.ms['supplier2'] + S3 = topology_m3.ms['supplier3'] + with LoadInstance(S1), LoadInstance(S2): + # Wait a bit to let replication starts + time.sleep(10) + # Create user on S3 then remove it: + LoadInstance(S3).user.delete() + # S3 agrements should now be in the replica busy state + run_healthcheck_and_check_result(topology_m3, S3, RET_CODE, json=False, isnot=ignored) + run_healthcheck_and_check_result(topology_m3, S3, RET_CODE, json=True, isnot=ignored) + + @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") def test_healthcheck_replication_out_of_sync_broken(topology_m3): """Check if HealthCheck returns DSREPLLE0001 code @@ -265,25 +371,50 @@ def test_healthcheck_replication_out_of_sync_broken(topology_m3): RET_CODE = 'DSREPLLE0001' - M1 = topology_m3.ms['supplier1'] - M2 = topology_m3.ms['supplier2'] - M3 = topology_m3.ms['supplier3'] + S1 = topology_m3.ms['supplier1'] + S2 = topology_m3.ms['supplier2'] + S3 = topology_m3.ms['supplier3'] log.info('Break supplier2 and supplier3') - replicas = Replicas(M2) - replica = replicas.list()[0] - replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl') + with BreakReplication(S2), BreakReplication(S3): + time.sleep(1) + log.info('Perform update on supplier1') + test_users_m1 = UserAccounts(S1, DEFAULT_SUFFIX) + test_users_m1.create_test_user(1005, 2000) - replicas = Replicas(M3) - replica = replicas.list()[0] - replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl') + time.sleep(3) + run_healthcheck_and_check_result(topology_m3, S1, RET_CODE, json=False) + run_healthcheck_and_check_result(topology_m3, S1, RET_CODE, json=True) - log.info('Perform update on supplier1') - test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX) - test_users_m1.create_test_user(1005, 2000) - run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=False) - run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=True) +def test_healthcheck_replication_out_of_sync_not_broken(topology_m3): + """Check that HealthCheck returns no issues when replication is in progress + + :id: 8305000d-ba4d-4c00-8331-be0e8bd92150 + :setup: 3 MMR topology + :steps: + 1. Create a 3 suppliers full-mesh topology, all replicas being synchronized + 2. Generate constant load on two supplier + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports no issue found + 4. 
Healthcheck reports no issue found + """ + + RET_CODE = CMD_OUTPUT + + S1 = topology_m3.ms['supplier1'] + S2 = topology_m3.ms['supplier2'] + S3 = topology_m3.ms['supplier3'] + + with LoadInstance(S1), LoadInstance(S2): + # Wait a bit to let replication starts + time.sleep(10) + run_healthcheck_and_check_result(topology_m3, S1, RET_CODE, json=False) + run_healthcheck_and_check_result(topology_m3, S1, RET_CODE, json=True) if __name__ == '__main__': diff --git a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py deleted file mode 100644 index 8f9bc0a00..000000000 --- a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py +++ /dev/null @@ -1,130 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2020 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -# - -import pytest -import os -import time -from datetime import * -from lib389.idm.user import UserAccounts -from lib389.utils import * -from lib389._constants import * -from lib389.cli_base import FakeArgs -from lib389.topologies import topology_m3 -from lib389.cli_ctl.health import health_check_run -from lib389.paths import Paths - -ds_paths = Paths() -log = logging.getLogger(__name__) - - -def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): - args = FakeArgs() - args.instance = instance.serverid - args.verbose = instance.verbose - args.list_errors = False - args.list_checks = False - args.check = ['replication'] - args.dry_run = False - - if json: - log.info('Use healthcheck with --json option') - args.json = json - health_check_run(instance, topology.logcap.log, args) - assert topology.logcap.contains(searched_code) - log.info('Healthcheck returned searched code: %s' % searched_code) - - if searched_code2 is not None: - assert topology.logcap.contains(searched_code2) - log.info('Healthcheck returned searched code: %s' % searched_code2) - else: - log.info('Use healthcheck without --json option') - args.json = json - health_check_run(instance, topology.logcap.log, args) - assert topology.logcap.contains(searched_code) - log.info('Healthcheck returned searched code: %s' % searched_code) - - if searched_code2 is not None: - assert topology.logcap.contains(searched_code2) - log.info('Healthcheck returned searched code: %s' % searched_code2) - - log.info('Clear the log') - topology.logcap.flush() - - -# This test is in separate file because it is timeout specific [email protected](ds_is_older("1.4.1"), reason="Not implemented") -#unstable or unstatus tests, skipped for now [email protected](max_runs=2, min_passes=1) -def test_healthcheck_replication_out_of_sync_not_broken(topology_m3): - """Check if HealthCheck returns DSREPLLE0003 code - - :id: 8305000d-ba4d-4c00-8331-be0e8bd92150 - :setup: 3 MMR topology - :steps: - 1. Create a 3 suppliers full-mesh topology, all replicas being synchronized - 2. Stop M1 - 3. Perform an update on M2 and M3. - 4. Check M2 and M3 are synchronized. - 5. From M2, reinitialize the M3 agreement - 6. Stop M2 and M3 - 7. Restart M1 - 8. Start M3 - 9. Use HealthCheck without --json option - 10. Use HealthCheck with --json option - :expectedresults: - 1. Success - 2. Success - 3. Success - 4. Success - 5. Success - 6. Success - 7. Success - 8. Success - 9. Healthcheck reports DSREPLLE0003 code and related details - 10. 
Healthcheck reports DSREPLLE0003 code and related details - """ - - RET_CODE = 'DSREPLLE0003' - - M1 = topology_m3.ms['supplier1'] - M2 = topology_m3.ms['supplier2'] - M3 = topology_m3.ms['supplier3'] - - log.info('Stop supplier1') - M1.stop() - - log.info('Perform update on supplier2 and supplier3') - test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX) - test_users_m3 = UserAccounts(M3, DEFAULT_SUFFIX) - test_users_m2.create_test_user(1000, 2000) - for user_num in range(1001, 3000): - test_users_m3.create_test_user(user_num, 2000) - time.sleep(2) - - log.info('Stop M2 and M3') - M2.stop() - M3.stop() - - log.info('Start M1 first, then M2, so that M2 acquires M1') - M1.start() - M2.start() - time.sleep(2) - - log.info('Start M3 which should not be able to acquire M1 since M2 is updating it') - M3.start() - time.sleep(2) - - run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=False) - run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=True) - - -if __name__ == '__main__': - # Run isolated - # -s for DEBUG mode - CURRENT_FILE = os.path.realpath(__file__) diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py index bf8150f55..7fd210591 100644 --- a/src/lib389/lib389/replica.py +++ b/src/lib389/lib389/replica.py @@ -1255,6 +1255,9 @@ class Replica(DSLdapObject): report['check'] = f'replication:agmts_status' yield report elif status['state'] == 'amber': + if "can't acquire busy replica" in status['reason']: + # Ignore replica busy condition + continue # Warning report = copy.deepcopy(DSREPLLE0003) report['detail'] = report['detail'].replace('SUFFIX', suffix)
0
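A stripped-down sketch of the subprocess-based check performed by the run_healthcheck_and_check_result helper in the commit above. It assumes dsctl is on PATH and that supplier3 is a local instance id; the command layout (optional --json before the server id, then healthcheck --check replication) mirrors the helper, with the cl_trimming check left out for brevity.
import re
import subprocess

def healthcheck_reports(serverid, code, json_output=False):
    """Run dsctl healthcheck and return True if `code` appears in its output."""
    cmd = ["dsctl"]
    if json_output:
        cmd.append("--json")
    cmd += [serverid, "healthcheck", "--check", "replication"]
    result = subprocess.run(cmd, capture_output=True, universal_newlines=True)
    # healthcheck output should never be empty, even when no issue is found
    assert result.stdout
    return re.search(re.escape(code), result.stdout) is not None

# a busy replica should no longer be flagged
assert not healthcheck_reports("supplier3", "DSREPLLE0003")
assert not healthcheck_reports("supplier3", "DSREPLLE0003", json_output=True)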
b2fe2aab7548a91beb0907071ae66d4e46f2095f
389ds/389-ds-base
Ticket 48267 - Add config setting to MO plugin to add objectclass Description: Add setting to plugin to auto add a predefined objectclass that allows "memberOf" attribute https://fedorahosted.org/389/ticket/48267 Reviewed by: nhosoi(Thanks!)
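A minimal sketch of the configuration change this commit's test exercises (the full test follows in the diff below), using plain python-ldap instead of the lib389 wrappers. The connection credentials are assumptions, and the plugin DN written out here is the usual cn=MemberOf Plugin,cn=plugins,cn=config entry that the test derives from the PLUGIN_MEMBER_OF constant; adjust it if your instance differs.
import ldap

conn = ldap.initialize("ldap://localhost:389")             # assumed instance URL
conn.simple_bind_s("cn=Directory Manager", "password")     # assumed credentials

MEMBEROF_DN = "cn=MemberOf Plugin,cn=plugins,cn=config"    # assumed plugin entry name

# an unknown objectclass must be rejected by the new config validation
try:
    conn.modify_s(MEMBEROF_DN,
                  [(ldap.MOD_REPLACE, "memberofAutoAddOC", [b"invalid123"])])
    raise AssertionError("invalid objectclass was not rejected")
except ldap.UNWILLING_TO_PERFORM:
    pass  # expected: validation refuses objectclasses missing from the schema

# inetuser is the existing objectclass the commit's test configures as the valid value
conn.modify_s(MEMBEROF_DN,
              [(ldap.MOD_REPLACE, "memberofAutoAddOC", [b"inetuser"])])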
commit b2fe2aab7548a91beb0907071ae66d4e46f2095f Author: Mark Reynolds <[email protected]> Date: Wed Sep 2 11:51:35 2015 -0400 Ticket 48267 - Add config setting to MO plugin to add objectclass Description: Add setting to plugin to auto add a predefined objectclass that allows "memberOf" attribute https://fedorahosted.org/389/ticket/48267 Reviewed by: nhosoi(Thanks!) diff --git a/dirsrvtests/suites/memberof_plugin/memberof_test.py b/dirsrvtests/suites/memberof_plugin/memberof_test.py index b3c98f778..e97c09a49 100644 --- a/dirsrvtests/suites/memberof_plugin/memberof_test.py +++ b/dirsrvtests/suites/memberof_plugin/memberof_test.py @@ -3,9 +3,10 @@ # All rights reserved. # # License: GPL (version 3 or any later version). -# See LICENSE for details. +# See LICENSE for details. # --- END COPYRIGHT BLOCK --- # + import os import sys import time @@ -21,9 +22,13 @@ from lib389.utils import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) - installation1_prefix = None +MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX + class TopologyStandalone(object): def __init__(self, standalone): @@ -51,43 +56,121 @@ def topology(request): standalone.create() standalone.open() + # Delete each instance in the end + def fin(): + standalone.delete() + #pass + request.addfinalizer(fin) + # Clear out the tmp dir standalone.clearTmpDir(__file__) return TopologyStandalone(standalone) -def test_memberof_init(topology): - ''' - Write any test suite initialization here(if needed) - ''' - - return - - -def test_memberof_(topology): - ''' - Write a single test here... - ''' - - return - - -def test_memberof_final(topology): - topology.standalone.delete() - log.info('memberof test suite PASSED') - - -def run_isolated(): - global installation1_prefix - installation1_prefix = None - - topo = topology(True) - test_memberof_init(topo) - test_memberof_(topo) - test_memberof_final(topo) +def test_memberof_auto_add_oc(topology): + """ + Test the auto add objectclass feature. The plugin should add a predefined + objectclass that will allow memberOf to be added to an entry. + """ + + # enable dynamic plugins + try: + topology.standalone.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-dynamic-plugins', + 'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! 
' + e.message['desc']) + assert False + + # Enable the plugin + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + + # First test invalid value (config validation) + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofAutoAddOC', + 'invalid123')]) + log.fatal('Incorrectly added invalid objectclass!') + assert False + except ldap.UNWILLING_TO_PERFORM: + log.info('Correctly rejected invalid objectclass') + except ldap.LDAPError as e: + ldap.error('Unexpected error adding invalid objectclass - error: ' + e.message['desc']) + assert False + + # Add valid objectclass + topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofAutoAddOC', + 'inetuser')]) + except ldap.LDAPError as e: + log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc']) + assert False + + # Add two users + try: + topology.standalone.add_s(Entry((USER1_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user1', + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) + assert False + + try: + topology.standalone.add_s(Entry((USER2_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user2', + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user2 entry, error: ' + e.message['desc']) + assert False + + # Add a group(that already includes one user + try: + topology.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top', + 'objectclass': 'groupOfNames', + 'cn': 'group', + 'member': USER1_DN + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add group entry, error: ' + e.message['desc']) + assert False + + # Add a user to the group + try: + topology.standalone.modify_s(GROUP_DN, + [(ldap.MOD_ADD, + 'member', + USER2_DN)]) + except ldap.LDAPError as e: + log.fatal('Failed to add user2 to group: error ' + e.message['desc']) + assert False + + log.info('Test complete.') if __name__ == '__main__': - run_isolated() - + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index 9b577b9e9..aad300fd4 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -145,6 +145,8 @@ static void memberof_fixup_task_thread(void *arg); static int memberof_fix_memberof(MemberOfConfig *config, char *dn, char *filter_str); static int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data); static int memberof_entry_in_scope(MemberOfConfig *config, Slapi_DN *sdn); +static int memberof_add_objectclass(char *auto_add_oc, const char *dn); +static int memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc); /*** implementation ***/ @@ -490,7 +492,7 @@ int memberof_postop_del(Slapi_PBlock *pb) { int ret = SLAPI_PLUGIN_SUCCESS; MemberOfConfig *mainConfig = NULL; - MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; Slapi_DN *sdn; void *caller_id = 
NULL; @@ -818,7 +820,7 @@ int memberof_postop_modrdn(Slapi_PBlock *pb) if(memberof_oktodo(pb)) { MemberOfConfig *mainConfig = 0; - MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; struct slapi_entry *pre_e = NULL; struct slapi_entry *post_e = NULL; Slapi_DN *pre_sdn = 0; @@ -937,6 +939,7 @@ typedef struct _replace_dn_data char *pre_dn; char *post_dn; char *type; + char *add_oc; } replace_dn_data; @@ -957,7 +960,9 @@ memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, { replace_dn_data data = {(char *)slapi_sdn_get_dn(pre_sdn), (char *)slapi_sdn_get_dn(post_sdn), - config->groupattrs[i]}; + config->groupattrs[i], + config->auto_add_oc + }; groupattrs[0] = config->groupattrs[i]; @@ -981,9 +986,9 @@ int memberof_replace_dn_type_callback(Slapi_Entry *e, void *callback_data) LDAPMod *mods[3]; char *delval[2]; char *addval[2]; - Slapi_PBlock *mod_pb = 0; + char *dn = NULL; - mod_pb = slapi_pblock_new(); + dn = slapi_entry_get_dn(e); mods[0] = &delmod; mods[1] = &addmod; @@ -1003,18 +1008,8 @@ int memberof_replace_dn_type_callback(Slapi_Entry *e, void *callback_data) addmod.mod_type = ((replace_dn_data *)callback_data)->type; addmod.mod_values = addval; - slapi_modify_internal_set_pb_ext( - mod_pb, slapi_entry_get_sdn(e), - mods, 0, 0, - memberof_get_plugin_id(), 0); - - slapi_modify_internal_pb(mod_pb); - - slapi_pblock_get(mod_pb, - SLAPI_PLUGIN_INTOP_RESULT, - &rc); - - slapi_pblock_destroy(mod_pb); + rc = memberof_add_memberof_attr(mods, dn, + ((replace_dn_data *)callback_data)->add_oc); return rc; } @@ -1083,7 +1078,7 @@ int memberof_postop_modify(Slapi_PBlock *pb) { int config_copied = 0; MemberOfConfig *mainConfig = 0; - MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; /* get the mod set */ slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods); @@ -1261,7 +1256,7 @@ int memberof_postop_add(Slapi_PBlock *pb) if(memberof_oktodo(pb) && (sdn = memberof_getsdn(pb))) { struct slapi_entry *e = NULL; - MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + MemberOfConfig configCopy = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; MemberOfConfig *mainConfig; slapi_pblock_get( pb, SLAPI_ENTRY_POST_OP, &e ); @@ -1455,7 +1450,6 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, LDAPMod *mods[3]; char *val[2]; char *replace_val[2]; - Slapi_PBlock *mod_pb = 0; Slapi_Entry *e = 0; memberofstringll *ll = 0; char *op_str = 0; @@ -1696,8 +1690,6 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, rc = memberof_fix_memberof_callback(e, config); } else { /* single entry - do mod */ - mod_pb = slapi_pblock_new(); - mods[0] = &mod; if(LDAP_MOD_REPLACE == mod_op) { @@ -1724,19 +1716,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, replace_mod.mod_type = config->memberof_attr; replace_mod.mod_values = replace_val; } - - slapi_modify_internal_set_pb( - mod_pb, op_to, - mods, 0, 0, - memberof_get_plugin_id(), 0); - - slapi_modify_internal_pb(mod_pb); - - slapi_pblock_get(mod_pb, - SLAPI_PLUGIN_INTOP_RESULT, - &rc); - - slapi_pblock_destroy(mod_pb); + rc = memberof_add_memberof_attr(mods, op_to, config->auto_add_oc); } } @@ -2899,7 +2879,6 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) * with the found values. 
*/ if (groups && slapi_valueset_count(groups)) { - Slapi_PBlock *mod_pb = slapi_pblock_new(); Slapi_Value *val = 0; Slapi_Mod *smod; LDAPMod **mods = (LDAPMod **) slapi_ch_malloc(2 * sizeof(LDAPMod *)); @@ -2922,17 +2901,10 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) mods[0] = slapi_mod_get_ldapmod_passout(smod); mods[1] = 0; - slapi_modify_internal_set_pb_ext( - mod_pb, sdn, mods, 0, 0, - memberof_get_plugin_id(), 0); - - slapi_modify_internal_pb(mod_pb); - - slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + rc = memberof_add_memberof_attr(mods, slapi_sdn_get_dn(sdn), config->auto_add_oc); ldap_mods_free(mods, 1); slapi_mod_free(&smod); - slapi_pblock_destroy(mod_pb); } else { /* No groups were found, so remove the memberOf attribute * from this entry. */ @@ -2943,3 +2915,88 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data) bail: return rc; } + +/* + * Add the "memberof" attribute to the entry. If we get an objectclass violation, + * check if we are auto adding an objectclass. IF so, add the oc, and try the + * operation one more time. + */ +static int +memberof_add_memberof_attr(LDAPMod **mods, const char *dn, char *add_oc) +{ + Slapi_PBlock *mod_pb = NULL; + int added_oc = 0; + int rc = 0; + + while(1){ + mod_pb = slapi_pblock_new(); + slapi_modify_internal_set_pb( + mod_pb, dn, mods, 0, 0, + memberof_get_plugin_id(), 0); + slapi_modify_internal_pb(mod_pb); + + slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + if (rc == LDAP_OBJECT_CLASS_VIOLATION){ + if (!add_oc || added_oc){ + /* + * We aren't auto adding an objectclass, or we already + * added the objectclass, and we are still failing. + */ + break; + } + if(memberof_add_objectclass(add_oc, dn)){ + /* Failed to add objectclass */ + break; + } + added_oc = 1; + slapi_pblock_destroy(mod_pb); + } else if (rc){ + /* Some other fatal error */ + break; + } else { + /* success */ + break; + } + } + slapi_pblock_destroy(mod_pb); + + return rc; +} + +/* + * Add the "auto add" objectclass to an entry + */ +static int +memberof_add_objectclass(char *auto_add_oc, const char *dn) +{ + Slapi_PBlock *mod_pb = NULL; + LDAPMod mod; + LDAPMod *mods[2]; + char *val[2]; + int rc = 0; + + mod_pb = slapi_pblock_new(); + mods[0] = &mod; + mods[1] = 0; + val[0] = auto_add_oc; + val[1] = 0; + + mod.mod_op = LDAP_MOD_ADD; + mod.mod_type = "objectclass"; + mod.mod_values = val; + + slapi_modify_internal_set_pb( + mod_pb, dn, mods, 0, 0, + memberof_get_plugin_id(), 0); + slapi_modify_internal_pb(mod_pb); + + slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + if (rc){ + slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, + "Failed to add objectclass (%s) to entry (%s)\n", + auto_add_oc, dn); + } + slapi_pblock_destroy(mod_pb); + + return rc; +} diff --git a/ldap/servers/plugins/memberof/memberof.h b/ldap/servers/plugins/memberof/memberof.h index 9d9d158bd..ece18e47d 100644 --- a/ldap/servers/plugins/memberof/memberof.h +++ b/ldap/servers/plugins/memberof/memberof.h @@ -41,6 +41,7 @@ #define MEMBEROF_ENTRY_SCOPE_ATTR "memberOfEntryScope" #define MEMBEROF_ENTRY_SCOPE_EXCLUDE_SUBTREE "memberOfEntryScopeExcludeSubtree" #define MEMBEROF_SKIP_NESTED_ATTR "memberOfSkipNested" +#define MEMBEROF_AUTO_ADD_OC "memberOfAutoAddOC" #define DN_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.12" #define NAME_OPT_UID_SYNTAX_OID "1.3.6.1.4.1.1466.115.121.1.34" @@ -60,6 +61,7 @@ typedef struct memberofconfig { Slapi_Attr **group_slapiattrs; int skip_nested; int fixup_task; + char *auto_add_oc; } 
MemberOfConfig; diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c index 10cbd7a05..99132d8b8 100644 --- a/ldap/servers/plugins/memberof/memberof_config.c +++ b/ldap/servers/plugins/memberof/memberof_config.c @@ -180,6 +180,7 @@ memberof_validate_config (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entr char *syntaxoid = NULL; char *config_dn = NULL; char *skip_nested = NULL; + char *auto_add_oc = NULL; char **entry_scopes = NULL; char **entry_exclude_scopes = NULL; int not_dn_syntax = 0; @@ -272,6 +273,22 @@ memberof_validate_config (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entr } } + if ((auto_add_oc = slapi_entry_attr_get_charptr(e, MEMBEROF_AUTO_ADD_OC))){ + char *sup = NULL; + + /* Check if the objectclass exists by looking for its superior oc */ + if((sup = slapi_schema_get_superior_name(auto_add_oc)) == NULL){ + PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, + "The %s configuration attribute must be set to " + "to an existing objectclass (unknown: %s)", + MEMBEROF_AUTO_ADD_OC, auto_add_oc); + *returncode = LDAP_UNWILLING_TO_PERFORM; + goto done; + } else { + slapi_ch_free_string(&sup); + } + } + if ((config_dn = slapi_entry_attr_get_charptr(e, SLAPI_PLUGIN_SHARED_CONFIG_AREA))){ /* Now check the shared config attribute, validate it now */ @@ -412,6 +429,7 @@ done: slapi_sdn_free(&config_sdn); slapi_ch_free_string(&config_dn); slapi_ch_free_string(&skip_nested); + slapi_ch_free_string(&auto_add_oc); if (*returncode != LDAP_SUCCESS) { @@ -445,6 +463,7 @@ memberof_apply_config (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* char **entryScopeExcludeSubtrees = NULL; char *sharedcfg = NULL; char *skip_nested = NULL; + char *auto_add_oc = NULL; int num_vals = 0; *returncode = LDAP_SUCCESS; @@ -478,6 +497,7 @@ memberof_apply_config (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* memberof_attr = slapi_entry_attr_get_charptr(e, MEMBEROF_ATTR); allBackends = slapi_entry_attr_get_charptr(e, MEMBEROF_BACKEND_ATTR); skip_nested = slapi_entry_attr_get_charptr(e, MEMBEROF_SKIP_NESTED_ATTR); + auto_add_oc = slapi_entry_attr_get_charptr(e, MEMBEROF_AUTO_ADD_OC); /* * We want to be sure we don't change the config in the middle of @@ -602,6 +622,8 @@ memberof_apply_config (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry* theConfig.allBackends = 0; } + theConfig.auto_add_oc = auto_add_oc; + /* * Check and process the entry scopes */ @@ -731,6 +753,9 @@ memberof_copy_config(MemberOfConfig *dest, MemberOfConfig *src) dest->allBackends = src->allBackends; } + slapi_ch_free_string(&dest->auto_add_oc); + dest->auto_add_oc = slapi_ch_strdup(src->auto_add_oc); + if(src->entryScopes){ int num_vals = 0; @@ -770,7 +795,7 @@ memberof_free_config(MemberOfConfig *config) slapi_attr_free(&config->group_slapiattrs[i]); } slapi_ch_free((void **)&config->group_slapiattrs); - + slapi_ch_free_string(&config->auto_add_oc); slapi_ch_free_string(&config->memberof_attr); memberof_free_scope(config->entryScopes, &config->entryScopeCount); memberof_free_scope(config->entryScopeExcludeSubtrees, &config->entryExcludeScopeCount);
0
ac44337bd97fe63071e7d83e9dcd788f2af1feab
389ds/389-ds-base
Ticket 49079: deadlock on cos cache rebuild Bug Description: To rebuild the cache, the cos_cache_creation thread gets cos definitions from the backend, which means change_lock is held while cos_cache_creation acquires backend pages. A deadlock can happen if cos_post_op is called while the backend is locked, for example when a bepreop (urp) does an internal update on a cos definition: that thread holds backend pages that cos_cache_creation will need, and will acquire change_lock to notify the cos_cache thread. Fix Description: Let the cos cache rebuild thread run without holding change_lock. The lock prevents parallel runs, but a flag can do the same. https://fedorahosted.org/389/ticket/49079 Reviewed by: William Brown and Ludwig Krispenz (thanks to you both !!) Platforms tested: F23 Flag Day: no Doc impact: no
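The fix itself is C, but the locking pattern it describes is easy to miss in the diff below. A minimal Python sketch of that pattern follows, with hypothetical names (change_lock, cache_at_work, rebuild_cache) standing in for the plugin's mutex, the cos_cache_at_work flag, and the cache rebuild; it is an illustration under those assumptions, not the commit's code:

```python
import threading
import time

change_lock = threading.Lock()
cache_at_work = False  # protected by change_lock (mirrors cos_cache_at_work)


def rebuild_cache():
    """Stand-in for the expensive rebuild that reads cos definitions
    from the backend (cos_cache_create_unlock in the patch)."""
    time.sleep(0.1)


def rebuild_under_flag(max_tries=10):
    """Serialize rebuilds with a flag instead of holding change_lock.

    change_lock is held only to test and set the flag, so a thread that
    already holds backend pages can still take change_lock to post a
    change notification without deadlocking against the rebuild.
    """
    global cache_at_work
    change_lock.acquire()
    try:
        for _ in range(max_tries):
            if cache_at_work:
                # Another thread is rebuilding; drop the lock, wait, retry.
                change_lock.release()
                time.sleep(1.0)
                change_lock.acquire()
                continue
            cache_at_work = True
            change_lock.release()
            try:
                rebuild_cache()  # runs with change_lock NOT held
            finally:
                change_lock.acquire()
                cache_at_work = False
            break
    finally:
        change_lock.release()
```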
commit ac44337bd97fe63071e7d83e9dcd788f2af1feab Author: Thierry Bordaz <[email protected]> Date: Tue Jan 10 14:32:53 2017 +0100 Ticket 49079: deadlock on cos cache rebuild Bug Description: To rebuild the cache cos_cache_creation the thread gets cos definitions from backend. It means change_lock is held then cos_cache_creation will acquire some backend pages. A deadlock can happen if cos_post_op is called while backend is locked. For example if a bepreop (urp) does an internal update on a cos definition. Then the thread holds backend pages, that will be needed by cos_cache_creation, and will acquire change_lock for notification of the cos_cache thread Fix Description: Let cos cache rebuild thread run without holding change_lock. The lock prevents parallel run but a flag can do the same. https://fedorahosted.org/389/ticket/49079 Reviewed by: William Brown and Ludwig Krispenz (thanks to you both !!) Platforms tested: F23 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c index 5522d6e37..894225448 100644 --- a/ldap/servers/plugins/cos/cos_cache.c +++ b/ldap/servers/plugins/cos/cos_cache.c @@ -111,7 +111,9 @@ void * cos_get_plugin_identity(void); /* the global plugin handle */ static volatile vattr_sp_handle *vattr_handle = NULL; +/* both variables are protected by change_lock */ static int cos_cache_notify_flag = 0; +static PRBool cos_cache_at_work = PR_FALSE; /* service definition cache structs */ @@ -199,7 +201,8 @@ typedef struct _cos_cache cosCache; static cosCache *pCache; /* always the current global cache, only use getref to get */ /* the place to start if you want a new cache */ -static int cos_cache_create(void); +static int cos_cache_create_unlock(void); +static int cos_cache_creation_lock(void); /* cache index related functions */ static int cos_cache_index_all(cosCache *pCache); @@ -386,7 +389,7 @@ static void cos_cache_wait_on_change(void *arg) pCache = 0; /* create initial cache */ - cos_cache_create(); + cos_cache_creation_lock(); slapi_lock_mutex(start_lock); started = 1; @@ -419,7 +422,7 @@ static void cos_cache_wait_on_change(void *arg) * before we go running off doing lots of stuff lets check if we should stop */ if(keeprunning) { - cos_cache_create(); + cos_cache_creation_lock(); } cos_cache_notify_flag = 0; /* Dealt with it */ }/* while */ @@ -431,22 +434,25 @@ static void cos_cache_wait_on_change(void *arg) slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "<-- cos_cache_wait_on_change thread exit\n"); } + /* - cos_cache_create + cos_cache_create_unlock --------------------- Walks the definitions in the DIT and creates the cache. Once created, it swaps the new cache for the old one, releasing its refcount to the old cache and allowing it to be destroyed. 
+ + called while change_lock is NOT held */ -static int cos_cache_create(void) +static int cos_cache_create_unlock(void) { int ret = -1; cosCache *pNewCache; static int firstTime = 1; int cache_built = 0; - slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_create\n"); + slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_create_unlock\n"); pNewCache = (cosCache*)slapi_ch_malloc(sizeof(cosCache)); if(pNewCache) @@ -509,21 +515,21 @@ static int cos_cache_create(void) { /* we should not go on without proper schema checking */ cos_cache_release(pNewCache); - slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_create - Failed to cache the schema\n"); + slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_create_unlock - Failed to cache the schema\n"); } } else { /* currently we cannot go on without the indexes */ cos_cache_release(pNewCache); - slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_create - Failed to index cache\n"); + slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_create_unlock - Failed to index cache\n"); } } else { if(firstTime) { - slapi_log_err(SLAPI_LOG_PLUGIN, COS_PLUGIN_SUBSYSTEM, "cos_cache_create - cos disabled\n"); + slapi_log_err(SLAPI_LOG_PLUGIN, COS_PLUGIN_SUBSYSTEM, "cos_cache_create_unlock - cos disabled\n"); firstTime = 0; } @@ -531,7 +537,7 @@ static int cos_cache_create(void) } } else - slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_create - Memory allocation failure\n"); + slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_create_unlock - Memory allocation failure\n"); /* make sure we have a new cache */ @@ -563,10 +569,53 @@ static int cos_cache_create(void) } - slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "<-- cos_cache_create\n"); + slapi_log_err(SLAPI_LOG_TRACE, COS_PLUGIN_SUBSYSTEM, "<-- cos_cache_create_unlock\n"); return ret; } +/* cos_cache_creation_lock is called with change_lock being hold: + * slapi_lock_mutex(change_lock) + * + * To rebuild the cache cos_cache_creation gets cos definitions from backend, that + * means change_lock is held then cos_cache_creation will acquire some backend pages. + * + * A deadlock can happen if cos_post_op is called while backend is locked. + * For example if a bepreop (urp) does an internal update on a cos definition, + * the thread holds backend pages that will be needed by cos_cache_creation. + * + * A solution is to use a flag 'cos_cache_at_work' protected by change_lock, + * release change_lock, recreate the cos_cache, acquire change_lock reset the flag. + * + * returned value: result of cos_cache_create_unlock + * + */ +static int cos_cache_creation_lock(void) +{ + int ret = -1; + int max_tries = 10; + + for (; max_tries != 0; max_tries--) { + /* if the cos_cache is already under work (cos_cache_create_unlock) + * wait 1 second + */ + if (cos_cache_at_work) { + slapi_log_err(SLAPI_LOG_FATAL, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_creation_lock already rebuilding cos_cache... 
retry\n"); + DS_Sleep (PR_MillisecondsToInterval(1000)); + continue; + } + cos_cache_at_work = PR_TRUE; + slapi_unlock_mutex(change_lock); + ret = cos_cache_create_unlock(); + slapi_lock_mutex(change_lock); + cos_cache_at_work = PR_FALSE; + break; + } + if (!max_tries) { + slapi_log_err(SLAPI_LOG_FATAL, COS_PLUGIN_SUBSYSTEM, "--> cos_cache_creation_lock rebuilt was to long, skip this rebuild\n"); + } + + return ret; +} /* cos_cache_build_definition_list @@ -1648,7 +1697,7 @@ int cos_cache_getref(cos_cache **pptheCache) slapi_lock_mutex(change_lock); if(pCache == NULL) { - if(cos_cache_create()) + if(cos_cache_creation_lock()) { /* there was a problem or no COS definitions were found */ slapi_log_err(SLAPI_LOG_PLUGIN, COS_PLUGIN_SUBSYSTEM, "cos_cache_getref - No cos cache created\n");
0
118a7f942ce04ef4754a5fd06c0076f3fd1c57f4
389ds/389-ds-base
Issue 6377 - syntax error in setup.py (#6378) A syntax error due to badly nested quotes in dblib.py causes trouble in setup.py and dsconf dblib b2b2mdb/mdb2dbd. Fix it by using double quotes for the f-string and single quotes for the embedded strings. Issue: #6377 Reviewed by: @tbordaz, @droideck (Thanks!)
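For context (not part of the patch): before Python 3.12, an f-string literal cannot reuse its own quote character inside a replacement field, so the original single-quoted form is a SyntaxError at parse time, while the double-quoted form accepted here parses fine. The `be` dict below is just a stand-in value:

```python
# Stand-in data; the real values come from dsconf's backend description.
be = {'cl5name': '/tmp/changelog.ldif'}
cl5dbname = 'changelog'

# Broken on Python < 3.12 (SyntaxError): the inner 'cl5name' quotes end
# the f-string literal early.
#   msg = f'Exporting changelog {cl5dbname} to {be['cl5name']}'

# Fixed form from the patch: double quotes for the f-string, single
# quotes for the embedded key.
msg = f"Exporting changelog {cl5dbname} to {be['cl5name']}"
print(msg)
```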
commit 118a7f942ce04ef4754a5fd06c0076f3fd1c57f4 Author: progier389 <[email protected]> Date: Tue Oct 22 17:26:46 2024 +0200 Issue 6377 - syntax error in setup.py (#6378) Syntax error due to badly nested quotes in dblib.py cause trouble in setup.py and dsconf dblib b2b2mdb/mdb2dbd Fix bit using double quotes in the f-expression and quotes for the embedded strings. Issue: #6377 Reviewed by: @tbordaz, @droideck (Thank!) diff --git a/src/lib389/lib389/cli_ctl/dblib.py b/src/lib389/lib389/cli_ctl/dblib.py index 7852dc6ed..053a72d61 100644 --- a/src/lib389/lib389/cli_ctl/dblib.py +++ b/src/lib389/lib389/cli_ctl/dblib.py @@ -205,7 +205,7 @@ def export_changelog(be, dblib): return False try: cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname'] - _log.info(f'Exporting changelog {cl5dbname} to {be['cl5name']}') + _log.info(f"Exporting changelog {cl5dbname} to {be['cl5name']}") run_dbscan(['-D', dblib, '-f', cl5dbname, '-X', be['cl5name']]) return True except subprocess.CalledProcessError as e: @@ -216,7 +216,7 @@ def import_changelog(be, dblib): # import backend changelog try: cl5dbname = be['eccl5dbname'] if dblib == "bdb" else be['cl5dbname'] - _log.info(f'Importing changelog {cl5dbname} from {be['cl5name']}') + _log.info(f"Importing changelog {cl5dbname} from {be['cl5name']}") run_dbscan(['-D', dblib, '-f', cl5dbname, '--import', be['cl5name'], '--do-it']) return True except subprocess.CalledProcessError as e:
0
119628c030e209ba342b83914b76eeb1fbcd2dd4
389ds/389-ds-base
Bug 616500 - fix coverity Defect Type: Resource leaks issues CID 12094 - 12136 https://bugzilla.redhat.com/show_bug.cgi?id=616500 Resolves: bug 616500 Bug description: fix coverity Defect Type: Resource leaks issues CID 12116 description: Fixed resource leaks in scalab01_addLogin().
commit 119628c030e209ba342b83914b76eeb1fbcd2dd4 Author: Endi S. Dewata <[email protected]> Date: Sun Jul 18 17:28:38 2010 -0500 Bug 616500 - fix coverify Defect Type: Resource leaks issues CID 12094 - 12136 https://bugzilla.redhat.com/show_bug.cgi?id=616500 Resolves: bug 616500 Bug description: fix coverify Defect Type: Resource leaks issues CID 12116 description: Fixed resource leaks in scalab01_addLogin(). diff --git a/ldap/servers/slapd/tools/ldclt/scalab01.c b/ldap/servers/slapd/tools/ldclt/scalab01.c index 595df0ff6..397099aaa 100644 --- a/ldap/servers/slapd/tools/ldclt/scalab01.c +++ b/ldap/servers/slapd/tools/ldclt/scalab01.c @@ -416,11 +416,19 @@ scalab01_addLogin ( int ret; /* Return value */ isp_user *new; /* New entry */ isp_user *cur; /* Current entry */ + int rc = 0; /* * Create the new record. */ new = (isp_user *) malloc (sizeof (isp_user)); + if (NULL == new) { + fprintf (stderr, "ldclt[%d]: %s: cannot malloc(isp_user), error=%d (%s)\n", + mctx.pid, tttctx->thrdId, errno, strerror (errno)); + fflush (stderr); + return -1; + } + strcpy (new->dn, dn); new->cost = new->counter = duration; new->next = NULL; @@ -435,7 +443,8 @@ scalab01_addLogin ( fprintf (stderr, "ldclt[%d]: %s: cannot mutex_lock(), error=%d (%s)\n", mctx.pid, tttctx->thrdId, ret, strerror (ret)); fflush (stderr); - return (-1); + rc = -1; + goto error; } /* @@ -476,6 +485,12 @@ scalab01_addLogin ( } } + goto done; + +error: + if (new) free(new); + +done: /* * Free mutex */ @@ -484,10 +499,10 @@ scalab01_addLogin ( fprintf (stderr, "ldclt[%d]: %s: cannot mutex_unlock(), error=%d (%s)\n", mctx.pid, tttctx->thrdId, ret, strerror (ret)); fflush (stderr); - return (-1); + rc = -1; } - return (0); + return rc; }
0
ee320163c6b7913ddae41cb8954b237d1b99eadf
389ds/389-ds-base
Ticket 284 - Remove unnecessary SNMP MIB files We currently carry around some standard MIB files in our source tree and install them as well. Aside from our DS specific MIB file, these are all included as a part of the Net-SNMP distribution. We should not be carrying these files around as well. This patch removes the standard MIB files from our tree and updates the Makefile to avoid installing them.
commit ee320163c6b7913ddae41cb8954b237d1b99eadf Author: Nathan Kinder <[email protected]> Date: Wed Feb 8 09:38:26 2012 -0800 Ticket 284 - Remove unnecessary SNMP MIB files We currently carry around some standard MIB files in our soruce tree and install them as well. Aside from our DS specific MIB file, these are all included as a part of the Net-SNMP distribution. We should not be carrying these files around as well. This patch removes the standard MIB files from our tree and updates the Makefile to avoid installing them. diff --git a/Makefile.am b/Makefile.am index e84b63d89..bb761e3fc 100644 --- a/Makefile.am +++ b/Makefile.am @@ -404,13 +404,7 @@ inf_DATA = ldap/admin/src/slapd.inf \ ldap/admin/src/scripts/dsupdate.map \ ldap/admin/src/scripts/dsorgentries.map -mib_DATA = ldap/servers/snmp/RFC-1215.txt \ - ldap/servers/snmp/SNMPv2-TC.txt \ - ldap/servers/snmp/redhat-directory.mib \ - ldap/servers/snmp/SNMPv2-CONF.txt \ - ldap/servers/snmp/NETWORK-SERVICES-MIB.txt \ - ldap/servers/snmp/RFC1155-SMI.txt \ - ldap/servers/snmp/SNMPv2-SMI.txt +mib_DATA = ldap/servers/snmp/redhat-directory.mib pkgconfig_DATA = $(PACKAGE_NAME).pc diff --git a/Makefile.in b/Makefile.in index b33c6a55c..88ab46f0d 100644 --- a/Makefile.in +++ b/Makefile.in @@ -1604,14 +1604,7 @@ inf_DATA = ldap/admin/src/slapd.inf \ ldap/admin/src/scripts/dsupdate.map \ ldap/admin/src/scripts/dsorgentries.map -mib_DATA = ldap/servers/snmp/RFC-1215.txt \ - ldap/servers/snmp/SNMPv2-TC.txt \ - ldap/servers/snmp/redhat-directory.mib \ - ldap/servers/snmp/SNMPv2-CONF.txt \ - ldap/servers/snmp/NETWORK-SERVICES-MIB.txt \ - ldap/servers/snmp/RFC1155-SMI.txt \ - ldap/servers/snmp/SNMPv2-SMI.txt - +mib_DATA = ldap/servers/snmp/redhat-directory.mib pkgconfig_DATA = $(PACKAGE_NAME).pc #------------------------ diff --git a/ldap/servers/snmp/NETWORK-SERVICES-MIB.txt b/ldap/servers/snmp/NETWORK-SERVICES-MIB.txt deleted file mode 100644 index cadc5504d..000000000 --- a/ldap/servers/snmp/NETWORK-SERVICES-MIB.txt +++ /dev/null @@ -1,650 +0,0 @@ --- extracted from rfc2788.txt --- at Fri Mar 24 07:07:18 2000 - - NETWORK-SERVICES-MIB DEFINITIONS ::= BEGIN - - IMPORTS - OBJECT-TYPE, Counter32, Gauge32, MODULE-IDENTITY, mib-2 - FROM SNMPv2-SMI - TimeStamp, TEXTUAL-CONVENTION - FROM SNMPv2-TC - MODULE-COMPLIANCE, OBJECT-GROUP - FROM SNMPv2-CONF - SnmpAdminString - FROM SNMP-FRAMEWORK-MIB; - - application MODULE-IDENTITY - LAST-UPDATED "200003030000Z" - ORGANIZATION "IETF Mail and Directory Management Working Group" - CONTACT-INFO - " Ned Freed - - Postal: Innosoft International, Inc. - 1050 Lakes Drive - West Covina, CA 91790 - US - - Tel: +1 626 919 3600 - Fax: +1 626 919 3614 - - E-Mail: [email protected]" - DESCRIPTION - "The MIB module describing network service applications" - REVISION "200003030000Z" - DESCRIPTION - "This revision, published in RFC 2788, changes a number of - DisplayStrings to SnmpAdminStrings. Note that this change - is not strictly supported by SMIv2. However, the alternative - of deprecating the old objects and defining new objects - would have a more adverse impact on backward compatibility - and interoperability, given the particular semantics of - these objects. The defining reference for distinguished - names has also been updated from RFC 1779 to RFC 2253." - REVISION "199905120000Z" - DESCRIPTION - "This revision fixes a few small technical problems found - in previous versions, mostly in regards to the conformance - groups for different versions of this MIB. 
No changes have - been made to the objects this MIB defines since RFC 2248." - REVISION "199708170000Z" - DESCRIPTION - "This revision, published in RFC 2248, adds the - applDescription and applURL objects, adds the quiescing - state to the applOperStatus object and renames the MIB - from the APPLICATION-MIB to the NETWORK-SERVICE-MIB." - REVISION "199311280000Z" - DESCRIPTION - "The original version of this MIB was published in RFC 1565" - ::= {mib-2 27} - - -- Textual conventions - - -- DistinguishedName is used to refer to objects in the - -- directory. - - DistinguishedName ::= TEXTUAL-CONVENTION - DISPLAY-HINT "255a" - STATUS current - DESCRIPTION - "A Distinguished Name represented in accordance with - RFC 2253, presented in the UTF-8 charset defined in - RFC 2279." - SYNTAX OCTET STRING (SIZE (0..255)) - - -- Uniform Resource Locators are stored in URLStrings. - - URLString ::= TEXTUAL-CONVENTION - DISPLAY-HINT "255a" - STATUS current - DESCRIPTION - "A Uniform Resource Locator represented in accordance - with RFCs 1738 and 2368, presented in the NVT ASCII - charset defined in RFC 854." - SYNTAX OCTET STRING (SIZE (0..255)) - - -- The basic applTable contains a list of the application - -- entities. - - applTable OBJECT-TYPE - SYNTAX SEQUENCE OF ApplEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - "The table holding objects which apply to all different - kinds of applications providing network services. - Each network service application capable of being - monitored should have a single entry in this table." - ::= {application 1} - - applEntry OBJECT-TYPE - SYNTAX ApplEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - "An entry associated with a single network service - application." - INDEX {applIndex} - ::= {applTable 1} - - ApplEntry ::= SEQUENCE { - applIndex - INTEGER, - applName - SnmpAdminString, - applDirectoryName - DistinguishedName, - applVersion - SnmpAdminString, - applUptime - TimeStamp, - applOperStatus - INTEGER, - applLastChange - TimeStamp, - applInboundAssociations - Gauge32, - applOutboundAssociations - Gauge32, - applAccumulatedInboundAssociations - Counter32, - applAccumulatedOutboundAssociations - Counter32, - applLastInboundActivity - TimeStamp, - applLastOutboundActivity - TimeStamp, - applRejectedInboundAssociations - Counter32, - applFailedOutboundAssociations - Counter32, - applDescription - SnmpAdminString, - applURL - URLString - } - - applIndex OBJECT-TYPE - SYNTAX INTEGER (1..2147483647) - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - "An index to uniquely identify the network service - application. This attribute is the index used for - lexicographic ordering of the table." - ::= {applEntry 1} - - applName OBJECT-TYPE - SYNTAX SnmpAdminString - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The name the network service application chooses to be - known by." - ::= {applEntry 2} - - applDirectoryName OBJECT-TYPE - SYNTAX DistinguishedName - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The Distinguished Name of the directory entry where - static information about this application is stored. - An empty string indicates that no information about - the application is available in the directory." - ::= {applEntry 3} - - applVersion OBJECT-TYPE - SYNTAX SnmpAdminString - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The version of network service application software. - This field is usually defined by the vendor of the - network service application software." 
- ::= {applEntry 4} - applUptime OBJECT-TYPE - SYNTAX TimeStamp - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The value of sysUpTime at the time the network service - application was last initialized. If the application was - last initialized prior to the last initialization of the - network management subsystem, then this object contains - a zero value." - ::= {applEntry 5} - - applOperStatus OBJECT-TYPE - SYNTAX INTEGER { - up(1), - down(2), - halted(3), - congested(4), - restarting(5), - quiescing(6) - } - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "Indicates the operational status of the network service - application. 'down' indicates that the network service is - not available. 'up' indicates that the network service - is operational and available. 'halted' indicates that the - service is operational but not available. 'congested' - indicates that the service is operational but no additional - inbound associations can be accommodated. 'restarting' - indicates that the service is currently unavailable but is - in the process of restarting and will be available soon. - 'quiescing' indicates that service is currently operational - but is in the process of shutting down. Additional inbound - associations may be rejected by applications in the - 'quiescing' state." - ::= {applEntry 6} - - applLastChange OBJECT-TYPE - SYNTAX TimeStamp - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The value of sysUpTime at the time the network service - application entered its current operational state. If - the current state was entered prior to the last - initialization of the local network management subsystem, - then this object contains a zero value." - ::= {applEntry 7} - - applInboundAssociations OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The number of current associations to the network service - application, where it is the responder. An inbound - association occurs when another application successfully - connects to this one." - ::= {applEntry 8} - - applOutboundAssociations OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The number of current associations to the network service - application, where it is the initiator. An outbound - association occurs when this application successfully - connects to another one." - ::= {applEntry 9} - - applAccumulatedInboundAssociations OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The total number of associations to the application entity - since application initialization, where it was the responder." - ::= {applEntry 10} - - applAccumulatedOutboundAssociations OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The total number of associations to the application entity - since application initialization, where it was the initiator." - ::= {applEntry 11} - - applLastInboundActivity OBJECT-TYPE - SYNTAX TimeStamp - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The value of sysUpTime at the time this application last - had an inbound association. If the last association - occurred prior to the last initialization of the network - subsystem, then this object contains a zero value." - ::= {applEntry 12} - - applLastOutboundActivity OBJECT-TYPE - SYNTAX TimeStamp - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The value of sysUpTime at the time this application last - had an outbound association. 
If the last association - occurred prior to the last initialization of the network - subsystem, then this object contains a zero value." - ::= {applEntry 13} - - applRejectedInboundAssociations OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The total number of inbound associations the application - entity has rejected, since application initialization. - Rejected associations are not counted in the accumulated - association totals. Note that this only counts - associations the application entity has rejected itself; - it does not count rejections that occur at lower layers - of the network. Thus, this counter may not reflect the - true number of failed inbound associations." - ::= {applEntry 14} - - applFailedOutboundAssociations OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The total number associations where the application entity - is initiator and association establishment has failed, - since application initialization. Failed associations are - not counted in the accumulated association totals." - ::= {applEntry 15} - - applDescription OBJECT-TYPE - SYNTAX SnmpAdminString - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "A text description of the application. This information - is intended to identify and briefly describe the - application in a status display." - ::= {applEntry 16} - - applURL OBJECT-TYPE - SYNTAX URLString - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "A URL pointing to a description of the application. - This information is intended to identify and describe - the application in a status display." - ::= {applEntry 17} - - -- The assocTable augments the information in the applTable - -- with information about associations. Note that two levels - -- of compliance are specified below, depending on whether - -- association monitoring is mandated. - - assocTable OBJECT-TYPE - SYNTAX SEQUENCE OF AssocEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - "The table holding a set of all active application - associations." - ::= {application 2} - - assocEntry OBJECT-TYPE - SYNTAX AssocEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - "An entry associated with an association for a network - service application." - INDEX {applIndex, assocIndex} - ::= {assocTable 1} - - AssocEntry ::= SEQUENCE { - assocIndex - INTEGER, - assocRemoteApplication - SnmpAdminString, - assocApplicationProtocol - OBJECT IDENTIFIER, - assocApplicationType - INTEGER, - assocDuration - TimeStamp - } - - assocIndex OBJECT-TYPE - SYNTAX INTEGER (1..2147483647) - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - "An index to uniquely identify each association for a network - service application. This attribute is the index that is - used for lexicographic ordering of the table. Note that the - table is also indexed by the applIndex." - ::= {assocEntry 1} - - assocRemoteApplication OBJECT-TYPE - SYNTAX SnmpAdminString - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The name of the system running remote network service - application. For an IP-based application this should be - either a domain name or IP address. For an OSI application - it should be the string encoded distinguished name of the - managed object. For X.400(1984) MTAs which do not have a - Distinguished Name, the RFC 2156 syntax 'mta in - globalid' used in X400-Received: fields can be used. Note, - however, that not all connections an MTA makes are - necessarily to another MTA." 
- ::= {assocEntry 2} - - assocApplicationProtocol OBJECT-TYPE - SYNTAX OBJECT IDENTIFIER - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "An identification of the protocol being used for the - application. For an OSI Application, this will be the - Application Context. For Internet applications, OID - values of the form {applTCPProtoID port} or {applUDPProtoID - port} are used for TCP-based and UDP-based protocols, - respectively. In either case 'port' corresponds to the - primary port number being used by the protocol. The - usual IANA procedures may be used to register ports for - new protocols." - ::= {assocEntry 3} - - assocApplicationType OBJECT-TYPE - SYNTAX INTEGER { - uainitiator(1), - uaresponder(2), - peerinitiator(3), - peerresponder(4)} - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "This indicates whether the remote application is some type of - client making use of this network service (e.g., a Mail User - Agent) or a server acting as a peer. Also indicated is whether - the remote end initiated an incoming connection to the network - service or responded to an outgoing connection made by the - local application. MTAs and messaging gateways are - considered to be peers for the purposes of this variable." - ::= {assocEntry 4} - - assocDuration OBJECT-TYPE - SYNTAX TimeStamp - MAX-ACCESS read-only - STATUS current - DESCRIPTION - "The value of sysUpTime at the time this association was - started. If this association started prior to the last - initialization of the network subsystem, then this - object contains a zero value." - ::= {assocEntry 5} - - - -- Conformance information - - applConformance OBJECT IDENTIFIER ::= {application 3} - - applGroups OBJECT IDENTIFIER ::= {applConformance 1} - applCompliances OBJECT IDENTIFIER ::= {applConformance 2} - - -- Compliance statements - - applCompliance MODULE-COMPLIANCE - STATUS obsolete - DESCRIPTION - "The compliance statement for RFC 1565 implementations - which support the Network Services Monitoring MIB - for basic monitoring of network service applications. - This is the basic compliance statement for RFC 1565." - MODULE - MANDATORY-GROUPS {applRFC1565Group} - ::= {applCompliances 1} - - assocCompliance MODULE-COMPLIANCE - STATUS obsolete - DESCRIPTION - "The compliance statement for RFC 1565 implementations - which support the Network Services Monitoring MIB - for basic monitoring of network service applications - and their associations." - MODULE - MANDATORY-GROUPS {applRFC1565Group, assocRFC1565Group} - ::= {applCompliances 2} - - applRFC2248Compliance MODULE-COMPLIANCE - STATUS deprecated - DESCRIPTION - "The compliance statement for RFC 2248 implementations - which support the Network Services Monitoring MIB - for basic monitoring of network service applications." - MODULE - MANDATORY-GROUPS {applRFC2248Group} - ::= {applCompliances 3} - - assocRFC2248Compliance MODULE-COMPLIANCE - STATUS deprecated - DESCRIPTION - "The compliance statement for RFC 2248 implementations - which support the Network Services Monitoring MIB for - basic monitoring of network service applications and - their associations." - MODULE - MANDATORY-GROUPS {applRFC2248Group, assocRFC2248Group} - ::= {applCompliances 4} - - applRFC2788Compliance MODULE-COMPLIANCE - STATUS current - DESCRIPTION - "The compliance statement for RFC 2788 implementations - which support the Network Services Monitoring MIB - for basic monitoring of network service applications." 
- MODULE - MANDATORY-GROUPS {applRFC2788Group} - ::= {applCompliances 5} - - assocRFC2788Compliance MODULE-COMPLIANCE - STATUS current - DESCRIPTION - "The compliance statement for RFC 2788 implementations - which support the Network Services Monitoring MIB for - basic monitoring of network service applications and - their associations." - MODULE - MANDATORY-GROUPS {applRFC2788Group, assocRFC2788Group} - ::= {applCompliances 6} - - - -- Units of conformance - - applRFC1565Group OBJECT-GROUP - OBJECTS { - applName, applVersion, applUptime, applOperStatus, - applLastChange, applInboundAssociations, - applOutboundAssociations, applAccumulatedInboundAssociations, - applAccumulatedOutboundAssociations, applLastInboundActivity, - applLastOutboundActivity, applRejectedInboundAssociations, - applFailedOutboundAssociations} - STATUS obsolete - DESCRIPTION - "A collection of objects providing basic monitoring of - network service applications. This is the original set - of such objects defined in RFC 1565." - ::= {applGroups 7} - - assocRFC1565Group OBJECT-GROUP - OBJECTS { - assocRemoteApplication, assocApplicationProtocol, - assocApplicationType, assocDuration} - STATUS obsolete - DESCRIPTION - "A collection of objects providing basic monitoring of - network service applications' associations. This is the - original set of such objects defined in RFC 1565." - ::= {applGroups 2} - - applRFC2248Group OBJECT-GROUP - OBJECTS { - applName, applVersion, applUptime, applOperStatus, - applLastChange, applInboundAssociations, - applOutboundAssociations, applAccumulatedInboundAssociations, - applAccumulatedOutboundAssociations, applLastInboundActivity, - applLastOutboundActivity, applRejectedInboundAssociations, - applFailedOutboundAssociations, applDescription, applURL} - STATUS deprecated - DESCRIPTION - "A collection of objects providing basic monitoring of - network service applications. This group was originally - defined in RFC 2248; note that applDirectoryName is - missing." - ::= {applGroups 3} - - assocRFC2248Group OBJECT-GROUP - OBJECTS { - assocRemoteApplication, assocApplicationProtocol, - assocApplicationType, assocDuration} - STATUS deprecated - DESCRIPTION - "A collection of objects providing basic monitoring of - network service applications' associations. This group - was originally defined by RFC 2248." - ::= {applGroups 4} - - applRFC2788Group OBJECT-GROUP - OBJECTS { - applName, applDirectoryName, applVersion, applUptime, - applOperStatus, applLastChange, applInboundAssociations, - applOutboundAssociations, applAccumulatedInboundAssociations, - applAccumulatedOutboundAssociations, applLastInboundActivity, - applLastOutboundActivity, applRejectedInboundAssociations, - applFailedOutboundAssociations, applDescription, applURL} - STATUS current - DESCRIPTION - "A collection of objects providing basic monitoring of - network service applications. This is the appropriate - group for RFC 2788 -- it adds the applDirectoryName object - missing in RFC 2248." - ::= {applGroups 5} - - assocRFC2788Group OBJECT-GROUP - OBJECTS { - assocRemoteApplication, assocApplicationProtocol, - assocApplicationType, assocDuration} - STATUS current - DESCRIPTION - "A collection of objects providing basic monitoring of - network service applications' associations. This is - the appropriate group for RFC 2788." - ::= {applGroups 6} - - -- OIDs of the form {applTCPProtoID port} are intended to be used - -- for TCP-based protocols that don't have OIDs assigned by other - -- means. 
{applUDPProtoID port} serves the same purpose for - -- UDP-based protocols. In either case 'port' corresponds to - -- the primary port number being used by the protocol. For example, - -- assuming no other OID is assigned for SMTP, an OID of - -- {applTCPProtoID 25} could be used, since SMTP is a TCP-based - -- protocol that uses port 25 as its primary port. - - applTCPProtoID OBJECT IDENTIFIER ::= {application 4} - applUDPProtoID OBJECT IDENTIFIER ::= {application 5} - - END - --- --- Copyright (C) The Internet Society (2000). All Rights Reserved. --- --- This document and translations of it may be copied and furnished to --- others, and derivative works that comment on or otherwise explain it --- or assist in its implementation may be prepared, copied, published --- and distributed, in whole or in part, without restriction of any --- kind, provided that the above copyright notice and this paragraph are --- included on all such copies and derivative works. However, this --- document itself may not be modified in any way, such as by removing --- the copyright notice or references to the Internet Society or other --- Internet organizations, except as needed for the purpose of --- developing Internet standards in which case the procedures for --- copyrights defined in the Internet Standards process must be --- followed, or as required to translate it into languages other than --- English. --- --- The limited permissions granted above are perpetual and will not be --- revoked by the Internet Society or its successors or assigns. --- --- This document and the information contained herein is provided on an --- "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING --- TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING --- BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION --- HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF --- MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. --- diff --git a/ldap/servers/snmp/RFC-1215.txt b/ldap/servers/snmp/RFC-1215.txt deleted file mode 100644 index 643272337..000000000 --- a/ldap/servers/snmp/RFC-1215.txt +++ /dev/null @@ -1,38 +0,0 @@ - -RFC-1215 DEFINITIONS ::= BEGIN - --- This module is a empty module. It has been created solely for the --- purpose of allowing other modules to correctly import the TRAP-TYPE --- clause from RFC-1215 where it should be imported from. It's a --- built in type in the UCD-SNMP code, and in fact RFC-1215 doesn't --- actually define a mib at all; it only defines macros. However, --- importing the TRAP-TYPE is conventionally done from an import --- clause pointing to RFC-1215. 
--- --- Wes 7/17/98 - -TRAP-TYPE MACRO ::= -BEGIN - TYPE NOTATION ::= "ENTERPRISE" value - (enterprise OBJECT IDENTIFIER) - VarPart - DescrPart - ReferPart - VALUE NOTATION ::= value (VALUE INTEGER) - VarPart ::= - "VARIABLES" "{" VarTypes "}" - | empty - VarTypes ::= - VarType | VarTypes "," VarType - VarType ::= - value (vartype ObjectName) - DescrPart ::= - "DESCRIPTION" value (description DisplayString) - | empty - ReferPart ::= - "REFERENCE" value (reference DisplayString) - | empty -END - - -END diff --git a/ldap/servers/snmp/RFC1155-SMI.txt b/ldap/servers/snmp/RFC1155-SMI.txt deleted file mode 100644 index 3abc7ffb7..000000000 --- a/ldap/servers/snmp/RFC1155-SMI.txt +++ /dev/null @@ -1,119 +0,0 @@ -RFC1155-SMI DEFINITIONS ::= BEGIN - -EXPORTS -- EVERYTHING - internet, directory, mgmt, - experimental, private, enterprises, - OBJECT-TYPE, ObjectName, ObjectSyntax, SimpleSyntax, - ApplicationSyntax, NetworkAddress, IpAddress, - Counter, Gauge, TimeTicks, Opaque; - - -- the path to the root - - internet OBJECT IDENTIFIER ::= { iso org(3) dod(6) 1 } - - directory OBJECT IDENTIFIER ::= { internet 1 } - - mgmt OBJECT IDENTIFIER ::= { internet 2 } - - experimental OBJECT IDENTIFIER ::= { internet 3 } - - private OBJECT IDENTIFIER ::= { internet 4 } - enterprises OBJECT IDENTIFIER ::= { private 1 } - - -- definition of object types - - OBJECT-TYPE MACRO ::= - BEGIN - TYPE NOTATION ::= "SYNTAX" type (TYPE ObjectSyntax) - "ACCESS" Access - "STATUS" Status - VALUE NOTATION ::= value (VALUE ObjectName) - - Access ::= "read-only" - | "read-write" - | "write-only" - | "not-accessible" - Status ::= "mandatory" - | "optional" - | "obsolete" - END - - -- names of objects in the MIB - - ObjectName ::= - OBJECT IDENTIFIER - - -- syntax of objects in the MIB - - ObjectSyntax ::= - CHOICE { - simple - SimpleSyntax, - -- note that simple SEQUENCEs are not directly - -- mentioned here to keep things simple (i.e., - -- prevent mis-use). 
However, application-wide - -- types which are IMPLICITly encoded simple - -- SEQUENCEs may appear in the following CHOICE - - application-wide - ApplicationSyntax - } - - SimpleSyntax ::= - CHOICE { - number - INTEGER, - string - OCTET STRING, - object - OBJECT IDENTIFIER, - empty - NULL - } - - ApplicationSyntax ::= - CHOICE { - address - NetworkAddress, - counter - Counter, - gauge - Gauge, - ticks - TimeTicks, - arbitrary - Opaque - - -- other application-wide types, as they are - -- defined, will be added here - } - - -- application-wide types - - NetworkAddress ::= - CHOICE { - internet - IpAddress - } - - IpAddress ::= - [APPLICATION 0] -- in network-byte order - IMPLICIT OCTET STRING (SIZE (4)) - - Counter ::= - [APPLICATION 1] - IMPLICIT INTEGER (0..4294967295) - - Gauge ::= - [APPLICATION 2] - IMPLICIT INTEGER (0..4294967295) - - TimeTicks ::= - [APPLICATION 3] - IMPLICIT INTEGER (0..4294967295) - - Opaque ::= - [APPLICATION 4] -- arbitrary ASN.1 value, - IMPLICIT OCTET STRING -- "double-wrapped" - - END diff --git a/ldap/servers/snmp/SNMPv2-CONF.txt b/ldap/servers/snmp/SNMPv2-CONF.txt deleted file mode 100644 index 24a1eed95..000000000 --- a/ldap/servers/snmp/SNMPv2-CONF.txt +++ /dev/null @@ -1,322 +0,0 @@ -SNMPv2-CONF DEFINITIONS ::= BEGIN - -IMPORTS ObjectName, NotificationName, ObjectSyntax - FROM SNMPv2-SMI; - --- definitions for conformance groups - -OBJECT-GROUP MACRO ::= -BEGIN - TYPE NOTATION ::= - ObjectsPart - "STATUS" Status - "DESCRIPTION" Text - ReferPart - - VALUE NOTATION ::= - value(VALUE OBJECT IDENTIFIER) - - ObjectsPart ::= - "OBJECTS" "{" Objects "}" - Objects ::= - Object - | Objects "," Object - Object ::= - - value(ObjectName) - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - -- a character string as defined in [2] - Text ::= value(IA5String) -END - --- more definitions for conformance groups - -NOTIFICATION-GROUP MACRO ::= -BEGIN - TYPE NOTATION ::= - NotificationsPart - "STATUS" Status - "DESCRIPTION" Text - ReferPart - - VALUE NOTATION ::= - value(VALUE OBJECT IDENTIFIER) - - NotificationsPart ::= - "NOTIFICATIONS" "{" Notifications "}" - Notifications ::= - Notification - | Notifications "," Notification - Notification ::= - value(NotificationName) - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - -- a character string as defined in [2] - Text ::= value(IA5String) -END - --- definitions for compliance statements - -MODULE-COMPLIANCE MACRO ::= -BEGIN - TYPE NOTATION ::= - "STATUS" Status - "DESCRIPTION" Text - ReferPart - ModulePart - - VALUE NOTATION ::= - value(VALUE OBJECT IDENTIFIER) - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - ModulePart ::= - Modules - Modules ::= - Module - | Modules Module - Module ::= - -- name of module -- - "MODULE" ModuleName - MandatoryPart - CompliancePart - - ModuleName ::= - -- identifier must start with uppercase letter - identifier ModuleIdentifier - -- must not be empty unless contained - -- in MIB Module - | empty - ModuleIdentifier ::= - value(OBJECT IDENTIFIER) - | empty - - MandatoryPart ::= - "MANDATORY-GROUPS" "{" Groups "}" - | empty - - Groups ::= - - Group - | Groups "," Group - Group ::= - value(OBJECT IDENTIFIER) - - CompliancePart ::= - Compliances - | empty - - Compliances ::= - Compliance - | Compliances Compliance - Compliance ::= - ComplianceGroup - | Object - - ComplianceGroup ::= - "GROUP" value(OBJECT IDENTIFIER) - 
"DESCRIPTION" Text - - Object ::= - "OBJECT" value(ObjectName) - SyntaxPart - WriteSyntaxPart - AccessPart - "DESCRIPTION" Text - - -- must be a refinement for object's SYNTAX clause - SyntaxPart ::= "SYNTAX" Syntax - | empty - - -- must be a refinement for object's SYNTAX clause - WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax - | empty - - Syntax ::= -- Must be one of the following: - -- a base type (or its refinement), - -- a textual convention (or its refinement), or - -- a BITS pseudo-type - type - | "BITS" "{" NamedBits "}" - - NamedBits ::= NamedBit - | NamedBits "," NamedBit - - NamedBit ::= identifier "(" number ")" -- number is nonnegative - - AccessPart ::= - "MIN-ACCESS" Access - | empty - Access ::= - "not-accessible" - | "accessible-for-notify" - | "read-only" - | "read-write" - | "read-create" - - -- a character string as defined in [2] - Text ::= value(IA5String) -END - --- definitions for capabilities statements - -AGENT-CAPABILITIES MACRO ::= -BEGIN - TYPE NOTATION ::= - "PRODUCT-RELEASE" Text - "STATUS" Status - "DESCRIPTION" Text - ReferPart - ModulePart - - VALUE NOTATION ::= - value(VALUE OBJECT IDENTIFIER) - - Status ::= - "current" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - ModulePart ::= - Modules - | empty - Modules ::= - Module - | Modules Module - Module ::= - -- name of module -- - "SUPPORTS" ModuleName - "INCLUDES" "{" Groups "}" - VariationPart - - ModuleName ::= - - -- identifier must start with uppercase letter - identifier ModuleIdentifier - ModuleIdentifier ::= - value(OBJECT IDENTIFIER) - | empty - - Groups ::= - Group - | Groups "," Group - Group ::= - value(OBJECT IDENTIFIER) - - VariationPart ::= - Variations - | empty - Variations ::= - Variation - | Variations Variation - - Variation ::= - ObjectVariation - | NotificationVariation - - NotificationVariation ::= - "VARIATION" value(NotificationName) - AccessPart - "DESCRIPTION" Text - - ObjectVariation ::= - "VARIATION" value(ObjectName) - SyntaxPart - WriteSyntaxPart - AccessPart - CreationPart - DefValPart - "DESCRIPTION" Text - - -- must be a refinement for object's SYNTAX clause - SyntaxPart ::= "SYNTAX" Syntax - | empty - - WriteSyntaxPart ::= "WRITE-SYNTAX" Syntax - | empty - - Syntax ::= -- Must be one of the following: - -- a base type (or its refinement), - -- a textual convention (or its refinement), or - -- a BITS pseudo-type - - type - | "BITS" "{" NamedBits "}" - - NamedBits ::= NamedBit - | NamedBits "," NamedBit - - NamedBit ::= identifier "(" number ")" -- number is nonnegative - - AccessPart ::= - "ACCESS" Access - | empty - - Access ::= - "not-implemented" - -- only "not-implemented" for notifications - | "accessible-for-notify" - | "read-only" - | "read-write" - | "read-create" - -- following is for backward-compatibility only - | "write-only" - - CreationPart ::= - "CREATION-REQUIRES" "{" Cells "}" - | empty - Cells ::= - Cell - | Cells "," Cell - Cell ::= - value(ObjectName) - - DefValPart ::= "DEFVAL" "{" Defvalue "}" - | empty - - Defvalue ::= -- must be valid for the object's syntax - -- in this macro's SYNTAX clause, if present, - -- or if not, in object's OBJECT-TYPE macro - value(ObjectSyntax) - | "{" BitsValue "}" - - BitsValue ::= BitNames - | empty - - BitNames ::= BitName - | BitNames "," BitName - - BitName ::= identifier - - -- a character string as defined in [2] - Text ::= value(IA5String) -END - -END diff --git a/ldap/servers/snmp/SNMPv2-SMI.txt b/ldap/servers/snmp/SNMPv2-SMI.txt deleted file mode 100644 index 1c01e1dfc..000000000 --- 
a/ldap/servers/snmp/SNMPv2-SMI.txt +++ /dev/null @@ -1,344 +0,0 @@ -SNMPv2-SMI DEFINITIONS ::= BEGIN - --- the path to the root - -org OBJECT IDENTIFIER ::= { iso 3 } -- "iso" = 1 -dod OBJECT IDENTIFIER ::= { org 6 } -internet OBJECT IDENTIFIER ::= { dod 1 } - -directory OBJECT IDENTIFIER ::= { internet 1 } - -mgmt OBJECT IDENTIFIER ::= { internet 2 } -mib-2 OBJECT IDENTIFIER ::= { mgmt 1 } -transmission OBJECT IDENTIFIER ::= { mib-2 10 } - -experimental OBJECT IDENTIFIER ::= { internet 3 } - -private OBJECT IDENTIFIER ::= { internet 4 } -enterprises OBJECT IDENTIFIER ::= { private 1 } - -security OBJECT IDENTIFIER ::= { internet 5 } - -snmpV2 OBJECT IDENTIFIER ::= { internet 6 } - --- transport domains -snmpDomains OBJECT IDENTIFIER ::= { snmpV2 1 } - --- transport proxies -snmpProxys OBJECT IDENTIFIER ::= { snmpV2 2 } - --- module identities -snmpModules OBJECT IDENTIFIER ::= { snmpV2 3 } - --- Extended UTCTime, to allow dates with four-digit years --- (Note that this definition of ExtUTCTime is not to be IMPORTed --- by MIB modules.) -ExtUTCTime ::= OCTET STRING(SIZE(11 | 13)) - -- format is YYMMDDHHMMZ or YYYYMMDDHHMMZ - - -- where: YY - last two digits of year (only years - -- between 1900-1999) - -- YYYY - last four digits of the year (any year) - -- MM - month (01 through 12) - -- DD - day of month (01 through 31) - -- HH - hours (00 through 23) - -- MM - minutes (00 through 59) - -- Z - denotes GMT (the ASCII character Z) - -- - -- For example, "9502192015Z" and "199502192015Z" represent - -- 8:15pm GMT on 19 February 1995. Years after 1999 must use - -- the four digit year format. Years 1900-1999 may use the - -- two or four digit format. - --- definitions for information modules - -MODULE-IDENTITY MACRO ::= -BEGIN - TYPE NOTATION ::= - "LAST-UPDATED" value(Update ExtUTCTime) - "ORGANIZATION" Text - "CONTACT-INFO" Text - "DESCRIPTION" Text - RevisionPart - - VALUE NOTATION ::= - value(VALUE OBJECT IDENTIFIER) - - RevisionPart ::= - Revisions - | empty - Revisions ::= - Revision - | Revisions Revision - Revision ::= - "REVISION" value(Update ExtUTCTime) - "DESCRIPTION" Text - - -- a character string as defined in section 3.1.1 - Text ::= value(IA5String) -END - -OBJECT-IDENTITY MACRO ::= -BEGIN - TYPE NOTATION ::= - "STATUS" Status - "DESCRIPTION" Text - - ReferPart - - VALUE NOTATION ::= - value(VALUE OBJECT IDENTIFIER) - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - -- a character string as defined in section 3.1.1 - Text ::= value(IA5String) -END - --- names of objects --- (Note that these definitions of ObjectName and NotificationName --- are not to be IMPORTed by MIB modules.) - -ObjectName ::= - OBJECT IDENTIFIER - -NotificationName ::= - OBJECT IDENTIFIER - --- syntax of objects - --- the "base types" defined here are: --- 3 built-in ASN.1 types: INTEGER, OCTET STRING, OBJECT IDENTIFIER --- 8 application-defined types: Integer32, IpAddress, Counter32, --- Gauge32, Unsigned32, TimeTicks, Opaque, and Counter64 - -ObjectSyntax ::= - CHOICE { - simple - SimpleSyntax, - -- note that SEQUENCEs for conceptual tables and - -- rows are not mentioned here... 
- - application-wide - ApplicationSyntax - } - --- built-in ASN.1 types - -SimpleSyntax ::= - CHOICE { - -- INTEGERs with a more restrictive range - -- may also be used - integer-value -- includes Integer32 - INTEGER (-2147483648..2147483647), - -- OCTET STRINGs with a more restrictive size - -- may also be used - string-value - OCTET STRING (SIZE (0..65535)), - objectID-value - OBJECT IDENTIFIER - } - --- indistinguishable from INTEGER, but never needs more than --- 32-bits for a two's complement representation -Integer32 ::= - INTEGER (-2147483648..2147483647) - --- application-wide types - -ApplicationSyntax ::= - CHOICE { - ipAddress-value - IpAddress, - counter-value - Counter32, - timeticks-value - TimeTicks, - arbitrary-value - Opaque, - big-counter-value - Counter64, - unsigned-integer-value -- includes Gauge32 - Unsigned32 - } - --- in network-byte order - --- (this is a tagged type for historical reasons) -IpAddress ::= - [APPLICATION 0] - IMPLICIT OCTET STRING (SIZE (4)) - --- this wraps -Counter32 ::= - [APPLICATION 1] - IMPLICIT INTEGER (0..4294967295) - --- this doesn't wrap -Gauge32 ::= - [APPLICATION 2] - IMPLICIT INTEGER (0..4294967295) - --- an unsigned 32-bit quantity --- indistinguishable from Gauge32 -Unsigned32 ::= - [APPLICATION 2] - IMPLICIT INTEGER (0..4294967295) - --- hundredths of seconds since an epoch -TimeTicks ::= - [APPLICATION 3] - IMPLICIT INTEGER (0..4294967295) - --- for backward-compatibility only -Opaque ::= - [APPLICATION 4] - IMPLICIT OCTET STRING - --- for counters that wrap in less than one hour with only 32 bits -Counter64 ::= - [APPLICATION 6] - IMPLICIT INTEGER (0..18446744073709551615) - --- definition for objects - -OBJECT-TYPE MACRO ::= -BEGIN - TYPE NOTATION ::= - "SYNTAX" Syntax - UnitsPart - "MAX-ACCESS" Access - "STATUS" Status - "DESCRIPTION" Text - ReferPart - - IndexPart - DefValPart - - VALUE NOTATION ::= - value(VALUE ObjectName) - - Syntax ::= -- Must be one of the following: - -- a base type (or its refinement), - -- a textual convention (or its refinement), or - -- a BITS pseudo-type - type - | "BITS" "{" NamedBits "}" - - NamedBits ::= NamedBit - | NamedBits "," NamedBit - - NamedBit ::= identifier "(" number ")" -- number is nonnegative - - UnitsPart ::= - "UNITS" Text - | empty - - Access ::= - "not-accessible" - | "accessible-for-notify" - | "read-only" - | "read-write" - | "read-create" - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - IndexPart ::= - "INDEX" "{" IndexTypes "}" - | "AUGMENTS" "{" Entry "}" - | empty - IndexTypes ::= - IndexType - | IndexTypes "," IndexType - IndexType ::= - "IMPLIED" Index - | Index - - Index ::= - -- use the SYNTAX value of the - -- correspondent OBJECT-TYPE invocation - value(ObjectName) - Entry ::= - -- use the INDEX value of the - -- correspondent OBJECT-TYPE invocation - value(ObjectName) - - DefValPart ::= "DEFVAL" "{" Defvalue "}" - | empty - - Defvalue ::= -- must be valid for the type specified in - -- SYNTAX clause of same OBJECT-TYPE macro - value(ObjectSyntax) - | "{" BitsValue "}" - - BitsValue ::= BitNames - | empty - - BitNames ::= BitName - | BitNames "," BitName - - BitName ::= identifier - - -- a character string as defined in section 3.1.1 - Text ::= value(IA5String) -END - --- definitions for notifications - -NOTIFICATION-TYPE MACRO ::= -BEGIN - TYPE NOTATION ::= - ObjectsPart - "STATUS" Status - "DESCRIPTION" Text - ReferPart - - VALUE NOTATION ::= - value(VALUE NotificationName) - - ObjectsPart ::= - "OBJECTS" 
"{" Objects "}" - | empty - Objects ::= - Object - - | Objects "," Object - Object ::= - value(ObjectName) - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - -- a character string as defined in section 3.1.1 - Text ::= value(IA5String) -END - --- definitions of administrative identifiers - -zeroDotZero OBJECT-IDENTITY - STATUS current - DESCRIPTION - "A value used for null identifiers." - ::= { 0 0 } - -END diff --git a/ldap/servers/snmp/SNMPv2-TC.txt b/ldap/servers/snmp/SNMPv2-TC.txt deleted file mode 100644 index 860bf71ee..000000000 --- a/ldap/servers/snmp/SNMPv2-TC.txt +++ /dev/null @@ -1,772 +0,0 @@ -SNMPv2-TC DEFINITIONS ::= BEGIN - -IMPORTS - TimeTicks FROM SNMPv2-SMI; - --- definition of textual conventions - -TEXTUAL-CONVENTION MACRO ::= - -BEGIN - TYPE NOTATION ::= - DisplayPart - "STATUS" Status - "DESCRIPTION" Text - ReferPart - "SYNTAX" Syntax - - VALUE NOTATION ::= - value(VALUE Syntax) -- adapted ASN.1 - - DisplayPart ::= - "DISPLAY-HINT" Text - | empty - - Status ::= - "current" - | "deprecated" - | "obsolete" - - ReferPart ::= - "REFERENCE" Text - | empty - - -- a character string as defined in [2] - Text ::= value(IA5String) - - Syntax ::= -- Must be one of the following: - -- a base type (or its refinement), or - -- a BITS pseudo-type - type - | "BITS" "{" NamedBits "}" - - NamedBits ::= NamedBit - | NamedBits "," NamedBit - - NamedBit ::= identifier "(" number ")" -- number is nonnegative - -END - -DisplayString ::= TEXTUAL-CONVENTION - DISPLAY-HINT "255a" - STATUS current - DESCRIPTION - "Represents textual information taken from the NVT ASCII - - character set, as defined in pages 4, 10-11 of RFC 854. - - To summarize RFC 854, the NVT ASCII repertoire specifies: - - - the use of character codes 0-127 (decimal) - - - the graphics characters (32-126) are interpreted as - US ASCII - - - NUL, LF, CR, BEL, BS, HT, VT and FF have the special - meanings specified in RFC 854 - - - the other 25 codes have no standard interpretation - - - the sequence 'CR LF' means newline - - - the sequence 'CR NUL' means carriage-return - - - an 'LF' not preceded by a 'CR' means moving to the - same column on the next line. - - - the sequence 'CR x' for any x other than LF or NUL is - illegal. (Note that this also means that a string may - end with either 'CR LF' or 'CR NUL', but not with CR.) - - Any object defined using this syntax may not exceed 255 - characters in length." - SYNTAX OCTET STRING (SIZE (0..255)) - -PhysAddress ::= TEXTUAL-CONVENTION - DISPLAY-HINT "1x:" - STATUS current - DESCRIPTION - "Represents media- or physical-level addresses." - SYNTAX OCTET STRING - -MacAddress ::= TEXTUAL-CONVENTION - DISPLAY-HINT "1x:" - STATUS current - DESCRIPTION - "Represents an 802 MAC address represented in the - `canonical' order defined by IEEE 802.1a, i.e., as if it - were transmitted least significant bit first, even though - 802.5 (in contrast to other 802.x protocols) requires MAC - addresses to be transmitted most significant bit first." - SYNTAX OCTET STRING (SIZE (6)) - -TruthValue ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "Represents a boolean value." - SYNTAX INTEGER { true(1), false(2) } - -TestAndIncr ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "Represents integer-valued information used for atomic - operations. 
When the management protocol is used to specify - that an object instance having this syntax is to be - modified, the new value supplied via the management protocol - must precisely match the value presently held by the - instance. If not, the management protocol set operation - fails with an error of `inconsistentValue'. Otherwise, if - the current value is the maximum value of 2^31-1 (2147483647 - decimal), then the value held by the instance is wrapped to - zero; otherwise, the value held by the instance is - incremented by one. (Note that regardless of whether the - management protocol set operation succeeds, the variable- - binding in the request and response PDUs are identical.) - - The value of the ACCESS clause for objects having this - syntax is either `read-write' or `read-create'. When an - instance of a columnar object having this syntax is created, - any value may be supplied via the management protocol. - - When the network management portion of the system is re- - initialized, the value of every object instance having this - syntax must either be incremented from its value prior to - the re-initialization, or (if the value prior to the re- - initialization is unknown) be set to a pseudo-randomly - generated value." - SYNTAX INTEGER (0..2147483647) - -AutonomousType ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "Represents an independently extensible type identification - value. It may, for example, indicate a particular sub-tree - with further MIB definitions, or define a particular type of - protocol or hardware." - SYNTAX OBJECT IDENTIFIER - -InstancePointer ::= TEXTUAL-CONVENTION - STATUS obsolete - DESCRIPTION - "A pointer to either a specific instance of a MIB object or - a conceptual row of a MIB table in the managed device. In - the latter case, by convention, it is the name of the - particular instance of the first accessible columnar object - in the conceptual row. - - The two uses of this textual convention are replaced by - VariablePointer and RowPointer, respectively." - SYNTAX OBJECT IDENTIFIER - -VariablePointer ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "A pointer to a specific object instance. For example, - sysContact.0 or ifInOctets.3." - SYNTAX OBJECT IDENTIFIER - -RowPointer ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "Represents a pointer to a conceptual row. The value is the - name of the instance of the first accessible columnar object - in the conceptual row. - - For example, ifIndex.3 would point to the 3rd row in the - ifTable (note that if ifIndex were not-accessible, then - ifDescr.3 would be used instead)." - SYNTAX OBJECT IDENTIFIER - -RowStatus ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "The RowStatus textual convention is used to manage the - creation and deletion of conceptual rows, and is used as the - value of the SYNTAX clause for the status column of a - conceptual row (as described in Section 7.7.1 of [2].) 
- - The status column has six defined values: - - - `active', which indicates that the conceptual row is - available for use by the managed device; - - - `notInService', which indicates that the conceptual - row exists in the agent, but is unavailable for use by - the managed device (see NOTE below); 'notInService' has - no implication regarding the internal consistency of - the row, availability of resources, or consistency with - the current state of the managed device; - - - `notReady', which indicates that the conceptual row - exists in the agent, but is missing information - necessary in order to be available for use by the - managed device (i.e., one or more required columns in - the conceptual row have not been instanciated); - - - `createAndGo', which is supplied by a management - station wishing to create a new instance of a - conceptual row and to have its status automatically set - to active, making it available for use by the managed - device; - - - `createAndWait', which is supplied by a management - station wishing to create a new instance of a - conceptual row (but not make it available for use by - the managed device); and, - - `destroy', which is supplied by a management station - wishing to delete all of the instances associated with - an existing conceptual row. - - Whereas five of the six values (all except `notReady') may - be specified in a management protocol set operation, only - three values will be returned in response to a management - protocol retrieval operation: `notReady', `notInService' or - `active'. That is, when queried, an existing conceptual row - has only three states: it is either available for use by - the managed device (the status column has value `active'); - it is not available for use by the managed device, though - the agent has sufficient information to attempt to make it - so (the status column has value `notInService'); or, it is - not available for use by the managed device, and an attempt - to make it so would fail because the agent has insufficient - information (the state column has value `notReady'). - - NOTE WELL - - This textual convention may be used for a MIB table, - irrespective of whether the values of that table's - conceptual rows are able to be modified while it is - active, or whether its conceptual rows must be taken - out of service in order to be modified. That is, it is - the responsibility of the DESCRIPTION clause of the - status column to specify whether the status column must - not be `active' in order for the value of some other - column of the same conceptual row to be modified. If - such a specification is made, affected columns may be - changed by an SNMP set PDU if the RowStatus would not - be equal to `active' either immediately before or after - processing the PDU. In other words, if the PDU also - contained a varbind that would change the RowStatus - value, the column in question may be changed if the - RowStatus was not equal to `active' as the PDU was - received, or if the varbind sets the status to a value - other than 'active'. - - Also note that whenever any elements of a row exist, the - RowStatus column must also exist. 
- - To summarize the effect of having a conceptual row with a - status column having a SYNTAX clause value of RowStatus, - consider the following state diagram: - - STATE - +--------------+-----------+-------------+------------- - | A | B | C | D - | |status col.|status column| - |status column | is | is |status column - ACTION |does not exist| notReady | notInService| is active ---------------+--------------+-----------+-------------+------------- -set status |noError ->D|inconsist- |inconsistent-|inconsistent- -column to | or | entValue| Value| Value -createAndGo |inconsistent- | | | - | Value| | | ---------------+--------------+-----------+-------------+------------- -set status |noError see 1|inconsist- |inconsistent-|inconsistent- -column to | or | entValue| Value| Value -createAndWait |wrongValue | | | ---------------+--------------+-----------+-------------+------------- -set status |inconsistent- |inconsist- |noError |noError -column to | Value| entValue| | -active | | | | - | | or | | - | | | | - | |see 2 ->D|see 8 ->D| ->D ---------------+--------------+-----------+-------------+------------- -set status |inconsistent- |inconsist- |noError |noError ->C -column to | Value| entValue| | -notInService | | | | - | | or | | or - | | | | - | |see 3 ->C| ->C|see 6 ---------------+--------------+-----------+-------------+------------- -set status |noError |noError |noError |noError ->A -column to | | | | or -destroy | ->A| ->A| ->A|see 7 ---------------+--------------+-----------+-------------+------------- -set any other |see 4 |noError |noError |see 5 -column to some| | | | -value | | see 1| ->C| ->D ---------------+--------------+-----------+-------------+------------- - - (1) goto B or C, depending on information available to the - agent. - - (2) if other variable bindings included in the same PDU, - provide values for all columns which are missing but - required, and all columns have acceptable values, then - return noError and goto D. - - (3) if other variable bindings included in the same PDU, - provide legal values for all columns which are missing but - required, then return noError and goto C. - - (4) at the discretion of the agent, the return value may be - either: - - inconsistentName: because the agent does not choose to - create such an instance when the corresponding - RowStatus instance does not exist, or - - inconsistentValue: if the supplied value is - inconsistent with the state of some other MIB object's - value, or - - noError: because the agent chooses to create the - instance. - - If noError is returned, then the instance of the status - column must also be created, and the new state is B or C, - depending on the information available to the agent. If - inconsistentName or inconsistentValue is returned, the row - remains in state A. - - (5) depending on the MIB definition for the column/table, - either noError or inconsistentValue may be returned. - - (6) the return value can indicate one of the following - errors: - - wrongValue: because the agent does not support - notInService (e.g., an agent which does not support - createAndWait), or - - inconsistentValue: because the agent is unable to take - the row out of service at this time, perhaps because it - is in use and cannot be de-activated. - - (7) the return value can indicate the following error: - - inconsistentValue: because the agent is unable to - remove the row at this time, perhaps because it is in - use and cannot be de-activated. 
- - (8) the transition to D can fail, e.g., if the values of the - conceptual row are inconsistent, then the error code would - be inconsistentValue. - - NOTE: Other processing of (this and other varbinds of) the - set request may result in a response other than noError - being returned, e.g., wrongValue, noCreation, etc. - - Conceptual Row Creation - - There are four potential interactions when creating a - conceptual row: selecting an instance-identifier which is - not in use; creating the conceptual row; initializing any - objects for which the agent does not supply a default; and, - making the conceptual row available for use by the managed - device. - - Interaction 1: Selecting an Instance-Identifier - - The algorithm used to select an instance-identifier varies - for each conceptual row. In some cases, the instance- - identifier is semantically significant, e.g., the - destination address of a route, and a management station - selects the instance-identifier according to the semantics. - - In other cases, the instance-identifier is used solely to - distinguish conceptual rows, and a management station - without specific knowledge of the conceptual row might - examine the instances present in order to determine an - unused instance-identifier. (This approach may be used, but - it is often highly sub-optimal; however, it is also a - questionable practice for a naive management station to - attempt conceptual row creation.) - - Alternately, the MIB module which defines the conceptual row - might provide one or more objects which provide assistance - in determining an unused instance-identifier. For example, - if the conceptual row is indexed by an integer-value, then - an object having an integer-valued SYNTAX clause might be - defined for such a purpose, allowing a management station to - issue a management protocol retrieval operation. In order - to avoid unnecessary collisions between competing management - stations, `adjacent' retrievals of this object should be - different. - - Finally, the management station could select a pseudo-random - number to use as the index. In the event that this index - - was already in use and an inconsistentValue was returned in - response to the management protocol set operation, the - management station should simply select a new pseudo-random - number and retry the operation. - - A MIB designer should choose between the two latter - algorithms based on the size of the table (and therefore the - efficiency of each algorithm). For tables in which a large - number of entries are expected, it is recommended that a MIB - object be defined that returns an acceptable index for - creation. For tables with small numbers of entries, it is - recommended that the latter pseudo-random index mechanism be - used. - - Interaction 2: Creating the Conceptual Row - - Once an unused instance-identifier has been selected, the - management station determines if it wishes to create and - activate the conceptual row in one transaction or in a - negotiated set of interactions. - - Interaction 2a: Creating and Activating the Conceptual Row - - The management station must first determine the column - requirements, i.e., it must determine those columns for - which it must or must not provide values. Depending on the - complexity of the table and the management station's - knowledge of the agent's capabilities, this determination - can be made locally by the management station. 
Alternately, - the management station issues a management protocol get - operation to examine all columns in the conceptual row that - it wishes to create. In response, for each column, there - are three possible outcomes: - - - a value is returned, indicating that some other - management station has already created this conceptual - row. We return to interaction 1. - - - the exception `noSuchInstance' is returned, - indicating that the agent implements the object-type - associated with this column, and that this column in at - least one conceptual row would be accessible in the MIB - view used by the retrieval were it to exist. For those - columns to which the agent provides read-create access, - the `noSuchInstance' exception tells the management - station that it should supply a value for this column - when the conceptual row is to be created. - - - the exception `noSuchObject' is returned, indicating - that the agent does not implement the object-type - associated with this column or that there is no - conceptual row for which this column would be - accessible in the MIB view used by the retrieval. As - such, the management station can not issue any - management protocol set operations to create an - instance of this column. - - Once the column requirements have been determined, a - management protocol set operation is accordingly issued. - This operation also sets the new instance of the status - column to `createAndGo'. - - When the agent processes the set operation, it verifies that - it has sufficient information to make the conceptual row - available for use by the managed device. The information - available to the agent is provided by two sources: the - management protocol set operation which creates the - conceptual row, and, implementation-specific defaults - supplied by the agent (note that an agent must provide - implementation-specific defaults for at least those objects - which it implements as read-only). If there is sufficient - information available, then the conceptual row is created, a - `noError' response is returned, the status column is set to - `active', and no further interactions are necessary (i.e., - interactions 3 and 4 are skipped). If there is insufficient - information, then the conceptual row is not created, and the - set operation fails with an error of `inconsistentValue'. - On this error, the management station can issue a management - protocol retrieval operation to determine if this was - because it failed to specify a value for a required column, - or, because the selected instance of the status column - already existed. In the latter case, we return to - interaction 1. In the former case, the management station - can re-issue the set operation with the additional - information, or begin interaction 2 again using - `createAndWait' in order to negotiate creation of the - conceptual row. - - NOTE WELL - - Regardless of the method used to determine the column - requirements, it is possible that the management - station might deem a column necessary when, in fact, - the agent will not allow that particular columnar - instance to be created or written. In this case, the - management protocol set operation will fail with an - error such as `noCreation' or `notWritable'. In this - case, the management station decides whether it needs - to be able to set a value for that particular columnar - instance. 
If not, the management station re-issues the - management protocol set operation, but without setting - a value for that particular columnar instance; - otherwise, the management station aborts the row - creation algorithm. - - Interaction 2b: Negotiating the Creation of the Conceptual - Row - - The management station issues a management protocol set - operation which sets the desired instance of the status - column to `createAndWait'. If the agent is unwilling to - process a request of this sort, the set operation fails with - an error of `wrongValue'. (As a consequence, such an agent - must be prepared to accept a single management protocol set - operation, i.e., interaction 2a above, containing all of the - columns indicated by its column requirements.) Otherwise, - the conceptual row is created, a `noError' response is - returned, and the status column is immediately set to either - `notInService' or `notReady', depending on whether it has - sufficient information to (attempt to) make the conceptual - row available for use by the managed device. If there is - sufficient information available, then the status column is - set to `notInService'; otherwise, if there is insufficient - information, then the status column is set to `notReady'. - Regardless, we proceed to interaction 3. - - Interaction 3: Initializing non-defaulted Objects - - The management station must now determine the column - requirements. It issues a management protocol get operation - to examine all columns in the created conceptual row. In - the response, for each column, there are three possible - outcomes: - - - a value is returned, indicating that the agent - implements the object-type associated with this column - and had sufficient information to provide a value. For - those columns to which the agent provides read-create - access (and for which the agent allows their values to - be changed after their creation), a value return tells - the management station that it may issue additional - management protocol set operations, if it desires, in - order to change the value associated with this column. - - - the exception `noSuchInstance' is returned, - indicating that the agent implements the object-type - associated with this column, and that this column in at - least one conceptual row would be accessible in the MIB - view used by the retrieval were it to exist. However, - the agent does not have sufficient information to - provide a value, and until a value is provided, the - conceptual row may not be made available for use by the - managed device. For those columns to which the agent - provides read-create access, the `noSuchInstance' - exception tells the management station that it must - issue additional management protocol set operations, in - order to provide a value associated with this column. - - - the exception `noSuchObject' is returned, indicating - that the agent does not implement the object-type - associated with this column or that there is no - conceptual row for which this column would be - accessible in the MIB view used by the retrieval. As - such, the management station can not issue any - management protocol set operations to create an - instance of this column. - - If the value associated with the status column is - `notReady', then the management station must first deal with - all `noSuchInstance' columns, if any. Having done so, the - value of the status column becomes `notInService', and we - proceed to interaction 4. 
- - Interaction 4: Making the Conceptual Row Available - - Once the management station is satisfied with the values - associated with the columns of the conceptual row, it issues - a management protocol set operation to set the status column - to `active'. If the agent has sufficient information to - make the conceptual row available for use by the managed - device, the management protocol set operation succeeds (a - `noError' response is returned). Otherwise, the management - protocol set operation fails with an error of - `inconsistentValue'. - - NOTE WELL - - A conceptual row having a status column with value - `notInService' or `notReady' is unavailable to the - managed device. As such, it is possible for the - managed device to create its own instances during the - time between the management protocol set operation - which sets the status column to `createAndWait' and the - management protocol set operation which sets the status - column to `active'. In this case, when the management - protocol set operation is issued to set the status - column to `active', the values held in the agent - supersede those used by the managed device. - - If the management station is prevented from setting the - status column to `active' (e.g., due to management station - or network failure) the conceptual row will be left in the - `notInService' or `notReady' state, consuming resources - indefinitely. The agent must detect conceptual rows that - have been in either state for an abnormally long period of - time and remove them. It is the responsibility of the - DESCRIPTION clause of the status column to indicate what an - abnormally long period of time would be. This period of - time should be long enough to allow for human response time - (including `think time') between the creation of the - conceptual row and the setting of the status to `active'. - In the absence of such information in the DESCRIPTION - clause, it is suggested that this period be approximately 5 - minutes in length. This removal action applies not only to - newly-created rows, but also to previously active rows which - are set to, and left in, the notInService state for a - prolonged period exceeding that which is considered normal - for such a conceptual row. - - Conceptual Row Suspension - - When a conceptual row is `active', the management station - may issue a management protocol set operation which sets the - instance of the status column to `notInService'. If the - agent is unwilling to do so, the set operation fails with an - error of `wrongValue' or `inconsistentValue'. Otherwise, - the conceptual row is taken out of service, and a `noError' - response is returned. It is the responsibility of the - DESCRIPTION clause of the status column to indicate under - what circumstances the status column should be taken out of - service (e.g., in order for the value of some other column - of the same conceptual row to be modified). - - Conceptual Row Deletion - - For deletion of conceptual rows, a management protocol set - operation is issued which sets the instance of the status - column to `destroy'. This request may be made regardless of - the current value of the status column (e.g., it is possible - to delete conceptual rows which are either `notReady', - `notInService' or `active'.) If the operation succeeds, - then all instances associated with the conceptual row are - immediately removed." 
- SYNTAX INTEGER { - -- the following two values are states: - -- these values may be read or written - active(1), - notInService(2), - -- the following value is a state: - -- this value may be read, but not written - notReady(3), - -- the following three values are - -- actions: these values may be written, - -- but are never read - createAndGo(4), - createAndWait(5), - destroy(6) - } - -TimeStamp ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "The value of the sysUpTime object at which a specific - occurrence happened. The specific occurrence must be - - defined in the description of any object defined using this - type. - - If sysUpTime is reset to zero as a result of a re- - initialization of the network management (sub)system, then - the values of all TimeStamp objects are also reset. - However, after approximately 497 days without a re- - initialization, the sysUpTime object will reach 2^^32-1 and - then increment around to zero; in this case, existing values - of TimeStamp objects do not change. This can lead to - ambiguities in the value of TimeStamp objects." - SYNTAX TimeTicks - -TimeInterval ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "A period of time, measured in units of 0.01 seconds." - SYNTAX INTEGER (0..2147483647) - -DateAndTime ::= TEXTUAL-CONVENTION - DISPLAY-HINT "2d-1d-1d,1d:1d:1d.1d,1a1d:1d" - STATUS current - DESCRIPTION - "A date-time specification. - - field octets contents range - ----- ------ -------- ----- - 1 1-2 year* 0..65536 - 2 3 month 1..12 - 3 4 day 1..31 - 4 5 hour 0..23 - 5 6 minutes 0..59 - 6 7 seconds 0..60 - (use 60 for leap-second) - 7 8 deci-seconds 0..9 - 8 9 direction from UTC '+' / '-' - 9 10 hours from UTC* 0..13 - 10 11 minutes from UTC 0..59 - - * Notes: - - the value of year is in network-byte order - - daylight saving time in New Zealand is +13 - - For example, Tuesday May 26, 1992 at 1:30:15 PM EDT would be - displayed as: - - 1992-5-26,13:30:15.0,-4:0 - - Note that if only local time is known, then timezone - information (fields 8-10) is not present." - SYNTAX OCTET STRING (SIZE (8 | 11)) - -StorageType ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "Describes the memory realization of a conceptual row. A - row which is volatile(2) is lost upon reboot. A row which - is either nonVolatile(3), permanent(4) or readOnly(5), is - backed up by stable storage. A row which is permanent(4) - can be changed but not deleted. A row which is readOnly(5) - cannot be changed nor deleted. - - If the value of an object with this syntax is either - permanent(4) or readOnly(5), it cannot be written. - Conversely, if the value is either other(1), volatile(2) or - nonVolatile(3), it cannot be modified to be permanent(4) or - readOnly(5). (All illegal modifications result in a - 'wrongValue' error.) - - Every usage of this textual convention is required to - specify the columnar objects which a permanent(4) row must - at a minimum allow to be writable." - SYNTAX INTEGER { - other(1), -- eh? - volatile(2), -- e.g., in RAM - nonVolatile(3), -- e.g., in NVRAM - permanent(4), -- e.g., partially in ROM - readOnly(5) -- e.g., completely in ROM - } - -TDomain ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "Denotes a kind of transport service. - - Some possible values, such as snmpUDPDomain, are defined in - the SNMPv2-TM MIB module. Other possible values are defined - in other MIB modules." - REFERENCE "The SNMPv2-TM MIB module is defined in RFC 1906." 
- SYNTAX OBJECT IDENTIFIER - -TAddress ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "Denotes a transport service address. - - A TAddress value is always interpreted within the context of a - TDomain value. Thus, each definition of a TDomain value must - be accompanied by a definition of a textual convention for use - with that TDomain. Some possible textual conventions, such as - SnmpUDPAddress for snmpUDPDomain, are defined in the SNMPv2-TM - MIB module. Other possible textual conventions are defined in - other MIB modules." - REFERENCE "The SNMPv2-TM MIB module is defined in RFC 1906." - SYNTAX OCTET STRING (SIZE (1..255)) - -END diff --git a/ldap/servers/snmp/netscape-ldap.mib b/ldap/servers/snmp/netscape-ldap.mib deleted file mode 100644 index c4ef5f602..000000000 --- a/ldap/servers/snmp/netscape-ldap.mib +++ /dev/null @@ -1,759 +0,0 @@ --- BEGIN COPYRIGHT BLOCK --- This Program is free software; you can redistribute it and/or modify it under --- the terms of the GNU General Public License as published by the Free Software --- Foundation; version 2 of the License. --- --- This Program is distributed in the hope that it will be useful, but WITHOUT --- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS --- FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. --- --- You should have received a copy of the GNU General Public License along with --- this Program; if not, write to the Free Software Foundation, Inc., 59 Temple --- Place, Suite 330, Boston, MA 02111-1307 USA. --- --- In addition, as a special exception, Red Hat, Inc. gives You the additional --- right to link the code of this Program with code not covered under the GNU --- General Public License ("Non-GPL Code") and to distribute linked combinations --- including the two, subject to the limitations in this paragraph. Non-GPL Code --- permitted under this exception must only link to the code of this Program --- through those well defined interfaces identified in the file named EXCEPTION --- found in the source code files (the "Approved Interfaces"). The files of --- Non-GPL Code may instantiate templates or use macros or inline functions from --- the Approved Interfaces without causing the resulting work to be covered by --- the GNU General Public License. Only Red Hat, Inc. may make changes or --- additions to the list of Approved Interfaces. You must obey the GNU General --- Public License in all respects for all of the Program code and other code used --- in conjunction with the Program except the Non-GPL Code covered by this --- exception. If you modify this file, you may extend this exception to your --- version of the file, but you are not obligated to do so. If you do not wish to --- provide this exception without modification, you must delete this exception --- statement from your version and license this file solely under the GPL without --- exception. --- --- --- Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. --- Copyright (C) 2005 Red Hat, Inc. --- All rights reserved. 
--- END COPYRIGHT BLOCK --- --- --- MIB for Directory Server --- --- This is an implementation of the MADMAN mib for monitoring LDAP/CLDAP and X.500 --- directories described in RFC 2788 and 2789 --- with the addition of traps for server up and down events - -NSLDAP-MIB DEFINITIONS ::= BEGIN - -IMPORTS - MODULE-IDENTITY, Counter32, Gauge32, OBJECT-TYPE - FROM SNMPv2-SMI - DisplayString, TimeStamp, TEXTUAL-CONVENTION - FROM SNMPv2-TC - MODULE-COMPLIANCE, OBJECT-GROUP - FROM SNMPv2-CONF - applIndex, DistinguishedName, URLString - FROM NETWORK-SERVICES-MIB - enterprises - FROM RFC1155-SMI - TRAP-TYPE - FROM RFC-1215; - - netscape OBJECT IDENTIFIER ::= { enterprises 1450 } - - URLString ::= TEXTUAL-CONVENTION - STATUS current - DESCRIPTION - "I couldn't find it but madman said it should be here, guessing DisplayString" - SYNTAX DisplayString - - nsldap MODULE-IDENTITY - LAST-UPDATED "200207160000Z" - ORGANIZATION "Netscape Communications Corp" - CONTACT-INFO - " AOL Strategic Business Solutions - Postal: 22000 AOL Way - Dulles, VA 20166 - - Website: http://enterprise.netscape.com" - DESCRIPTION - " An implementation of the MADMAN mib for monitoring LDAP/CLDAP and X.500 - directories described in RFC 2788 and 2789 - used for Directory Server" - ::= { netscape 7} - - dsOpsTable OBJECT-TYPE - SYNTAX SEQUENCE OF DsOpsEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - " The table holding information related to the - DS operations." - ::= {nsldap 1} - - dsOpsEntry OBJECT-TYPE - SYNTAX DsOpsEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - " Entry containing operations related statistics - for a DS." - INDEX { applIndex} - ::= {dsOpsTable 1} - - DsOpsEntry ::= SEQUENCE { - - -- Bindings - - dsAnonymousBinds - Counter32, - dsUnAuthBinds - Counter32, - dsSimpleAuthBinds - Counter32, - dsStrongAuthBinds - Counter32, - dsBindSecurityErrors - Counter32, - - -- In-coming operations - - dsInOps - Counter32, - dsReadOps - Counter32, - dsCompareOps - Counter32, - dsAddEntryOps - Counter32, - dsRemoveEntryOps - Counter32, - dsModifyEntryOps - Counter32, - dsModifyRDNOps - Counter32, - dsListOps - Counter32, - dsSearchOps - Counter32, - dsOneLevelSearchOps - Counter32, - dsWholeSubtreeSearchOps - Counter32, - - -- Out going operations - - dsReferrals - Counter32, - dsChainings - Counter32, - - -- Errors - - dsSecurityErrors - Counter32, - dsErrors - Counter32 - } - - -- CLDAP does not use binds; for A CLDAP DS the bind - -- related counters will be inaccessible. - -- - -- CLDAP and LDAP implement "Read" and "List" operations - -- indirectly via the "search" operation; the following - -- counters will be inaccessible for CLDAP and LDAP DSs: - -- dsReadOps, dsListOps - -- - -- CLDAP does not implement "Compare", "Add", "Remove", - -- "Modify", "ModifyRDN"; the following counters will be - -- inaccessible for CLDAP DSs: - -- dsCompareOps, dsAddEntryOps, dsRemoveEntryOps, - -- dsModifyEntryOps, dsModifyRDNOps. - -- - -- CLDAP and LDAP DS's do not return Referrals - -- the following fields will remain inaccessible for - -- CLDAP and LDAP DSs: dsReferrals. - - dsAnonymousBinds OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of anonymous binds to this DS from UAs - since application start." - ::= {dsOpsEntry 1} - - dsUnAuthBinds OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of un-authenticated binds to this DS since - application start." 
- ::= {dsOpsEntry 2} - - dsSimpleAuthBinds OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of binds to this DS that were authenticated - using simple authentication procedures since - application start." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 8.1.2.1.1. and, RFC1777 Section 4.1" - ::= {dsOpsEntry 3} - - dsStrongAuthBinds OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of binds to this DS that were authenticated - using the strong authentication procedures since - application start. This includes the binds that were - authenticated using external authentication procedures." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Sections 8.1.2.1.2 & 8.1.2.1.3. and, RFC1777 Section 4.1." - ::= {dsOpsEntry 4} - - dsBindSecurityErrors OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of bind operations that have been rejected - by this DS due to inappropriateAuthentication or - invalidCredentials." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 12.7.2 and, RFC1777 Section 4." - ::= {dsOpsEntry 5} - - dsInOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of operations forwarded to this DS - from UAs or other DSs since application - start up." - ::= {dsOpsEntry 6} - - dsReadOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of read operations serviced by - this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 9.1." - ::= {dsOpsEntry 7} - - dsCompareOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of compare operations serviced by - this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 9.2. and, RFC1777 section 4.8" - ::= {dsOpsEntry 8} - - dsAddEntryOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of addEntry operations serviced by - this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 11.1. and, RFC1777 Section 4.5." - ::= {dsOpsEntry 9} - - dsRemoveEntryOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of removeEntry operations serviced by - this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 11.2. and, RFC1777 Section 4.6." - ::= {dsOpsEntry 10} - - dsModifyEntryOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of modifyEntry operations serviced by - this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 11.3. and, RFC1777 Section 4.4." - ::= {dsOpsEntry 11} - - dsModifyRDNOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of modifyRDN operations serviced by - this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 11.4.and, RFC1777 Section 4.7" - ::= {dsOpsEntry 12} - - dsListOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of list operations serviced by - this DS since application startup." 
- REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 10.1." - ::= {dsOpsEntry 13} - - dsSearchOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of search operations- baseObject searches, - oneLevel searches and wholeSubtree searches, - serviced by this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 10.2. and, RFC1777 Section 4.3." - ::= {dsOpsEntry 14} - - dsOneLevelSearchOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of oneLevel search operations serviced - by this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 10.2.2.2. and, RFC1777 Section 4.3." - ::= {dsOpsEntry 15} - - dsWholeSubtreeSearchOps OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of wholeSubtree search operations serviced - by this DS since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 10.2.2.2. and, RFC1777 Section 4.3." - ::= {dsOpsEntry 16} - - dsReferrals OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of referrals returned by this DS in response - to requests for operations since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 12.6." - ::= {dsOpsEntry 17} - - dsChainings OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of operations forwarded by this DS - to other DSs since application startup." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.518, 1988: - Section 14." - ::= {dsOpsEntry 18} - - dsSecurityErrors OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of operations forwarded to this DS - which did not meet the security requirements. " - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Section 12.7. and, RFC1777 Section 4." - ::= {dsOpsEntry 19} - - dsErrors OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of operations that could not be serviced - due to errors other than security errors, and - referrals. - A partially serviced operation will not be counted - as an error. - The errors include NameErrors, UpdateErrors, Attribute - errors and ServiceErrors." - REFERENCE - " CCITT Blue Book Fascicle VIII.8 - Rec. X.511, 1988: - Sections 12.4, 12.5, 12.8 & 12.9. and, RFC1777 Section 4." - ::= {dsOpsEntry 20} - - -- Entry statistics/Cache performance - dsEntriesTable OBJECT-TYPE - SYNTAX SEQUENCE OF DsEntriesEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - " The table holding information related to the - entry statistics and cache performance of the DSs." - ::= {nsldap 2} - - dsEntriesEntry OBJECT-TYPE - SYNTAX DsEntriesEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - " Entry containing statistics pertaining to entries - held by a DS." 
- INDEX { applIndex } - ::= {dsEntriesTable 1} - - - DsEntriesEntry ::= SEQUENCE { - - dsMasterEntries - Gauge32, - dsCopyEntries - Gauge32, - dsCacheEntries - Gauge32, - dsCacheHits - Counter32, - dsSlaveHits - Counter32 - } - - -- A (C)LDAP frontend to the X.500 Directory will not have - -- MasterEntries, CopyEntries; the following counters will - -- be inaccessible for LDAP/CLDAP frontends to the X.500 - -- directory: dsMasterEntries, dsCopyEntries, dsSlaveHits. - - dsMasterEntries OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of entries mastered in the DS." - ::= {dsEntriesEntry 1} - - dsCopyEntries OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of entries for which systematic (slave) - copies are maintained in the DS." - ::= {dsEntriesEntry 2} - - dsCacheEntries OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of entries cached (non-systematic copies) in - the DS. This will include the entries that are - cached partially. The negative cache is not counted." - ::= {dsEntriesEntry 3} - - dsCacheHits OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of operations that were serviced from - the locally held cache since application - startup." - ::= {dsEntriesEntry 4} - - dsSlaveHits OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Number of operations that were serviced from - the locally held object replications ( shadow - entries) since application startup." - ::= {dsEntriesEntry 5} - - -- The dsIntTable contains statistical data on the peer DSs - -- with which the monitored DSs (attempt to) interact. This - -- table will provide a useful insight into the effect of - -- neighbours on the DS performance. - -- The table keeps track of the last "N" DSs with which the - -- monitored DSs has interacted (attempted to interact), - -- where "N" is a locally-defined constant. - - dsIntTable OBJECT-TYPE - SYNTAX SEQUENCE OF DsIntEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - " Each row of this table contains some details - related to the history of the interaction - of the monitored DSs with their respective - peer DSs." - ::= { nsldap 3 } - - dsIntEntry OBJECT-TYPE - SYNTAX DsIntEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - " Entry containing interaction details of a DS - with a peer DS." - INDEX { applIndex, dsIntIndex } - ::= { dsIntTable 1 } - - DsIntEntry ::= SEQUENCE { - - dsIntIndex - INTEGER, - dsName - DistinguishedName, - dsTimeOfCreation - TimeStamp, - dsTimeOfLastAttempt - TimeStamp, - dsTimeOfLastSuccess - TimeStamp, - dsFailuresSinceLastSuccess - Counter32, - dsFailures - Counter32, - dsSuccesses - Counter32, - dsURL - URLString - - } - - dsIntIndex OBJECT-TYPE - SYNTAX INTEGER (1..2147483647) - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - " Together with applIndex it forms the unique key to - identify the conceptual row which contains useful info - on the (attempted) interaction between the DS (referred - to by applIndex) and a peer DS." - ::= {dsIntEntry 1} - - dsName OBJECT-TYPE - SYNTAX DistinguishedName - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Distinguished Name of the peer DS to which this - entry pertains." - ::= {dsIntEntry 2} - - dsTimeOfCreation OBJECT-TYPE - SYNTAX TimeStamp - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " The value of sysUpTime when this row was created. 
- If the entry was created before the network management - subsystem was initialized, this object will contain - a value of zero." - ::= {dsIntEntry 3} - - dsTimeOfLastAttempt OBJECT-TYPE - SYNTAX TimeStamp - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " The value of sysUpTime when the last attempt was made - to contact this DS. If the last attempt was made before - the network management subsystem was initialized, this - object will contain a value of zero." - ::= {dsIntEntry 4} - - dsTimeOfLastSuccess OBJECT-TYPE - SYNTAX TimeStamp - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " The value of sysUpTime when the last attempt made to - contact this DS was successful. If there have - been no successful attempts this entry will have a value - of zero. If the last successful attempt was made before - the network management subsystem was initialized, this - object will contain a value of zero." - ::= {dsIntEntry 5} - - dsFailuresSinceLastSuccess OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " The number of failures since the last time an - attempt to contact this DS was successful. If - there has been no successful attempts, this counter - will contain the number of failures since this entry - was created." - ::= {dsIntEntry 6} - - dsFailures OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Cumulative failures since the creation of - this entry." - ::= {dsIntEntry 7} - - dsSuccesses OBJECT-TYPE - SYNTAX Counter32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " Cumulative successes since the creation of - this entry." - ::= {dsIntEntry 8} - - dsURL OBJECT-TYPE - SYNTAX URLString - MAX-ACCESS read-only - STATUS current - DESCRIPTION - " URL of the DS application." - ::= {dsIntEntry 9} - --- --- Information about this installation of the directory server --- - - dsEntityTable OBJECT-TYPE - SYNTAX SEQUENCE OF DsEntityEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - "This table holds general information related to an installed - instance of a directory server" - ::= {nsldap 5} - - dsEntityEntry OBJECT-TYPE - SYNTAX DsEntityEntry - MAX-ACCESS not-accessible - STATUS current - DESCRIPTION - "Entry of general information about an installed instance - of a directory server" - INDEX { applIndex} - ::= {dsEntityTable 1} - - DsEntityEntry ::= SEQUENCE { - dsEntityDescr - DisplayString, - dsEntityVers - DisplayString, - dsEntityOrg - DisplayString, - dsEntityLocation - DisplayString, - dsEntityContact - DisplayString, - dsEntityName - DisplayString - } - - dsEntityDescr OBJECT-TYPE - SYNTAX DisplayString(SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "A general textual description of this directory server." - ::= {dsEntityEntry 1} - - dsEntityVers OBJECT-TYPE - SYNTAX DisplayString(SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "The version of this directory server" - ::={dsEntityEntry 2} - - dsEntityOrg OBJECT-TYPE - SYNTAX DisplayString(SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "Organization responsible for directory server at this installation" - ::={dsEntityEntry 3} - - dsEntityLocation OBJECT-TYPE - SYNTAX DisplayString(SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "Physical location of this entity (directory server). - For example: hostname, building number, lab number, etc." 
- ::={dsEntityEntry 4} - - dsEntityContact OBJECT-TYPE - SYNTAX DisplayString(SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "Contact person(s)responsible for the directory server at this - installation, together with information on how to conact." - ::={dsEntityEntry 5} - - dsEntityName OBJECT-TYPE - SYNTAX DisplayString(SIZE (0..255)) - ACCESS read-only - STATUS mandatory - DESCRIPTION - "Name assigned to this entity at the installation site" - ::={dsEntityEntry 6} - --- --- Traps --- --- - nsDirectoryServerDown TRAP-TYPE - ENTERPRISE netscape - VARIABLES { dsEntityDescr, dsEntityVers, dsEntityLocation, - dsEntityContact } - DESCRIPTION "This trap is generated whenever the agent detects the - directory server to be (potentially) Down." - ::= 7001 - - nsDirectoryServerStart TRAP-TYPE - ENTERPRISE netscape - VARIABLES { dsEntityDescr, dsEntityVers, dsEntityLocation } - DESCRIPTION "This trap is generated whenever the agent detects the - directory server to have (re)started." - ::= 7002 - - END -
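The textual conventions deleted above are specified purely in prose. As an illustration of the DateAndTime octet layout described in its DESCRIPTION clause (a self-contained sketch in Python based on that text, not code from this repository), the 8- or 11-octet value can be decoded as follows:

# Illustrative sketch only: decodes an SNMP DateAndTime OCTET STRING using
# the field layout quoted in the SNMPv2-TC DESCRIPTION above (8 or 11 octets).
# This helper is hypothetical and is not part of 389-ds-base.
import struct

def decode_date_and_time(octets: bytes) -> str:
    if len(octets) not in (8, 11):
        raise ValueError("DateAndTime must be 8 or 11 octets")
    # The year is a 2-octet value in network byte order; month, day, hour,
    # minutes, seconds and deci-seconds are one octet each.
    year, month, day, hour, minute, second, deci = struct.unpack("!H6B", octets[:8])
    text = f"{year}-{month}-{day},{hour}:{minute}:{second}.{deci}"
    if len(octets) == 11:
        # Optional timezone: direction from UTC ('+'/'-'), hours and minutes from UTC.
        direction, utc_hours, utc_minutes = chr(octets[8]), octets[9], octets[10]
        text += f",{direction}{utc_hours}:{utc_minutes}"
    return text

# The example given in the DESCRIPTION: Tuesday May 26, 1992 at 1:30:15 PM EDT.
value = struct.pack("!H6B", 1992, 5, 26, 13, 30, 15, 0) + b"-" + bytes([4, 0])
print(decode_date_and_time(value))   # -> 1992-5-26,13:30:15.0,-4:0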
0
7fb2595757a85fed45b42826a1c21e1e9827f224
389ds/389-ds-base
Issue: 48851 - investigate and port TET matching rules filter tests (indexing final) Bug Description: Investigate and port TET matching rules filter tests (indexing final) Relates: https://pagure.io/389-ds-base/issue/48851 Author: aborah Reviewed by: Simon Pichugin
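The ported tests create per-attribute matching-rule indexes before exercising any filters. As a minimal sketch of that setup step (assuming a running lib389 standalone instance such as the test's topology_st.standalone fixture; the helper name add_matching_rule_index is illustrative and not part of the patch), one row of the test's LIST_CN_INDEX could be applied like this:

# Minimal sketch, not part of the commit: mirrors the _create_index_entry
# fixture in the new test. `instance` is assumed to be a running standalone
# DirSrv; `add_matching_rule_index` is a hypothetical helper name.
from lib389.index import Indexes

def add_matching_rule_index(instance):
    indexes = Indexes(instance)
    # Presence, equality and substring index types for the attribute that
    # exercises the caseExactMatch matching rule (one row of LIST_CN_INDEX).
    indexes.create(properties={
        'cn': 'attrcaseExactMatch',
        'nsSystemIndex': 'true',
        'nsIndexType': ['pres', 'eq', 'sub'],
    })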
commit 7fb2595757a85fed45b42826a1c21e1e9827f224 Author: Anuj Borah <[email protected]> Date: Wed Aug 7 15:55:06 2019 +0530 Issue: 48851 - investigate and port TET matching rules filter tests(indexing final) Bug Description: Investigate and port TET matching rules filter tests(indexing final) Relates : https://pagure.io/389-ds-base/issue/48851 Author: aborah Reviewed by: Simon Pichugin diff --git a/dirsrvtests/tests/suites/filter/filter_index_matching_rule.py b/dirsrvtests/tests/suites/filter/filter_index_matching_rule.py new file mode 100644 index 000000000..7a5528875 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_index_matching_rule.py @@ -0,0 +1,332 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +""" +Test the matching rules feature . +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st +from lib389.cos import CosTemplates +from lib389.index import Indexes + +import ldap + +pytestmark = pytest.mark.tier1 + + +TESTED_MATCHING_RULES = ["bitStringMatch", + "caseExactIA5Match", + "caseExactMatch", + "caseExactOrderingMatch", + "caseExactSubstringsMatch", + "caseExactIA5SubstringsMatch", + "generalizedTimeMatch", + "generalizedTimeOrderingMatch", + "booleanMatch", + "caseIgnoreIA5Match", + "caseIgnoreIA5SubstringsMatch", + "caseIgnoreMatch", + "caseIgnoreOrderingMatch", + "caseIgnoreSubstringsMatch", + "caseIgnoreListMatch", + "caseIgnoreListSubstringsMatch", + "objectIdentifierMatch", + "directoryStringFirstComponentMatch", + "objectIdentifierFirstComponentMatch", + "distinguishedNameMatch", + "integerMatch", + "integerOrderingMatch", + "integerFirstComponentMatch", + "uniqueMemberMatch", + "numericStringMatch", + "numericStringOrderingMatch", + "numericStringSubstringsMatch", + "telephoneNumberMatch", + "telephoneNumberSubstringsMatch", + "octetStringMatch", + "octetStringOrderingMatch"] + + +LIST_CN_INDEX = [('attroctetStringMatch', ['pres', 'eq']), + ('attrbitStringMatch', ['pres', 'eq']), + ('attrcaseExactIA5Match', ['pres', 'eq', 'sub']), + ('attrcaseExactMatch', ['pres', 'eq', 'sub']), + ('attrgeneralizedTimeMatch', ['pres', 'eq']), + ('attrbooleanMatch', ['pres', 'eq']), + ('attrcaseIgnoreIA5Match', ['pres', 'eq', 'sub']), + ('attrcaseIgnoreMatch', ['pres', 'eq', 'sub']), + ('attrcaseIgnoreListMatch', ['pres', 'eq', 'sub']), + ('attrobjectIdentifierMatch', ['pres', 'eq']), + ('attrdistinguishedNameMatch', ['pres', 'eq']), + ('attrintegerMatch', ['pres', 'eq']), + ('attruniqueMemberMatch', ['pres', 'eq']), + ('attrnumericStringMatch', ['pres', 'eq', 'sub']), + ('attrtelephoneNumberMatch', ['pres', 'eq', 'sub']), + ('attrdirectoryStringFirstComponentMatch', ['pres', 'eq']), + ('attrobjectIdentifierFirstComponentMatch', ['pres', 'eq']), + ('attrintegerFirstComponentMatch', ['pres', 'eq'])] + + +LIST_ATTR_INDEX = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", + "'0011'B", "'0100'B", "'0100'B", "'0101'B", "'0101'B", + "'0110'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', + 'spRain', 'sprAin', 'sprAin', 'spraIn', 'spraIn', + 'sprain', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 
'positive': ['ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171301Z', '20100218171302Z', '20100218171303Z', + '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', + '20100218171301Z', '20100218171302Z', '20100218171302Z', + '20100218171303Z', '20100218171303Z', '20100218171304Z', + '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', + 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5', + 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', + 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', + 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', + 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', + 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 
'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', + 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', + '00004', '00004', '00005', '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', + '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', + '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] + + +LIST_MOD_ATTR_ALL = [ + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇélIné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', + 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171300Z'], + 'negative': ['20100218171300Z', '20100218171301Z', '20100218171302Z', + '20100218171303Z', '20100218171304Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['TRUE'], + 'negative': ['TRUE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain1'], + 'negative': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè1'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo1$bar'], + 'negative': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar']}, + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B"], + 'negative': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['Sprain'], + 'negative': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + 
'1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè1'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo1,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', + 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-2'], + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-2'], + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo1,cn=bar#'0001'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00001'], + 'negative': ['00001', '00002', '00003', '00004', '00005', '00006']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['+1 408 555 4798'], + 'negative': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAE='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=']}] + + [email protected](scope="module") +def _create_index_entry(topology_st): + """Create index entries. + :id: 9c93aec8-b87d-11e9-93b0-8c16451d917b + :setup: Standalone + :steps: + 1. Test index entries can be created. + :expected results: + 1. Pass + """ + indexes = Indexes(topology_st.standalone) + for cn_cn, index_type in LIST_CN_INDEX: + indexes.create(properties={ + 'cn': cn_cn, + 'nsSystemIndex': 'true', + 'nsIndexType': index_type + }) + + [email protected]("index", LIST_ATTR_INDEX) +def test_valid_invalid_attributes(topology_st, _create_index_entry, index): + """Test valid and invalid values of attributes + :id: 93dc9e02-b87d-11e9-b39b-8c16451d917b + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses that matching rule + 2. Delete existing entry + 3. Create entry with an attribute that uses that matching rule providing duplicate + values that are duplicates according to the equality matching rule. + :expected results: + 1. Pass + 2. Pass + 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry' + index['attr'].split('attr')[1], + index['attr']: index['positive']}) + entry.delete() + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + cos.create(properties={'cn': 'addentry' + index['attr'].split('attr')[1], + index['attr']: index['negative']}) + + [email protected]("mod", LIST_MOD_ATTR_ALL) +def test_mods(topology_st, _create_index_entry, mod): + """Test valid and invalid values of attributes mods + :id: 8c15874c-b87d-11e9-9c5d-8c16451d917b + :setup: Standalone + :steps: + 1. 
Create entry with an attribute that uses matching mod + 2. Add an attribute that uses that matching mod providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expected results: + 1. Pass + 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 3. Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + cos.create(properties={'cn': 'addentry'+mod['attr'].split('attr')[1], + mod['attr']: mod['positive']}) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + cos.list()[0].add(mod['attr'], mod['negative']) + for entry in cos.list(): + entry.delete() + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE)
0
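A minimal client-side sketch of the matching-rule behavior the test above exercises, written with python-ldap instead of lib389. The connection URL, bind DN, password and suffix are placeholders for a local test instance; "description" is used because its equality rule is caseIgnoreMatch, so two values differing only in case count as duplicates and the add should be rejected with TYPE_OR_VALUE_EXISTS, just like the "negative" value sets in the test.

import ldap
import ldap.modlist as modlist

# Assumed test instance and credentials (placeholders).
conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")

dn = "cn=mr-demo,dc=example,dc=com"
attrs = {
    "objectClass": [b"top", b"extensibleObject"],
    "cn": [b"mr-demo"],
    # "Sprain" and "sprain" are equal under caseIgnoreMatch, so the server
    # should treat them as duplicate values of the same attribute.
    "description": [b"Sprain", b"sprain"],
}
try:
    conn.add_s(dn, modlist.addModlist(attrs))
    print("entry added (no duplicate detected)")
except ldap.TYPE_OR_VALUE_EXISTS:
    print("duplicate under the equality matching rule rejected, as expected")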
498c42b8ed1d30e078182d1aeb686f7cb7ce5dbd
389ds/389-ds-base
fix transaction support in ldbm_delete ldbm_back_delete() was not properly clearing out the transaction pointer before starting a new transaction, and was not properly clearing out the used transaction after a commit or an abort.
commit 498c42b8ed1d30e078182d1aeb686f7cb7ce5dbd Author: Rich Megginson <[email protected]> Date: Wed Oct 5 16:51:58 2011 -0600 fix transaction support in ldbm_delete ldbm_back_delete() was not properly clearing out the transaction pointer before starting a new transaction, and was not properly clearing out the used transaction after a commit or an abort. diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index 10f8f68c6..40aed481f 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -451,6 +451,7 @@ ldbm_back_delete( Slapi_PBlock *pb ) * to the persistent store. From now on, we're transacted */ + txn.back_txn_txn = NULL; /* ready to create the child transaction */ for (retry_count = 0; retry_count < RETRY_TIMES; retry_count++) { if (retry_count > 0) { dblayer_txn_abort(li,&txn); @@ -472,7 +473,7 @@ ldbm_back_delete( Slapi_PBlock *pb ) } /* stash the transaction */ - slapi_pblock_set(pb, SLAPI_TXN, (void *)txn.back_txn_txn); + slapi_pblock_set(pb, SLAPI_TXN, txn.back_txn_txn); /* call the transaction pre delete plugins just after creating the transaction */ if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN))) { @@ -892,6 +893,9 @@ ldbm_back_delete( Slapi_PBlock *pb ) } retval = dblayer_txn_commit(li,&txn); + /* after commit - txn is no longer valid - replace SLAPI_TXN with parent */ + txn.back_txn_txn = NULL; + slapi_pblock_set(pb, SLAPI_TXN, parent_txn); if (0 != retval) { if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1; @@ -955,6 +959,9 @@ error_return: /* It is safer not to abort when the transaction is not started. */ if (retry_count > 0) { dblayer_txn_abort(li,&txn); /* abort crashes in case disk full */ + /* txn is no longer valid - reset the txn pointer to the parent */ + txn.back_txn_txn = NULL; + slapi_pblock_set(pb, SLAPI_TXN, parent_txn); } common_return:
0
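The heart of this fix is a retry loop that must treat the child transaction handle as invalid before the first begin and after every commit or abort, while keeping the parent transaction stashed in the pblock. Below is a schematic, runnable Python analogue of that control flow; the Txn and Backend classes and the pblock dict are toys invented purely for illustration, not slapd or lib389 APIs.

import random

class Txn:
    def __init__(self, parent=None):
        self.parent = parent

class Backend:
    def txn_begin(self, parent):
        return Txn(parent)

    def txn_commit(self, txn):
        if random.random() < 0.3:           # simulate a retryable deadlock
            raise RuntimeError("DB_LOCK_DEADLOCK")

    def txn_abort(self, txn):
        pass

def delete_with_retry(be, pblock, retries=3):
    parent_txn = pblock.get("txn")
    txn = None                              # ready to create the child transaction
    for _ in range(retries):
        if txn is not None:                 # previous pass failed: abort and reset
            be.txn_abort(txn)
            txn = None
            pblock["txn"] = parent_txn
        txn = be.txn_begin(parent_txn)
        pblock["txn"] = txn                 # stash the child transaction
        try:
            be.txn_commit(txn)
        except RuntimeError:
            continue                        # retry with a fresh transaction
        txn = None                          # after commit the handle is no longer valid
        pblock["txn"] = parent_txn
        return 0
    if txn is not None:                     # retries exhausted: clean up
        be.txn_abort(txn)
        pblock["txn"] = parent_txn
    return -1

print(delete_with_retry(Backend(), {"txn": None}))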
b6ed4534959f309203126758fbffbd0f9b16db65
389ds/389-ds-base
Ticket 50164 - Add test for dscreate Description: Add a simple test in basic suite to make sure dscreate works, also moved setup/remove tests from lib389 and moved them inside dirsrvtests directory https://pagure.io/389-ds-base/issue/50164 Reviewed by: ?
commit b6ed4534959f309203126758fbffbd0f9b16db65 Author: Mark Reynolds <[email protected]> Date: Wed Jan 16 18:04:24 2019 +0000 Ticket 50164 - Add test for dscreate Description: Add a simple test in basic suite to make sure dscreate works, also moved setup/remove tests from lib389 and moved them inside dirsrvtests directory https://pagure.io/389-ds-base/issue/50164 Reviewed by: ? diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py index 50db65d77..50b5dc049 100644 --- a/dirsrvtests/tests/suites/basic/basic_test.py +++ b/dirsrvtests/tests/suites/basic/basic_test.py @@ -870,7 +870,7 @@ def test_mod_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr): :id: c7831e04-f458-4e23-83c7-b6f66109f639 - :setup: Standalone instance and we are using rootdse_attr fixture which + :setup: Standalone instance and we are using rootdse_attr fixture which adds nsslapd-return-default-opattr attr with value of one operation attribute. :steps: @@ -1143,6 +1143,56 @@ def test_ticketldbm_audit(topology_st): assert audit_pattern_found(inst, regex) +def test_dscreate(request): + """Test that dscreate works, we need this for now until setup-ds.pl is + fully discontinued. + + :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb9 + :setup: None + :steps: + 1. Create template file for dscreate + 2. Create instance using template file + :expectedresults: + 1. Should succeeds + 2. Should succeeds + """ + + template_file = "dssetup.inf" + template_text = """[general] +config_version = 2 +full_machine_name = localhost.localdomain + + +[slapd] +instance_name = test_dscreate +root_dn = cn=directory manager +root_password = someLongPassword_123 + +[backend-userroot] +suffix = dc=example,dc=com +sample_entries = yes +""" + + with open(template_file, "w") as template_fd: + template_fd.write(template_text) + cmd = 'dscreate from-file ' + template_file + + try: + subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + log.fatal("dscreate failed! Error ({}) {}".format(e.returncode, e.output)) + assert False + + def fin(): + os.remove(template_file) + try: + subprocess.check_output('dsctl test_dscreate remove --do-it', shell=True) + except subprocess.CalledProcessError as e: + log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) + + request.addfinalizer(fin) + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode diff --git a/dirsrvtests/tests/suites/setup_ds/__init__.py b/dirsrvtests/tests/suites/setup_ds/__init__.py index 8371b7682..80ce75136 100644 --- a/dirsrvtests/tests/suites/setup_ds/__init__.py +++ b/dirsrvtests/tests/suites/setup_ds/__init__.py @@ -1,3 +1,12 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + """ - :Requirement: 389-ds-base: Basic Directory Server Operations + :Requirement: 389-ds-base: Basic Directory Server Operations """ + diff --git a/src/lib389/lib389/tests/instance/setup_test.py b/dirsrvtests/tests/suites/setup_ds/dscreate_test.py similarity index 100% rename from src/lib389/lib389/tests/instance/setup_test.py rename to dirsrvtests/tests/suites/setup_ds/dscreate_test.py diff --git a/src/lib389/lib389/tests/instance/remove_test.py b/dirsrvtests/tests/suites/setup_ds/remove_test.py similarity index 100% rename from src/lib389/lib389/tests/instance/remove_test.py rename to dirsrvtests/tests/suites/setup_ds/remove_test.py diff --git a/src/lib389/lib389/tests/instance/__init__.py b/src/lib389/lib389/tests/instance/__init__.py deleted file mode 100644 index d57ac3325..000000000 --- a/src/lib389/lib389/tests/instance/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2016 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- -
0
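The template the test writes is a plain INI file, so it can also be generated with configparser instead of a string literal. A small sketch under that assumption follows; the instance name, suffix and password are placeholders, and the two commands in the trailing comments are the same ones the test invokes (dscreate from-file and dsctl ... remove --do-it).

import configparser

cfg = configparser.ConfigParser()
cfg["general"] = {
    "config_version": "2",
    "full_machine_name": "localhost.localdomain",
}
cfg["slapd"] = {
    "instance_name": "demo_dscreate",
    "root_dn": "cn=directory manager",
    "root_password": "someLongPassword_123",
}
cfg["backend-userroot"] = {
    "suffix": "dc=example,dc=com",
    "sample_entries": "yes",
}

with open("dssetup.inf", "w") as template_fd:
    cfg.write(template_fd)

# To actually create and later remove the instance (needs root):
#   dscreate from-file dssetup.inf
#   dsctl demo_dscreate remove --do-it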
cd354191bc08c4eeb1a0222217777f122d726439
389ds/389-ds-base
Ticket #47701 - Make retro changelog trim interval programmable Description: Fixing resource leak CID 12435 (Coverity) introduced by commit bb4f0c428f9e53bccb875a552f5cae1ee6f733be https://fedorahosted.org/389/ticket/47701 Reviewed by [email protected] (Thank you, Rich!!)
commit cd354191bc08c4eeb1a0222217777f122d726439 Author: Noriko Hosoi <[email protected]> Date: Mon Feb 24 09:11:37 2014 -0800 Ticket #47701 - Make retro changelog trim interval programmable Description: Fixing resource leak CID 12435 (Coverity) introduced by commit bb4f0c428f9e53bccb875a552f5cae1ee6f733be https://fedorahosted.org/389/ticket/47701 Reviewed by [email protected] (Thank you, Rich!!) diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c index cb18889ae..4d1737720 100644 --- a/ldap/servers/plugins/retrocl/retrocl_trim.c +++ b/ldap/servers/plugins/retrocl/retrocl_trim.c @@ -499,7 +499,6 @@ void retrocl_init_trimming (void) const char *cl_trim_interval; cl_maxage = retrocl_get_config_str(CONFIG_CHANGELOG_MAXAGE_ATTRIBUTE); - cl_trim_interval = retrocl_get_config_str(CONFIG_CHANGELOG_TRIM_INTERVAL); if (cl_maxage == NULL) { LDAPDebug0Args(LDAP_DEBUG_TRACE,"No maxage, not trimming retro changelog.\n"); @@ -508,6 +507,7 @@ void retrocl_init_trimming (void) ageval = age_str2time (cl_maxage); slapi_ch_free_string((char **)&cl_maxage); + cl_trim_interval = retrocl_get_config_str(CONFIG_CHANGELOG_TRIM_INTERVAL); if (cl_trim_interval) { trim_interval = strtol(cl_trim_interval, (char **)NULL, 10); if (0 == trim_interval) {
0
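The leak fixed here is the usual "resource acquired before an early return" pattern: cl_trim_interval was fetched before the maxage check that can return without freeing it, so the fix moves the fetch after that check. A rough Python analogue with explicitly managed file handles is sketched below; the paths and the read_optional helper are made up for illustration, since garbage collection hides this class of bug in everyday Python.

import os

def read_optional(path):
    if not os.path.exists(path):
        return None
    with open(path) as f:
        return f.read()

def init_trimming_leaky(maxage_path, interval_path):
    interval = open(interval_path)          # acquired before the early return
    maxage = read_optional(maxage_path)
    if maxage is None:
        return None                         # 'interval' is never closed: leaked
    data = (maxage, interval.read())
    interval.close()
    return data

def init_trimming_fixed(maxage_path, interval_path):
    maxage = read_optional(maxage_path)
    if maxage is None:
        return None                         # nothing acquired yet, nothing to leak
    with open(interval_path) as interval:   # acquire only once it is actually needed
        return maxage, interval.read()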
092f08aee37f5f3664c3dc7159c71331fef13b25
389ds/389-ds-base
Issue 49156 - Modify token :assert: to :expectedresults: Description: In order to import the test cases from source code to Polarion, we are using betelgeuse and betelgeuse uses :expectedresults: token in place of :assert: token. That needs a fix in create_test file and all other files which used :assert: in docstrings https://pagure.io/389-ds-base/issue/49156 Reviewed by: Simon Pichugin Signed-off-by: Simon Pichugin <[email protected]>
commit 092f08aee37f5f3664c3dc7159c71331fef13b25 Author: Amita Sharma <[email protected]> Date: Mon Mar 27 12:15:45 2017 +0530 Issue 49156 - Modify token :assert: to :expectedresults: Description: In order to import the test cases from source code to Polarion, we are using betelgeuse and betelgeuse uses :expectedresults: token in place of :assert: token. That needs a fix in create_test file and all other files which used :assert: in docstrings https://pagure.io/389-ds-base/issue/49156 Reviewed by: Simon Pichugin Signed-off-by: Simon Pichugin <[email protected]> diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py index 0461ae0e3..ed71eaf75 100755 --- a/dirsrvtests/create_test.py +++ b/dirsrvtests/create_test.py @@ -694,7 +694,7 @@ if len(sys.argv) > 0: TEST.write(' :setup: Fill in set up configuration here\n') TEST.write(' :steps: 1. Fill in test case steps here\n') TEST.write(' 2. And indent them like this (RST format requirement)\n') - TEST.write(' :assert: Fill in the result that is expected\n') + TEST.write(' :expectedresults: Fill in the result that is expected\n') TEST.write(' """\n\n') TEST.write(' # If you need any test suite initialization,\n') TEST.write(' # please, write additional fixture for that (including finalizer).\n' diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py index 1c67fcf32..6170eb085 100644 --- a/dirsrvtests/tests/suites/config/config_test.py +++ b/dirsrvtests/tests/suites/config/config_test.py @@ -70,7 +70,7 @@ def test_maxbersize_repl(topology_m2, test_user, big_file): :steps: 1. Set 20KiB small maxbersize on master2 2. Add big value to master2 3. Add big value to master1 - :assert: Adding the big value to master2 is failed, + :expectedresults: Adding the big value to master2 is failed, adding the big value to master1 is succeed, the big value is successfully replicated to master2 """ @@ -144,7 +144,7 @@ def test_config_listen_backport_size(topology_m2): Try positive and negative. 3. Set nsslapd-listen-backlog-size to an invalid value 4. Set nsslapd-listen-backlog-size back to a default value - :assert: Search and the valid modification should be a success + :expectedresults: Search and the valid modification should be a success Modification with an invalid value should throw an error """ @@ -202,7 +202,7 @@ def test_config_deadlock_policy(topology_m2): Try positive and negative. 3. Set nsslapd-db-deadlock-policy to an invalid value 4. Set nsslapd-db-deadlock-policy back to a default value - :assert: Search and the valid modification should be a success + :expectedresults: Search and the valid modification should be a success Modification with invalid values should throw an error """ diff --git a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py index b9fcafc30..83283dd0b 100644 --- a/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py +++ b/dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py @@ -100,7 +100,7 @@ def test_supported_features(topology_st): :feature: Filter :setup: Standalone instance :steps: 1. Search for 'supportedFeatures' at rootDSE - :assert: Value 1.3.6.1.4.1.4203.1.5.1 is presented + :expectedresults: Value 1.3.6.1.4.1.4203.1.5.1 is presented """ entries = topology_st.standalone.search_s('', ldap.SCOPE_BASE, @@ -127,7 +127,7 @@ def test_search_basic(topology_st, test_user, user_aci, add_attr, :steps: 1. Bind as regular user or Directory Manager 2. 
Search with '+' filter and with additionaly 'objectClass' and '*' attrs too - :assert: All expected values were returned, not more + :expectedresults: All expected values were returned, not more """ if regular_user: diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py index 96626aa41..1f2098b3f 100644 --- a/dirsrvtests/tests/suites/paged_results/paged_results_test.py +++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py @@ -253,7 +253,7 @@ def test_search_success(topology_st, test_user, page_size, users_num): variated number of users for the search base :steps: 1. Bind as test user 2. Search through added users with a simple paged control - :assert: All users should be found + :expectedresults: All users should be found """ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) @@ -302,7 +302,7 @@ def test_search_limits_fail(topology_st, test_user, page_size, users_num, 2. Set limit attribute to the value that will cause an expected exception 3. Search through added users with a simple paged control - :assert: Should fail with appropriate exception + :expectedresults: Should fail with appropriate exception """ users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) @@ -384,7 +384,7 @@ def test_search_sort_success(topology_st, test_user): :steps: 1. Bind as test user 2. Search through added users with a simple paged control and a server side sort control - :assert: All users should be found and sorted + :expectedresults: All users should be found and sorted """ users_num = 50 @@ -428,7 +428,7 @@ def test_search_abandon(topology_st, test_user): :steps: 1. Bind as test user 2. Search through added users with a simple paged control 3. Abandon the search - :assert: It will throw an ldap.TIMEOUT exception, while trying + :expectedresults: It will throw an ldap.TIMEOUT exception, while trying to get the rest of the search results """ @@ -478,7 +478,7 @@ def test_search_with_timelimit(topology_st, test_user): and timelimit set to 5 3. When the returned cookie is empty, wait 10 seconds 4. Perform steps 2 and 3 three times in a row - :assert: No error happens + :expectedresults: No error happens """ users_num = 100 @@ -559,7 +559,7 @@ def test_search_dns_ip_aci(topology_st, test_user, aci_subject): 6. Return ACI to the initial state 7. Go through all steps onece again, but use IP subjectdn insted of DNS - :assert: No error happens, all users should be found and sorted + :expectedresults: No error happens, all users should be found and sorted """ users_num = 100 @@ -627,7 +627,7 @@ def test_search_multiple_paging(topology_st, test_user): 2. Initiate the search with a simple paged control 3. Acquire the returned cookie only one time 4. Perform steps 2 and 3 three times in a row - :assert: No error happens + :expectedresults: No error happens """ users_num = 100 @@ -685,7 +685,7 @@ def test_search_invalid_cookie(topology_st, test_user, invalid_cookie): 2. Initiate the search with a simple paged control 3. Put an invalid cookie (-1, 1000) to the control 4. Continue the search - :assert: It will throw an TypeError exception + :expectedresults: It will throw an TypeError exception """ users_num = 100 @@ -735,7 +735,7 @@ def test_search_abandon_with_zero_size(topology_st, test_user): :steps: 1. Bind as test user 2. 
Search through added users with a simple paged control and page_size = 0 - :assert: No cookie should be returned at all + :expectedresults: No cookie should be returned at all """ users_num = 10 @@ -783,7 +783,7 @@ def test_search_pagedsizelimit_success(topology_st, test_user): 2. Bind as test user 3. Search through added users with a simple paged control using page_size = 10 - :assert: All users should be found + :expectedresults: All users should be found """ users_num = 10 @@ -841,7 +841,7 @@ def test_search_nspagedsizelimit(topology_st, test_user, 9. Bind as test user 10. Search through added users with a simple paged control using page_size = 10 - :assert: After the steps 1-4, it should PASS. + :expectedresults: After the steps 1-4, it should PASS. After the steps 7-10, it should throw SIZELIMIT_EXCEEDED exception """ @@ -909,7 +909,7 @@ def test_search_paged_limits(topology_st, test_user, conf_attr_values, expected_ 9. Bind as test user 10. Search through added users with a simple paged control using page_size = 10 - :assert: After the steps 1-4, it should PASS. + :expectedresults: After the steps 1-4, it should PASS. After the steps 7-10, it should throw ADMINLIMIT_EXCEEDED exception """ @@ -985,7 +985,7 @@ def test_search_paged_user_limits(topology_st, test_user, conf_attr_values, expe 10. Bind as test user 11. Search through added users with a simple paged control using page_size = 10 - :assert: After the steps 1-4, it should PASS. + :expectedresults: After the steps 1-4, it should PASS. After the steps 8-11, it should throw ADMINLIMIT_EXCEEDED exception """ @@ -1047,7 +1047,7 @@ def test_ger_basic(topology_st, test_user): variated number of users for the search base :steps: 1. Search through added users with a simple paged control and get effective rights control - :assert: All users should be found, every found entry should have + :expectedresults: All users should be found, every found entry should have an 'attributeLevelRights' returned """ @@ -1089,7 +1089,7 @@ def test_multi_suffix_search(topology_st, test_user, new_suffixes): using page_size = 4 3. Wait some time logs to be updated 3. Check access log - :assert: All users should be found, the access log should contain + :expectedresults: All users should be found, the access log should contain the pr_cookie for each page request and it should be equal 0, except the last one should be equal -1 """ @@ -1145,7 +1145,7 @@ def test_maxsimplepaged_per_conn_success(topology_st, test_user, conf_attr_value to the next values: no value, -1, some positive 2. Search through the added users with a simple paged control using page size = 4 - :assert: If no value or value = -1 - all users should be found, + :expectedresults: If no value or value = -1 - all users should be found, default behaviour; If the value is positive, the value is the max simple paged results requests per connection. @@ -1194,7 +1194,7 @@ def test_maxsimplepaged_per_conn_failure(topology_st, test_user, conf_attr_value 3. Set nsslapd-maxsimplepaged-per-conn = 1 in cn=config 4. 
Search through the added users with a simple paged control using page size = 4 two times, but don't close the connections - :assert: During the searches UNWILLING_TO_PERFORM should be throwned + :expectedresults: During the searches UNWILLING_TO_PERFORM should be throwned """ users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py index d076cf425..4584f2966 100644 --- a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py +++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py @@ -132,7 +132,7 @@ def test_change_pwd(topology_st, test_user, password_policy, 2. Bind as test user 3. Try to change password - :assert: Subtree/User passwordChange - result + :expectedresults: Subtree/User passwordChange - result off/on, on/on - success on/off, off/off - UNWILLING_TO_PERFORM """ @@ -202,7 +202,7 @@ def test_pwd_min_age(topology_st, test_user, password_policy): 5. Try to change password two times in a row 6. Wait 12 seconds 7. Try to change password - :assert: User should be not allowed to change the password + :expectedresults: User should be not allowed to change the password right after previous change - CONSTRAINT_VIOLATION User should be not allowed to change the password after 12 seconds passed diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py index fddbd49dc..828a2d3a4 100644 --- a/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py +++ b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py @@ -157,7 +157,7 @@ def test_entry_has_no_restrictions(topology_st, password_policy, test_user, b) 'on' and 'off' c) 'off' and 'on' 3. Try to add user with a short password - :assert: No exception should occure + :expectedresults: No exception should occure """ log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, inherit_value)) @@ -220,7 +220,7 @@ def test_entry_has_restrictions(topology_st, password_policy, test_user, contain b) ou=people policy container 5. Try to add user with a short password (<9) 6. Try to add user with a long password (>9) - :assert: User should be rejected + :expectedresults: User should be rejected """ log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, 'on')) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py index bc7175af6..9c583d9cf 100644 --- a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py +++ b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py @@ -240,9 +240,9 @@ def test_different_values(topology_st, value): under cn=config entry 2. Run the search command to check the value of passwordSendExpiringTime attribute - :assert: 1. Invalid values should be rejected with - an OPERATIONS_ERROR - 2. Valid values should be accepted and saved + :expectedresults: 1. Invalid values should be rejected with + an OPERATIONS_ERROR + 2. Valid values should be accepted and saved """ log.info('Get the default value') @@ -285,8 +285,8 @@ def test_expiry_time(topology_st, global_policy, add_user): 2. User entry for binding :steps: 1. Bind as the user 2. 
Request the control for the user - :assert: The password expiry warning time for the user should be - returned + :expectedresults: The password expiry warning time for the user should be + returned """ res_ctrls = None @@ -328,8 +328,8 @@ def test_password_warning(topology_st, global_policy, add_user, attr, val): 2. Bind as the user 3a. Request the control for the user 3b. Request the password expiry warning time - :assert: a. Password expiry warning time should not be returned - b. Password expiry warning time should be returned + :expectedresults: a. Password expiry warning time should not be returned + b. Password expiry warning time should be returned """ try: @@ -370,11 +370,11 @@ def test_with_different_password_states(topology_st, global_policy, add_user): 3. Set the system date to the current day 4. Try to bind with the user entry and request the control - :assert: 1. In the first try, the bind should fail with an - INVALID_CREDENTIALS error - 2. In the second try, the bind should be successful - and the password expiry warning time should be - returned + :expectedresults: 1. In the first try, the bind should fail with an + INVALID_CREDENTIALS error + 2. In the second try, the bind should be successful + and the password expiry warning time should be + returned """ res_ctrls = None @@ -428,9 +428,9 @@ def test_default_behavior(topology_st, global_policy_default, add_user): 2. User entry for binding to the server :steps: 1. Bind as the user 2. Request the control for the user - :assert: Password expiry warning time should be returned by the - server by the server since passwordMaxAge and - passwordWarning are set to the same value + :expectedresults: Password expiry warning time should be returned by the + server by the server since passwordMaxAge and + passwordWarning are set to the same value """ res_ctrls = None @@ -469,8 +469,8 @@ def test_with_local_policy(topology_st, global_policy, local_policy): -h localhost -p 389 -U 'uid=tuser,dc=example,dc=com' :steps: 1. Bind as the user 2. Request the control for the user - :assert: Password expiry warning time should not be returned for the - user + :expectedresults: Password expiry warning time should not be returned for the + user """ res_ctrls = None diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py index 1dbf50507..595ea278c 100644 --- a/dirsrvtests/tests/suites/plugins/accpol_test.py +++ b/dirsrvtests/tests/suites/plugins/accpol_test.py @@ -126,7 +126,7 @@ def test_actNinact_local(topology_st, accpolicy_local): 5. Wait for 2 secs or till accountInactivityLimit is exceeded 6. Run ldapsearch as normal user and check if its inactivated, expected error 19. 7. Sleep for +14 secs to check if accounts accessed at step4 are inactivated now - :assert: Should return error code 19 + :expectedresults: Should return error code 19 """ suffix = DEFAULT_SUFFIX @@ -160,7 +160,7 @@ def test_noinact_local(topology_st, accpolicy_local): 2. Wait for 16 secs and run ldapsearch as normal user to check account is active, expected 0. 3. Move users from ou=groups to ou=people subtree 4. Sleep for 16 secs and check if entries are inactivated - :assert: Should return error code 0 and 19 + :expectedresults: Should return error code 0 and 19 """ suffix = DEFAULT_SUFFIX @@ -200,7 +200,7 @@ def test_inact_local(topology_st, accpolicy_local): 3. Move users from ou=people to ou=groups subtree 4. Sleep for +2 secs and check if users are inactivated in ou=people subtree 5. 
Check if users are not inactivated in ou=groups subtree - :assert: Should return error code 0 + :expectedresults: Should return error code 0 """ suffix = DEFAULT_SUFFIX diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py index 5b785f3e2..e677b9d14 100644 --- a/dirsrvtests/tests/suites/replication/acceptance_test.py +++ b/dirsrvtests/tests/suites/replication/acceptance_test.py @@ -76,7 +76,7 @@ def test_add_entry(topo, test_entry): :steps: 1. Add entry to master1 2. Wait for replication to happen 3. Check entry on all other masters - :assert: Entry should be replicated + :expectedresults: Entry should be replicated """ entries = get_repl_entries(topo, TEST_ENTRY_NAME, ["uid"]) @@ -92,7 +92,7 @@ def test_modify_entry(topo, test_entry): :steps: 1. Modify the entry on master1 (try add, modify and delete operations) 2. Wait for replication to happen 3. Check entry on all other masters - :assert: Entry attr should be replicated + :expectedresults: Entry attr should be replicated """ log.info('Modifying entry {} - add operation'.format(TEST_ENTRY_DN)) @@ -144,7 +144,7 @@ def test_delete_entry(topo, test_entry): :steps: 1. Delete the entry from master1 2. Wait for replication to happen 3. Check entry on all other masters - :assert: Entry deletion should be replicated + :expectedresults: Entry deletion should be replicated """ log.info('Deleting entry {} during the test'.format(TEST_ENTRY_DN)) @@ -164,7 +164,7 @@ def test_modrdn_entry(topo, test_entry, delold): :steps: 1. Make modrdn operation on entry on master1 with both delold 1 and 0 2. Wait for replication to happen 3. Check entry on all other masters - :assert: Entry with new RDN should be replicated. + :expectedresults: Entry with new RDN should be replicated. If delold was specified, entry with old RDN shouldn't exist """ diff --git a/dirsrvtests/tests/suites/replication/single_master_test.py b/dirsrvtests/tests/suites/replication/single_master_test.py index 8be6d37f4..c1428f68f 100644 --- a/dirsrvtests/tests/suites/replication/single_master_test.py +++ b/dirsrvtests/tests/suites/replication/single_master_test.py @@ -85,7 +85,7 @@ def test_mail_attr_repl(topo_r, test_user): 4. Restore mail database 5. Search for the entry with a substring 'mail=user*' 6. Search for the entry once again to make sure that server is alive - :assert: No crash happens + :expectedresults: No crash happens """ master = topo_r.ms["master1"] @@ -144,7 +144,7 @@ def test_lastupdate_attr_before_init(topo_nr, replica_without_init): without initialization :steps: 1. Check nsds5replicaLastUpdateStart, nsds5replicaLastUpdateEnd, nsds5replicaLastUpdateStatus attrs - :assert: nsds5replicaLastUpdateStart: 0, nsds5replicaLastUpdateEnd: 0 and + :expectedresults: nsds5replicaLastUpdateStart: 0, nsds5replicaLastUpdateEnd: 0 and nsds5replicaLastUpdateStatus is not equal to "0 Replica acquired successfully: Incremental update started" """ diff --git a/dirsrvtests/tests/suites/replication/tombstone_test.py b/dirsrvtests/tests/suites/replication/tombstone_test.py index 4c623ba0b..71fc08492 100644 --- a/dirsrvtests/tests/suites/replication/tombstone_test.py +++ b/dirsrvtests/tests/suites/replication/tombstone_test.py @@ -22,7 +22,7 @@ def test_purge_success(topology_st): 2. Add an entry to the replicated suffix 3. Delete the entry 4. 
Check that tombstone entry exists (objectclass=nsTombstone) - :assert: Tombstone entry exist + :expectedresults: Tombstone entry exist """ log.info('Setting up replication...') diff --git a/dirsrvtests/tests/suites/setup_ds/setup_ds_test.py b/dirsrvtests/tests/suites/setup_ds/setup_ds_test.py index db2dff1d6..5aa1f74d5 100644 --- a/dirsrvtests/tests/suites/setup_ds/setup_ds_test.py +++ b/dirsrvtests/tests/suites/setup_ds/setup_ds_test.py @@ -42,12 +42,14 @@ def test_slapd_InstScriptsEnabled(config_attr): :ID: 02faac7f-c44d-4a3e-bf2d-1021e51da1ed :feature: Add configure option to disable instance specific scripts - :setup: Create directory server instance using setup-ds.pl with slapd.InstScriptsEnabled option as "True" and "False" + :setup: Create directory server instance using setup-ds.pl + with slapd.InstScriptsEnabled option as "True" and "False" :steps: 1. Execute setup-ds.pl with slapd.InstScriptsEnabled option as "True" and "False" one by one 2. Check if /usr/lib64/dirsrv/slapd-instance instance script directory is created or not. 3. The script directory should be created if slapd.InstScriptsEnabled option is "True" 4. The script directory should not be created if slapd.InstScriptsEnabled option is "False" - :assert: The script directory should be created if slapd.InstScriptsEnabled option is "True" and not if it is "Fasle" + :expectedresults: The script directory should be created + if slapd.InstScriptsEnabled option is "True" and not if it is "Fasle" """ log.info('set SER_INST_SCRIPTS_ENABLED to {}'.format(config_attr))
0
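A change like this, replacing one docstring token across a whole test tree, is easy to script. A small sketch of such a migration is below; the default path is a placeholder, the actual commit edited the files directly, and any real run should happen on a clean git checkout so the resulting diff can be reviewed.

import pathlib

def migrate_tokens(root="dirsrvtests/tests/suites"):
    base = pathlib.Path(root)
    if not base.is_dir():
        print(f"{root} not found; nothing to do")
        return
    for path in base.rglob("*.py"):
        text = path.read_text(encoding="utf-8")
        if ":assert:" in text:
            path.write_text(text.replace(":assert:", ":expectedresults:"),
                            encoding="utf-8")
            print(f"updated {path}")

if __name__ == "__main__":
    migrate_tokens()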
a30af93befdf2e27b711484a74ba02079be93792
389ds/389-ds-base
Bug 604263 - Fix memory leak when password change is rejected If a password is changed using the password modify extended operation, the modify used to modify the password will be leaked if the change is rejected due to password policy or access control settings. This patch frees the mod when the password change is rejected.
commit a30af93befdf2e27b711484a74ba02079be93792 Author: Nathan Kinder <[email protected]> Date: Tue Jun 15 14:33:17 2010 -0700 Bug 604263 - Fix memory leak when password change is rejected If a password is changed using the password modify extended operation, the modify used to modify the password will be leaked if the change is rejected due to password policy or access control settings. This patch frees the mod when the password change is rejected. diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 955ef820a..ead066d11 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -529,6 +529,7 @@ static int modify_internal_pb (Slapi_PBlock *pb) if (pw_change == -1) { /* The internal result code will already have been set by op_shared_allow_pw_change() */ + ldap_mods_free(normalized_mods, 1); return 0; } }
0
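The leaked mods were allocated while servicing the RFC 3062 password modify extended operation, so the client-side trigger is simply a password change the server refuses because of password policy or ACIs. A minimal python-ldap sketch of that trigger follows, with host, DN and passwords as placeholders for a test instance; the leak itself was entirely server-side on the rejection path.

import ldap

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("uid=tuser,ou=People,dc=example,dc=com", "oldSecret123")
try:
    # RFC 3062 password modify; rejected when, e.g., passwordChange is "off"
    # or an ACI denies the write.
    conn.passwd_s("uid=tuser,ou=People,dc=example,dc=com",
                  "oldSecret123", "newSecret456")
    print("password change accepted")
except ldap.LDAPError as e:
    print("password change rejected:", e)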
2e5ee4d10c24f7d102fc7461f23f77ac631dc507
389ds/389-ds-base
Trac Ticket #298 - crash when replicating orphaned tombstone entry https://fedorahosted.org/389/ticket/298 Fix description: 1. The cause of the crash was freeing a to-be-added entry in tombstone_to_glue although the entry is consumed in slapi_add_entry_internal_set_pb/slapi_add_internal_pb. This patch removes the redundant slapi_entry_free from tombstone_to_glue. 2. Introducing is_suffix_dn_ext to pass is_tombstone flag for getting the proper parent sdn of a tombstoned entry. 3. Logic handling ancestor tombstone was broken. In _entryrdn_insert_key, if _entryrdn_get_tombstone_elem finds a child node, it was checking if the node is a tombstone or not immediately. It should have been done in the next loop. 4. Reducing repeated "WARNING: bad entry: ID ##" messages.
commit 2e5ee4d10c24f7d102fc7461f23f77ac631dc507 Author: Noriko Hosoi <[email protected]> Date: Fri Feb 17 16:28:48 2012 -0800 Trac Ticket #298 - crash when replicating orphaned tombstone entry https://fedorahosted.org/389/ticket/298 Fix description: 1. The cause of the crash was freeing a to-be-added entry in tombstone_to_glue although the entry is consumed in slapi_add_entry_internal_set_pb/slapi_add_internal_pb. This patch removes the redundant slapi_entry_free from tombstone_to_glue. 2. Introducing is_suffix_dn_ext to pass is_tombstone flag for getting the proper parent sdn of a tombstoned entry. 3. Logic handling ancestor tombstone was broken. In _entryrdn_insert_key, if _entryrdn_get_tombstone_elem finds a child node, it was checking if the node is a tombstone or not immediately. It should have been done in the next loop. 4. Reducing repeated "WARNING: bad entry: ID ##" messages. diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c index 1328996ef..7000b0ee6 100644 --- a/ldap/servers/plugins/replication/urp.c +++ b/ldap/servers/plugins/replication/urp.c @@ -726,7 +726,7 @@ urp_fixup_add_entry (Slapi_Entry *e, const char *target_uniqueid, const char *pa */ slapi_add_entry_internal_set_pb ( newpb, - e, + e, /* entry will be consumed */ NULL, /*Controls*/ repl_get_plugin_identity ( PLUGIN_MULTIMASTER_REPLICATION ), OP_FLAG_REPLICATED | OP_FLAG_REPL_FIXUP | opflags); @@ -1260,14 +1260,15 @@ is_suffix_entry ( Slapi_PBlock *pb, Slapi_Entry *entry, Slapi_DN **parentdn ) } int -is_suffix_dn ( Slapi_PBlock *pb, const Slapi_DN *dn, Slapi_DN **parentdn ) +is_suffix_dn_ext ( Slapi_PBlock *pb, const Slapi_DN *dn, Slapi_DN **parentdn, + int is_tombstone ) { Slapi_Backend *backend; int rc; *parentdn = slapi_sdn_new(); slapi_pblock_get( pb, SLAPI_BACKEND, &backend ); - slapi_sdn_get_backend_parent (dn, *parentdn, backend); + slapi_sdn_get_backend_parent_ext (dn, *parentdn, backend, is_tombstone); /* A suffix entry doesn't have parent dn */ rc = slapi_sdn_isempty (*parentdn) ? 1 : 0; @@ -1275,6 +1276,12 @@ is_suffix_dn ( Slapi_PBlock *pb, const Slapi_DN *dn, Slapi_DN **parentdn ) return rc; } +int +is_suffix_dn ( Slapi_PBlock *pb, const Slapi_DN *dn, Slapi_DN **parentdn ) +{ + return is_suffix_dn_ext ( pb, dn, parentdn, 0 ); +} + static int mod_namingconflict_attr (const char *uniqueid, const Slapi_DN *entrysdn, const Slapi_DN *conflictsdn, CSN *opcsn) diff --git a/ldap/servers/plugins/replication/urp.h b/ldap/servers/plugins/replication/urp.h index 2ca7ad2c5..ec7c94d21 100644 --- a/ldap/servers/plugins/replication/urp.h +++ b/ldap/servers/plugins/replication/urp.h @@ -63,6 +63,7 @@ int urp_fixup_rename_entry (Slapi_Entry *entry, const char *newrdn, int opflags) int urp_fixup_modify_entry (const char *uniqueid, const Slapi_DN *sdn, CSN *opcsn, Slapi_Mods *smods, int opflags); int is_suffix_dn (Slapi_PBlock *pb, const Slapi_DN *dn, Slapi_DN **parenddn); +int is_suffix_dn_ext (Slapi_PBlock *pb, const Slapi_DN *dn, Slapi_DN **parenddn, int is_tombstone); /* * urp_glue.c diff --git a/ldap/servers/plugins/replication/urp_tombstone.c b/ldap/servers/plugins/replication/urp_tombstone.c index 6dd9a2ace..a61f1e52d 100644 --- a/ldap/servers/plugins/replication/urp_tombstone.c +++ b/ldap/servers/plugins/replication/urp_tombstone.c @@ -169,7 +169,7 @@ tombstone_to_glue ( /* JCM - This DN calculation is odd. It could resolve to NULL * which won't help us identify the correct backend to search. 
*/ - is_suffix_dn (pb, tombstonedn, &parentdn); + is_suffix_dn_ext (pb, tombstonedn, &parentdn, 1 /* is_tombstone */); parentuniqueid= slapi_entry_attr_get_charptr (tombstoneentry, SLAPI_ATTR_VALUE_PARENT_UNIQUEID); /* Allocated */ tombstone_to_glue_resolve_parent (pb, sessionid, parentdn, parentuniqueid, opcsn); @@ -201,7 +201,6 @@ tombstone_to_glue ( "%s: Can't resurrect tombstone %s to glue reason '%s', error=%d\n", sessionid, addingdn, reason, op_result); } - slapi_entry_free (addingentry); return op_result; } diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c index 206dfc4fa..53fd3fc93 100644 --- a/ldap/servers/slapd/add.c +++ b/ldap/servers/slapd/add.c @@ -377,7 +377,7 @@ int slapi_add_internal_set_pb (Slapi_PBlock *pb, const char *dn, LDAPMod **attrs return rc; } - +/* Note: Passed entry e is going to be consumed. */ /* Initialize a pblock for a call to slapi_add_internal_pb() */ void slapi_add_entry_internal_set_pb (Slapi_PBlock *pb, Slapi_Entry *e, LDAPControl **controls, Slapi_ComponentId *plugin_identity, int operation_flags) @@ -415,12 +415,12 @@ static int add_internal_pb (Slapi_PBlock *pb) { opresult = LDAP_PARAM_ERROR; slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult); - return 0; + return 0; } slapi_pblock_get(pb, SLAPI_OPERATION, &op); - op->o_handler_data = &opresult; - op->o_result_handler = internal_getresult_callback; + op->o_handler_data = &opresult; + op->o_result_handler = internal_getresult_callback; slapi_pblock_set(pb, SLAPI_REQCONTROLS, controls); @@ -431,9 +431,9 @@ static int add_internal_pb (Slapi_PBlock *pb) set_config_params (pb); /* perform the add operation */ - op_shared_add (pb); + op_shared_add (pb); - slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult); + slapi_pblock_set(pb, SLAPI_PLUGIN_INTOP_RESULT, &opresult); return 0; } @@ -731,6 +731,7 @@ done: slapi_entry_free(pse); slapi_ch_free((void **)&operation->o_params.p.p_add.parentuniqueid); slapi_entry_free(e); + slapi_pblock_set(pb, SLAPI_ADD_ENTRY, NULL); valuearray_free(&unhashed_password_vals); slapi_ch_free((void**)&pwdtype); slapi_ch_free_string(&proxydn); diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c index 458ead0f6..4c1129d57 100644 --- a/ldap/servers/slapd/back-ldbm/import-threads.c +++ b/ldap/servers/slapd/back-ldbm/import-threads.c @@ -2039,7 +2039,7 @@ foreman_do_entrydn(ImportJob *job, FifoItem *fi) fi->line, fi->filename); idl_free(IDL); /* skip this one */ - fi->bad = 1; + fi->bad = FIFOITEM_BAD; job->skipped++; return -1; /* skip to next entry */ } @@ -2214,7 +2214,7 @@ import_foreman(void *param) slapi_entry_get_dn(fi->entry->ep_entry), fi->line, fi->filename); /* skip this one */ - fi->bad = 1; + fi->bad = FIFOITEM_BAD; job->skipped++; goto cont; /* below */ } diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c index 7f03eb3a6..4958dc02d 100644 --- a/ldap/servers/slapd/back-ldbm/import.c +++ b/ldap/servers/slapd/back-ldbm/import.c @@ -106,14 +106,13 @@ FifoItem *import_fifo_fetch(ImportJob *job, ID id, int worker) } else { return NULL; } - if (fi->entry) { + if (fi->entry && fi->bad && (FIFOITEM_BAD == fi->bad)) { + fi->bad = FIFOITEM_BAD_PRINTED; if (worker) { - if (fi->bad) { - import_log_notice(job, "WARNING: bad entry: ID %d", id); - return NULL; - } - PR_ASSERT(fi->entry->ep_refcnt > 0); + import_log_notice(job, "WARNING: bad entry: ID %d", id); + return NULL; } + PR_ASSERT(fi->entry->ep_refcnt > 0); } return fi; } diff --git 
a/ldap/servers/slapd/back-ldbm/import.h b/ldap/servers/slapd/back-ldbm/import.h index bef5fa09f..c9a3f8a82 100644 --- a/ldap/servers/slapd/back-ldbm/import.h +++ b/ldap/servers/slapd/back-ldbm/import.h @@ -83,12 +83,15 @@ struct _import_index_info /* item on the entry FIFO */ typedef struct { struct backentry *entry; - char *filename; /* or NULL */ - int line; /* filename/line are used to report errors */ - int bad; /* foreman did not like the entry */ - size_t esize; /* entry size */ + char *filename; /* or NULL */ + int line; /* filename/line are used to report errors */ + int bad; /* foreman did not like the entry */ + size_t esize; /* entry size */ } FifoItem; +#define FIFOITEM_BAD 1 +#define FIFOITEM_BAD_PRINTED 2 + typedef struct { FifoItem *item; size_t size; /* Queue size in entries (computed in import_fifo_init). */ diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c index 748ef267a..d29ff5f0f 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_add.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c @@ -176,8 +176,7 @@ ldbm_back_add( Slapi_PBlock *pb ) } - if (!is_tombstone_operation && !is_resurect_operation) - { + if (!is_tombstone_operation) { rc= slapi_setbit_int(rc,SLAPI_RTN_BIT_FETCH_EXISTING_DN_ENTRY); } rc= slapi_setbit_int(rc,SLAPI_RTN_BIT_FETCH_EXISTING_UNIQUEID_ENTRY); diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c index a0aaa8022..2a7b1e406 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c @@ -1584,8 +1584,7 @@ retry_get: slapi_ch_free(&ptr); } bail: - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, - "<-- _entryrdn_get_elem\n"); + slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, "<-- _entryrdn_get_elem\n"); return rc; } @@ -2231,6 +2230,12 @@ _entryrdn_insert_key(backend *be, /* Check the direct child in the RDN array, first */ rdnidx = slapi_rdn_get_prev_ext(srdn, rdnidx, &childnrdn, FLAG_ALL_NRDNS); + if ((rdnidx < 0) || (NULL == childnrdn)) { + slapi_log_error(SLAPI_LOG_FATAL, ENTRYRDN_TAG, + "_entryrdn_insert_key: RDN list \"%s\" is broken: " + "idx(%d)\n", slapi_rdn_get_rdn(srdn), rdnidx); + goto bail; + } /* Generate a key for child tree */ /* E.g., C1 */ keybuf = slapi_ch_smprintf("%c%u", RDN_INDEX_CHILD, workid); @@ -2298,7 +2303,7 @@ _entryrdn_insert_key(backend *be, */ rc = _entryrdn_get_tombstone_elem(cursor, tmpsrdn, &key, childnrdn, &tmpelem); - if (rc || (NULL == tmpelem)) { + if (rc) { char *dn = NULL; slapi_rdn_get_dn(tmpsrdn, &dn); if (DB_NOTFOUND == rc) { @@ -2312,24 +2317,16 @@ _entryrdn_insert_key(backend *be, } slapi_ch_free_string(&dn); goto bail; - } else { - int tmpidx = slapi_rdn_get_prev_ext(srdn, rdnidx, - &childnrdn, FLAG_ALL_NRDNS); - if (childnrdn && - (0 == strncasecmp(childnrdn, SLAPI_ATTR_UNIQUEID, - sizeof(SLAPI_ATTR_UNIQUEID) - 1))) { - rdnidx = tmpidx; - } } /* Node is a tombstone. 
*/ - slapi_ch_free((void **)&elem); - elem = tmpelem; - currid = id_stored_to_internal(elem->rdn_elem_id); - nrdn = childnrdn; - workid = currid; - slapi_ch_free((void **)&parentelem); - parentelem = elem; - elem = NULL; + if (tmpelem) { + currid = id_stored_to_internal(tmpelem->rdn_elem_id); + nrdn = childnrdn; + workid = currid; + slapi_ch_free((void **)&parentelem); + parentelem = tmpelem; + slapi_ch_free((void **)&elem); + } } } else { char *dn = NULL; @@ -2823,14 +2820,6 @@ _entryrdn_index_read(backend *be, slapi_rdn_free(&tmpsrdn); } goto bail; - } else { - int tmpidx = slapi_rdn_get_prev_ext(srdn, rdnidx, - &childnrdn, FLAG_ALL_NRDNS); - if (childnrdn && - (0 == strncasecmp(childnrdn, SLAPI_ATTR_UNIQUEID, - sizeof(SLAPI_ATTR_UNIQUEID) - 1))) { - rdnidx = tmpidx; - } } } else { slapi_ch_free((void **)&tmpelem); diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c index 1e28070bd..77f7cc9b8 100644 --- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c @@ -256,7 +256,9 @@ int add_op_attrs(Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *ep, * If so, need to get the grandparent of the leaf. */ if (slapi_entry_flag_is_set(ep->ep_entry, - SLAPI_ENTRY_FLAG_TOMBSTONE)) { + SLAPI_ENTRY_FLAG_TOMBSTONE) && + (0 == strncasecmp(pdn, SLAPI_ATTR_UNIQUEID, + sizeof(SLAPI_ATTR_UNIQUEID) - 1))) { char *ppdn = slapi_dn_parent(pdn); slapi_ch_free_string(&pdn); if (NULL == ppdn) {
0
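The entries at the center of this fix, tombstones, are only returned by the server when a search filter names objectclass=nsTombstone explicitly, which is a convenient way to inspect the state this crash depended on. A short python-ldap sketch follows, assuming a replicated test instance reachable with the placeholder URL and credentials below; nsUniqueId and nscpEntryDN are the attributes tombstones normally carry.

import ldap

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")
tombstones = conn.search_s(
    "dc=example,dc=com", ldap.SCOPE_SUBTREE,
    "(&(objectclass=nsTombstone)(nsUniqueId=*))",
    ["nsUniqueId", "nscpEntryDN"])
for dn, attrs in tombstones:
    print(dn, attrs.get("nsUniqueId"))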
054849011dfed22c5b07e26225678e436232752f
389ds/389-ds-base
Ticket 49290 - unindexed range searches don't provide notes=U Bug Description: An unindexed range search would not issue notes=U in the access log. Fix Description: When an unindexed range search is found, add the operation note that the query is unindexed. https://pagure.io/389-ds-base/issue/49290 Author: wibrown Review by: tbordaz (Thanks!)
commit 054849011dfed22c5b07e26225678e436232752f Author: William Brown <[email protected]> Date: Fri Jun 23 16:04:08 2017 +1000 Ticket 49290 - unindexed range searches don't provide notes=U Bug Description: An unindexed range search would not issue notes=U in the access log. Fix Description: When an unindexed range search is found, add the operation note that the query is unindexed. https://pagure.io/389-ds-base/issue/49290 Author: wibrown Review by: tbordaz (Thanks!) diff --git a/dirsrvtests/tests/tickets/ticket49290_test.py b/dirsrvtests/tests/tickets/ticket49290_test.py new file mode 100644 index 000000000..72ba9f93f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49290_test.py @@ -0,0 +1,66 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME + +from lib389.backend import Backends + +def test_49290_range_unindexed_notes(topology_st): + """ + Ticket 49290 had a small collection of issues - the primary issue is + that range requests on an attribute that is unindexed was not reporting + notes=U. This asserts that: + + * When unindexed, the attr shows notes=U + * when indexed, the attr does not + """ + + # First, assert that modifyTimestamp does not have an index. If it does, + # delete it. + topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + backends = Backends(topology_st.standalone) + backend = backends.get(DEFAULT_BENAME) + indexes = backend.get_indexes() + + for i in indexes.list(): + i_cn = i.get_attr_val_utf8('cn') + if i_cn.lower() == 'modifytimestamp': + i.delete() + topology_st.standalone.restart() + + # Now restart the server, and perform a modifyTimestamp range operation. + # in access, we should see notes=U (or notes=A) + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(modifyTimestamp>=0)', ['nsUniqueId',]) + access_lines_unindexed = topology_st.standalone.ds_access_log.match('.*notes=U.*') + assert len(access_lines_unindexed) == 1 + + # Now add the modifyTimestamp index and run db2index. This will restart + # the server + indexes.create(properties={ + 'cn': 'modifytimestamp', + 'nsSystemIndex': 'false', + 'nsIndexType' : 'eq', + }) + topology_st.standalone.stop() + topology_st.standalone.db2index(DEFAULT_BENAME) + topology_st.standalone.start() + + # Now run the modifyTimestamp range query again. Assert that there is no + # notes=U/A in the log + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(modifyTimestamp>=0)', ['nsUniqueId',]) + access_lines_indexed = topology_st.standalone.ds_access_log.match('.*notes=U.*') + # Remove the old lines too. + access_lines_final = set(access_lines_unindexed) - set(access_lines_indexed) + # Make sure we have no unindexed notes in the log. 
+ assert len(access_lines_final) == 0 + diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c index a3dac6643..4d744843a 100644 --- a/ldap/servers/slapd/back-ldbm/index.c +++ b/ldap/servers/slapd/back-ldbm/index.c @@ -1298,6 +1298,13 @@ index_range_read_ext( slapi_log_err(SLAPI_LOG_ARGS, "index_range_read_ext", "indextype: \"%s\" indexmask: 0x%x\n", indextype, ai->ai_indexmask); if ( !is_indexed( indextype, ai->ai_indexmask, ai->ai_index_rules )) { + + /* Mark that the search has an unindexed component */ + uint32_t opnote = 0; + slapi_pblock_get( pb, SLAPI_OPERATION_NOTES, &opnote ); + opnote |= SLAPI_OP_NOTE_UNINDEXED; + slapi_pblock_set( pb, SLAPI_OPERATION_NOTES, &opnote ); + idl = idl_allids( be ); slapi_log_err(SLAPI_LOG_TRACE, "index_range_read_ext", "(%s,%s) %lu candidates (allids)\n",
0
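The test added above drives this through lib389; the same check can be sketched with python-ldap plus a scan of the access log. A range filter on an attribute without an index should now produce a result line carrying notes=U (or notes=A). The URL, credentials and log path below are placeholders, and access-log buffering (nsslapd-accesslog-logbuffering) may delay the line unless it is turned off, as the test does.

import re
import ldap

conn = ldap.initialize("ldap://localhost:389")
conn.simple_bind_s("cn=Directory Manager", "password")
# Range filter on an attribute assumed to have no index on this instance.
conn.search_s("dc=example,dc=com", ldap.SCOPE_SUBTREE,
              "(modifyTimestamp>=20200101000000Z)", ["nsUniqueId"])

access_log = "/var/log/dirsrv/slapd-localhost/access"   # adjust per instance
with open(access_log) as logfile:
    flagged = [line for line in logfile if re.search(r"notes=[AU]", line)]
print(f"{len(flagged)} result line(s) flagged as unindexed")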
888f0b217b3ac3099ba3ccc5002ef5198ddd664d
389ds/389-ds-base
Issue 50201 - nsIndexIDListScanLimit accepts any value Bug Description: Setting of nsIndexIDListScanLimit like 'limit=2 limit=3' are detected and logged in error logs. But the invalid value is successfully applied in the config entry and the operation itself is successful. The impact is limited because the index will be used following idlistscanlimit rather than invalid definition nsIndexIDListScanLimit. Fix Description: Print the errors to the user when he tries to add or to modify index config entry with malformed values. Change tests accordingly. https://pagure.io/389-ds-base/issue/50201 Reviewed by: mreynolds, tbordaz (Thanks!)
commit 888f0b217b3ac3099ba3ccc5002ef5198ddd664d Author: Simon Pichugin <[email protected]> Date: Tue Apr 28 23:44:20 2020 +0200 Issue 50201 - nsIndexIDListScanLimit accepts any value Bug Description: Setting of nsIndexIDListScanLimit like 'limit=2 limit=3' are detected and logged in error logs. But the invalid value is successfully applied in the config entry and the operation itself is successful. The impact is limited because the index will be used following idlistscanlimit rather than invalid definition nsIndexIDListScanLimit. Fix Description: Print the errors to the user when he tries to add or to modify index config entry with malformed values. Change tests accordingly. https://pagure.io/389-ds-base/issue/50201 Reviewed by: mreynolds, tbordaz (Thanks!) diff --git a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py index dd9c6ee4e..0198f6533 100644 --- a/dirsrvtests/tests/suites/filter/filterscanlimit_test.py +++ b/dirsrvtests/tests/suites/filter/filterscanlimit_test.py @@ -11,6 +11,7 @@ This script will test different type of Filers. """ import os +import ldap import pytest from lib389._constants import DEFAULT_SUFFIX, PW_DM @@ -19,11 +20,10 @@ from lib389.idm.user import UserAccounts from lib389.idm.organizationalunit import OrganizationalUnits from lib389.index import Index from lib389.idm.account import Accounts -from lib389.idm.group import UniqueGroups, Group +from lib389.idm.group import UniqueGroups pytestmark = pytest.mark.tier1 - GIVEN_NAME = 'cn=givenname,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' CN_NAME = 'cn=sn,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' UNIQMEMBER = 'cn=uniquemember,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' @@ -39,7 +39,6 @@ LIST_OF_USER_ACCOUNTING = [ "Judy Wallace", "Marcus Ward", "Judy McFarland", - "Anuj Hall", "Gern Triplett", "Emanuel Johnson", "Brad Walker", @@ -57,7 +56,6 @@ LIST_OF_USER_ACCOUNTING = [ "Randy Ulrich", "Richard Francis", "Morgan White", - "Anuj Maddox", "Jody Jensen", "Mike Carter", "Gern Tyler", @@ -77,8 +75,6 @@ LIST_OF_USER_HUMAN = [ "Robert Daugherty", "Torrey Mason", "Brad Talbot", - "Anuj Jablonski", - "Harry Miller", "Jeffrey Campaigne", "Stephen Triplett", "John Falena", @@ -107,8 +103,7 @@ LIST_OF_USER_HUMAN = [ "Tobias Schmith", "Jon Goldstein", "Janet Lutz", - "Karl Cope", -] + "Karl Cope"] LIST_OF_USER_TESTING = [ "Andy Bergin", @@ -122,8 +117,7 @@ LIST_OF_USER_TESTING = [ "Alan White", "Daniel Ward", "Lee Stockton", - "Matthew Vaughan" -] + "Matthew Vaughan"] LIST_OF_USER_DEVELOPMENT = [ "Kelly Winters", @@ -143,7 +137,6 @@ LIST_OF_USER_DEVELOPMENT = [ "Timothy Kelly", "Sue Mason", "Chris Alexander", - "Anuj Jensen", "Martin Talbot", "Scott Farmer", "Allison Jensen", @@ -152,9 +145,7 @@ LIST_OF_USER_DEVELOPMENT = [ "Dan Langdon", "Ashley Knutson", "Jon Bourke", - "Pete Hunt", - -] + "Pete Hunt"] LIST_OF_USER_PAYROLL = [ "Ashley Chassin", @@ -164,12 +155,17 @@ LIST_OF_USER_PAYROLL = [ "Patricia Shelton", "Dietrich Swain", "Allison Hunter", - "Anne-Louise Barnes" + "Anne-Louise Barnes"] -] +LIST_OF_USER_PEOPLE = [ + 'Sam Carter', + 'Tom Morris', + 'Kevin Vaughan', + 'Rich Daugherty', + 'Harry Miller', + 'Sam Schmith'] [email protected](reason="https://pagure.io/389-ds-base/issue/50201") def test_invalid_configuration(topo): """" Error handling for invalid configuration @@ -190,10 +186,7 @@ def test_invalid_configuration(topo): 'limit=0 flags=AND flags=AND', 'limit=0 type=eq values=foo values=foo', 'limit=0 
type=eq values=foo,foo', - 'limit=0 type=sub', - 'limit=0 type=eq values=notvalid', 'limit', - 'limit=0 type=eq values=notavaliddn', 'limit=0 type=pres values=bogus', 'limit=0 type=eq,sub values=bogus', 'limit=', @@ -203,7 +196,8 @@ def test_invalid_configuration(topo): 'limit=-2', 'type=eq', 'limit=0 type=bogus']: - Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + Index(topo.standalone, GIVEN_NAME).replace('nsIndexIDListScanLimit', i) def test_idlistscanlimit(topo): @@ -247,28 +241,24 @@ def test_idlistscanlimit(topo): (LIST_OF_USER_HUMAN, users_human), (LIST_OF_USER_TESTING, users_testing), (LIST_OF_USER_DEVELOPMENT, users_development), - (LIST_OF_USER_PAYROLL, users_payroll)]: + (LIST_OF_USER_PAYROLL, users_payroll), + (LIST_OF_USER_PEOPLE, users_people)]: for demo1 in data[0]: + fn = demo1.split()[0] + sn = demo1.split()[1] + uid = ''.join([fn[:1], sn]).lower() data[1].create(properties={ - 'uid': demo1, + 'uid': uid, 'cn': demo1, - 'sn': demo1.split()[1], + 'sn': sn, 'uidNumber': str(1000), 'gidNumber': '2000', - 'homeDirectory': '/home/' + demo1, - 'givenname': demo1.split()[0], - 'userpassword': PW_DM + 'homeDirectory': f'/home/{uid}', + 'givenname': fn, + 'userpassword': PW_DM, + 'mail': f'{uid}@test.com' }) - users_people.create(properties={ - 'uid': 'scarter', - 'cn': 'Sam Carter', - 'sn': 'Carter', - 'uidNumber': str(1000), - 'gidNumber': '2000', - 'homeDirectory': '/home/' + 'scarter', - 'mail': '[email protected]', - }) try: # Change log levels errorlog_value = topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-level') @@ -297,16 +287,12 @@ def test_idlistscanlimit(topo): Index(topo.standalone, UNIQMEMBER).\ replace('nsIndexIDListScanLimit', - 'limit=0 type=eq values=uid=kvaughan,ou=People,' - 'dc=example,dc=com,uid=rdaugherty,ou=People,dc=example,dc=com') + 'limit=0 type=eq values=uid=kvaughan\2Cou=People\2Cdc=example\2Cdc=com,' + 'uid=rdaugherty\2Cou=People\2Cdc=example\2Cdc=com') Index(topo.standalone, OBJECTCLASS).\ replace('nsIndexIDListScanLimit', 'limit=0 type=eq flags=AND values=inetOrgPerson') - Index(topo.standalone, MAIL).\ - replace('nsIndexIDListScanLimit', - 'cn=mail,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config') - # Search with filter for i in ['(sn=Lutz)', '(sn=*ter)', @@ -321,22 +307,24 @@ def test_idlistscanlimit(topo): '(&(sn=*)(cn=*))', '(sn=Hunter)', '(&(givenname=Richard)(objectclass=organizationalPerson))', - '(givenname=Anuj)', + '(givenname=Morgan)', '(&(givenname=*)(cn=*))', '(givenname=*)']: assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(f'{i}') - # Creating Group - Group(topo.standalone, 'cn=Accounting Managers,ou=groups,dc=example,dc=com').\ - add('uniquemember', + # Creating Groups and adding members + groups = UniqueGroups(topo.standalone, DEFAULT_SUFFIX) + accounting_managers = groups.ensure_state(properties={'cn': 'Accounting Managers'}) + hr_managers = groups.ensure_state(properties={'cn': 'HR Managers'}) + + accounting_managers.add('uniquemember', ['uid=scarter, ou=People, dc=example,dc=com', 'uid=tmorris, ou=People, dc=example,dc=com', 'uid=kvaughan, ou=People, dc=example,dc=com', 'uid=rdaugherty, ou=People, dc=example,dc=com', 'uid=hmiller, ou=People, dc=example,dc=com']) - Group(topo.standalone, 'cn=HR Managers,ou=groups,dc=example,dc=com').\ - add('uniquemember', + hr_managers.add('uniquemember', ['uid=kvaughan, ou=People, dc=example,dc=com', 'uid=cschmith, ou=People, dc=example,dc=com']) @@ -403,10 +391,9 @@ def 
test_idlistscanlimit(topo): '(&(sn=*)(cn=*))', '(sn=Hunter)', '(&(givenname=Richard)(objectclass=organizationalPerson))', - '(givenname=Anuj)', + '(givenname=Morgan)', '(&(givenname=*)(cn=*))', '(givenname=*)']: - assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(value) finally: diff --git a/ldap/servers/slapd/back-ldbm/instance.c b/ldap/servers/slapd/back-ldbm/instance.c index 04c28ff39..07655a8ec 100644 --- a/ldap/servers/slapd/back-ldbm/instance.c +++ b/ldap/servers/slapd/back-ldbm/instance.c @@ -231,7 +231,7 @@ ldbm_instance_create_default_indexes(backend *be) /* ldbm_instance_config_add_index_entry(inst, 2, argv); */ e = ldbm_instance_init_config_entry(LDBM_PSEUDO_ATTR_DEFAULT, "none", 0, 0, 0); - attr_index_config(be, "ldbm index init", 0, e, 1, 0); + attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL); slapi_entry_free(e); if (!entryrdn_get_noancestorid()) { @@ -240,7 +240,7 @@ ldbm_instance_create_default_indexes(backend *be) * but we still want to use the attr index file APIs. */ e = ldbm_instance_init_config_entry(LDBM_ANCESTORID_STR, "eq", 0, 0, 0); - attr_index_config(be, "ldbm index init", 0, e, 1, 0); + attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL); slapi_entry_free(e); } diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c index b9e130d77..f0d418572 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c @@ -633,6 +633,18 @@ attr_index_idlistsize_config(Slapi_Entry *e, struct attrinfo *ai, char *returnte return rc; } +/* + * Function that process index attributes and modifies attrinfo structure + * + * Called while adding default indexes, during db2index execution and + * when we add/modify/delete index config entry + * + * If char *err_buf is not NULL, it will additionally print all error messages to STDERR + * It is used when we add/modify/delete index config entry, so the user would have a better verbose + * + * returns -1, 1 on a failure + * 0 on success + */ int attr_index_config( backend *be, @@ -640,7 +652,8 @@ attr_index_config( int lineno, Slapi_Entry *e, int init __attribute__((unused)), - int indextype_none) + int indextype_none, + char *err_buf) { ldbm_instance *inst = (ldbm_instance *)be->be_instance_info; int j = 0; @@ -662,6 +675,7 @@ attr_index_config( slapi_attr_first_value(attr, &sval); attrValue = slapi_value_get_berval(sval); } else { + slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing indexing arguments\n"); slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing indexing arguments\n"); return -1; } @@ -705,6 +719,10 @@ attr_index_config( } a->ai_indexmask = INDEX_OFFLINE; /* note that the index isn't available */ } else { + slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: %s: line %d: unknown index type \"%s\" (ignored) in entry (%s), " + "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n", + fname, lineno, attrValue->bv_val, slapi_entry_get_dn(e)); slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "%s: line %d: unknown index type \"%s\" (ignored) in entry (%s), " "valid index types are \"pres\", \"eq\", \"approx\", or \"sub\"\n", @@ -715,6 +733,7 @@ attr_index_config( } if (hasIndexType == 0) { /* indexType missing, error out */ + slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: missing index type\n"); slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "Missing index type\n"); attrinfo_delete(&a); return -1; @@ -873,16 +892,26 @@ attr_index_config( 
slapi_ch_free((void **)&official_rules); } } - if ((return_value = attr_index_idlistsize_config(e, a, myreturntext))) { + slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: %s: Failed to parse idscanlimit info: %d:%s\n", + fname, return_value, myreturntext); slapi_log_err(SLAPI_LOG_ERR, "attr_index_config", "%s: Failed to parse idscanlimit info: %d:%s\n", fname, return_value, myreturntext); + if (err_buf != NULL) { + /* we are inside of a callback, we shouldn't allow malformed attributes in index entries */ + attrinfo_delete(&a); + return return_value; + } } /* initialize the IDL code's private data */ return_value = idl_init_private(be, a); if (0 != return_value) { /* fatal error, exit */ + slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: %s: line %d:Fatal Error: Failed to initialize attribute structure\n", + fname, lineno); slapi_log_err(SLAPI_LOG_CRIT, "attr_index_config", "%s: line %d:Fatal Error: Failed to initialize attribute structure\n", fname, lineno); diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c index 45f0034f0..720f93036 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c @@ -25,26 +25,34 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en #define INDEXTYPE_NONE 1 static int -ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name) +ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf) { Slapi_Attr *attr; const struct berval *attrValue; Slapi_Value *sval; + char *edn = slapi_entry_get_dn(e); /* Get the name of the attribute to index which will be the value * of the cn attribute. 
*/ if (slapi_entry_attr_find(e, "cn", &attr) != 0) { - slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", "Malformed index entry %s\n", - slapi_entry_get_dn(e)); + slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: malformed index entry %s\n", + edn); + slapi_log_err(SLAPI_LOG_ERR, + "ldbm_index_parse_entry", "Malformed index entry %s\n", + edn); return LDAP_OPERATIONS_ERROR; } slapi_attr_first_value(attr, &sval); attrValue = slapi_value_get_berval(sval); if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) { + slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: malformed index entry %s -- empty index name\n", + edn); slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", "Malformed index entry %s -- empty index name\n", - slapi_entry_get_dn(e)); + edn); return LDAP_OPERATIONS_ERROR; } @@ -59,16 +67,19 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st attrValue = slapi_value_get_berval(sval); if (NULL == attrValue->bv_val || attrValue->bv_len == 0) { /* missing the index type, error out */ - slapi_log_err(SLAPI_LOG_ERR, - "ldbm_index_parse_entry", "Malformed index entry %s -- empty nsIndexType\n", - slapi_entry_get_dn(e)); + slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: malformed index entry %s -- empty nsIndexType\n", + edn); + slapi_log_err(SLAPI_LOG_ERR, "ldbm_index_parse_entry", + "Malformed index entry %s -- empty nsIndexType\n", + edn); slapi_ch_free_string(index_name); return LDAP_OPERATIONS_ERROR; } } /* ok the entry is good to process, pass it to attr_index_config */ - if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0)) { + if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) { slapi_ch_free_string(index_name); return LDAP_OPERATIONS_ERROR; } @@ -92,7 +103,7 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)), ldbm_instance *inst = (ldbm_instance *)arg; returntext[0] = '\0'; - *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL); + *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL); if (*returncode == LDAP_SUCCESS) { return SLAPI_DSE_CALLBACK_OK; } else { @@ -117,7 +128,7 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused)) char *index_name = NULL; returntext[0] = '\0'; - *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name); + *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext); if (*returncode == LDAP_SUCCESS) { struct attrinfo *ai = NULL; /* if the index is a "system" index, we assume it's being added by @@ -179,7 +190,7 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, slapi_attr_first_value(attr, &sval); attrValue = slapi_value_get_berval(sval); - attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE); + attr_index_config(inst->inst_be, "From DSE delete", 0, e, 0, INDEXTYPE_NONE, returntext); ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo); if (NULL == ainfo) { @@ -213,14 +224,19 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse Slapi_Value *sval; const struct berval *attrValue; struct attrinfo *ainfo = NULL; + char *edn = slapi_entry_get_dn(e); + char *edn_after = slapi_entry_get_dn(entryAfter); returntext[0] = '\0'; *returncode = LDAP_SUCCESS; if (slapi_entry_attr_find(entryAfter, "cn", &attr) != 0) { + slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: malformed index 
entry %s - missing cn attribute\n", + edn_after); slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute\n", - slapi_entry_get_dn(entryAfter)); + edn_after); *returncode = LDAP_OBJECT_CLASS_VIOLATION; return SLAPI_DSE_CALLBACK_ERROR; } @@ -228,31 +244,40 @@ ldbm_instance_index_config_modify_callback(Slapi_PBlock *pb __attribute__((unuse attrValue = slapi_value_get_berval(sval); if (NULL == attrValue->bv_val || 0 == attrValue->bv_len) { + slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: malformed index entry %s - missing index name\n", + edn); slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_index_config_modify_callback", "Malformed index entry %s, missing index name\n", - slapi_entry_get_dn(e)); + edn); *returncode = LDAP_UNWILLING_TO_PERFORM; return SLAPI_DSE_CALLBACK_ERROR; } ainfo_get(inst->inst_be, attrValue->bv_val, &ainfo); if (NULL == ainfo) { + slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: malformed index entry %s - missing cn attribute info\n", + edn); slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing cn attribute info\n", - slapi_entry_get_dn(e)); + edn); *returncode = LDAP_UNWILLING_TO_PERFORM; return SLAPI_DSE_CALLBACK_ERROR; } if (slapi_entry_attr_find(entryAfter, "nsIndexType", &attr) != 0) { + slapi_create_errormsg(returntext, SLAPI_DSE_RETURNTEXT_SIZE, + "Error: malformed index entry %s - missing nsIndexType attribute\n", + edn_after); slapi_log_err(SLAPI_LOG_ERR, "ldbm_instance_index_config_modify_callback", "Malformed index entry %s - missing nsIndexType attribute\n", - slapi_entry_get_dn(entryAfter)); + edn_after); *returncode = LDAP_OBJECT_CLASS_VIOLATION; return SLAPI_DSE_CALLBACK_ERROR; } - if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0)) { + if (attr_index_config(inst->inst_be, "from DSE modify", 0, entryAfter, 0, 0, returntext)) { *returncode = LDAP_UNWILLING_TO_PERFORM; return SLAPI_DSE_CALLBACK_ERROR; } @@ -364,7 +389,7 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e) ainfo_get(inst->inst_be, index_name, &ai); } if (!ai) { - rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name); + rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL); } if (rc == LDAP_SUCCESS) { /* Assume the caller knows if it is OK to go online immediately */ diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c index 9d82c8228..f2ef5ecd4 100644 --- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c +++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c @@ -291,7 +291,7 @@ db2index_add_indexed_attr(backend *be, char *attrString) } } - attr_index_config(be, "from db2index()", 0, e, 0, 0); + attr_index_config(be, "from db2index()", 0, e, 0, 0, NULL); slapi_entry_free(e); return (0); diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h index 9a86c752b..a07acee5e 100644 --- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h +++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h @@ -24,7 +24,7 @@ void attrinfo_delete(struct attrinfo **pp); void ainfo_get(backend *be, char *type, struct attrinfo **at); void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask); void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at); -int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int 
none); +int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none, char *err_buf); int db2index_add_indexed_attr(backend *be, char *attrString); int ldbm_compute_init(void); void attrinfo_deletetree(ldbm_instance *inst);
0
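The commit above makes malformed nsIndexIDListScanLimit values fail the index config add/modify instead of only being logged. As a minimal, hypothetical lib389/pytest sketch of the behaviour the updated test relies on (the instance handle and the exact malformed value are assumptions, not part of the commit):

import ldap
import pytest
from lib389.index import Index

# Same index config DN as the GIVEN_NAME constant used in the test above.
GIVEN_NAME = 'cn=givenname,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'

def check_malformed_scanlimit_rejected(inst):
    # With the fix, the server refuses the update (unwillingToPerform)
    # rather than silently applying a broken nsIndexIDListScanLimit.
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        Index(inst, GIVEN_NAME).replace('nsIndexIDListScanLimit', 'limit=0 type=bogus')

Here inst would be a connected DirSrv instance, for example topo.standalone from the standalone topology fixture.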
d1b2a6e5f80ccc9a3abdfad088cf51f7dd860136
389ds/389-ds-base
Issue 6761 - Password modify extended operation should skip password policy checks when executed by root DN Description: When the LDAP password policy extended operation is executed by the root DN on a regular user under the constraints of a password policy, e.g. password history, it should skip the password policy checks, because the root DN should be allowed to set a regular user's password to any value; however, this is currently not the case. The fix is to check if the bind is a password admin and, if so, set SLAPI_REQUESTOR_ISROOT on the new internal operation that will update the password. This does not bypass ACI evaluation. relates: https://github.com/389ds/389-ds-base/issues/6761 reviewed by: tbordaz & spichugi(Thanks!)
commit d1b2a6e5f80ccc9a3abdfad088cf51f7dd860136 Author: Mark Reynolds <[email protected]> Date: Thu May 1 09:44:33 2025 -0400 Issue 6761 - Password modify extended operation should skip password policy checks when executed by root DN Description: When the LDAP password policy extended operation is executed by root DN on a regular user under constraints of a password policy, eg password history, it should skip password policy checks because the root DN should be allowed to set a regular user password to any value however this is currently not the case. The fix is to check if the bind is a pwd admin and set SLAPI_REQUESTOR_ISROOT for the new internal operatiin that will update the password. This does not bypass aci evalaution. relates: https://github.com/389ds/389-ds-base/issues/6761 reviewed by: tbordaz & spichugi(Thanks!) diff --git a/dirsrvtests/tests/suites/password/pwdAdmin_test.py b/dirsrvtests/tests/suites/password/pwdAdmin_test.py index 3a2924b79..bb8d930c1 100644 --- a/dirsrvtests/tests/suites/password/pwdAdmin_test.py +++ b/dirsrvtests/tests/suites/password/pwdAdmin_test.py @@ -6,7 +6,9 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # +import os import pytest +import time from lib389.tasks import * from lib389.utils import * from lib389.topologies import topology_st @@ -14,8 +16,7 @@ from lib389.topologies import topology_st from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.idm.domain import Domain - -from lib389._constants import SUFFIX, DN_DM, PASSWORD, DEFAULT_SUFFIX +from lib389._constants import SUFFIX, DEFAULT_SUFFIX pytestmark = pytest.mark.tier1 @@ -436,6 +437,42 @@ def test_pwd_admin_config_test_skip_updates(topology_st, password_policy): assert not found +def test_pwd_admin_extended_op(topology_st, password_policy): + """Test RootDN can bypass passwsord policy via extended op password modify + + :id: 687fec1d-1081-4012-a2c4-41b427b9e764 + + :setup: Standalone instance + :steps: + 1. Add test entry + 2. Run passwd_s (extended op) twice as root DN + + :expectedresults: + 1. Success + 2. Success + """ + + inst = topology_st.standalone + inst.enable_tls() + + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.create(properties={ + 'uid': 'extop', + 'cn': 'extop', + 'sn': 'extop', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/extop', + 'userPassword': "password" + }) + user.replace('userpassword', 'password2') + + inst.passwd_s(user.dn, 'password2', 'password') + inst.passwd_s(user.dn, 'password', 'password2') + + user.delete() + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode diff --git a/dirsrvtests/tests/suites/password/pwdModify_test.py b/dirsrvtests/tests/suites/password/pwdModify_test.py index 9e32823c7..c0e29f77d 100644 --- a/dirsrvtests/tests/suites/password/pwdModify_test.py +++ b/dirsrvtests/tests/suites/password/pwdModify_test.py @@ -153,21 +153,16 @@ def test_pwd_modify_with_different_operation(topo): with pytest.raises(ldap.INSUFFICIENT_ACCESS): topo.standalone.simple_bind_s(user.dn, NEW_PASSWD) assert topo.standalone.passwd_s(user_2.dn, None, NEW_PASSWD) - log.info("Directory Manager attempts to change password for testuser2(userPassword attribute is Undefined)") - topo.standalone.simple_bind_s(DN_DM, PASSWORD) - assert topo.standalone.passwd_s(user_2.dn, None, OLD_PASSWD) - log.info("Create a password syntax policy. 
Attempt to change to password that violates that policy") - topo.standalone.config.set('PasswordCheckSyntax', 'on') - with pytest.raises(ldap.CONSTRAINT_VIOLATION): - assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, SHORT_PASSWD) + log.info("Reset password syntax policy") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) topo.standalone.config.set('PasswordCheckSyntax', 'off') log.info("userPassword mod with control results in ber decode error") - topo.standalone.simple_bind_s(DN_DM, PASSWORD) assert topo.standalone.modify_ext_s(user.dn, [(ldap.MOD_REPLACE, 'userpassword', b'abcdefg')], serverctrls=[LDAPControl('2.16.840.1.113730.3.4.2', 1, None)]) log.info("Reseting the testuser's password") - topo.standalone.passwd_s(user.dn, 'abcdefg', NEW_PASSWD) + topo.standalone.passwd_s(user.dn, 'abcdefg', OLD_PASSWD) + topo.standalone.passwd_s(user_2.dn, None, OLD_PASSWD) def test_pwd_modify_with_password_policy(topo, pwd_policy_setup): @@ -193,16 +188,17 @@ def test_pwd_modify_with_password_policy(topo, pwd_policy_setup): topo.standalone.passwd_s(user_2.dn, NEW_PASSWD, OLD_PASSWD) regex = re.search('Z(.+)', user_2.get_attr_val_utf8('passwordhistory')) assert NEW_PASSWD == regex.group(1) + log.info("Try changing password to one stored in history. Should fail") + assert topo.standalone.simple_bind_s(user_2.dn, OLD_PASSWD) with pytest.raises(ldap.CONSTRAINT_VIOLATION): assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) log.info("Change the password several times in a row, and try binding after each change") - topo.standalone.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) assert topo.standalone.simple_bind_s(user.dn, OLD_PASSWD) - topo.standalone.passwd_s(user.dn, OLD_PASSWD, SHORT_PASSWD) - assert topo.standalone.simple_bind_s(user.dn, SHORT_PASSWD) + topo.standalone.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD) + topo.standalone.passwd_s(user.dn, NEW_PASSWD, SHORT_PASSWD) with pytest.raises(ldap.CONSTRAINT_VIOLATION): - topo.standalone.passwd_s(user.dn, SHORT_PASSWD, OLD_PASSWD) + topo.standalone.passwd_s(user.dn, SHORT_PASSWD, NEW_PASSWD) def test_pwd_modify_with_subsuffix(topo): diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 36dd1fbfb..0a351d46a 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -1374,6 +1374,15 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M rc = -1; goto done; } + } else if (pw_is_pwp_admin(pb, pwpolicy, PWP_ADMIN_OR_ROOTDN)) { + /* This is an internal operation, but we still need to check if this + * is a password admin */ + if (!SLAPI_IS_MOD_DELETE(mod->mod_op) && pwpolicy->pw_history) { + /* Updating pw history, get the old password */ + get_old_pw(pb, &sdn, old_pw); + } + rc = 1; + goto done; } /* check if password is within password minimum age; diff --git a/ldap/servers/slapd/passwd_extop.c b/ldap/servers/slapd/passwd_extop.c index f758ac018..4bb60afd6 100644 --- a/ldap/servers/slapd/passwd_extop.c +++ b/ldap/servers/slapd/passwd_extop.c @@ -120,6 +120,7 @@ passwd_apply_mods(Slapi_PBlock *pb_orig, const Slapi_DN *sdn, Slapi_Mods *mods, { LDAPControl **req_controls_copy = NULL; LDAPControl **pb_resp_controls = NULL; + passwdPolicy *pwpolicy = NULL; int ret = 0; slapi_log_err(SLAPI_LOG_TRACE, "passwd_apply_mods", "=>\n"); @@ -132,6 +133,14 @@ passwd_apply_mods(Slapi_PBlock *pb_orig, const Slapi_DN *sdn, Slapi_Mods *mods, } Slapi_PBlock *pb = slapi_pblock_new(); + pwpolicy = new_passwdPolicy(pb_orig, slapi_sdn_get_ndn(sdn)); + if (pw_is_pwp_admin(pb_orig, pwpolicy, 
PWP_ADMIN_OR_ROOTDN)) { + /* If this is root DN or password admin set is_requestor_root so + * set this in the new pblock so password updates are correctly + * applied */ + int is_requestor_root = 1; + slapi_pblock_set(pb, SLAPI_REQUESTOR_ISROOT, &is_requestor_root); + } slapi_modify_internal_set_pb_ext(pb, sdn, slapi_mods_get_ldapmods_byref(mods), req_controls_copy, NULL, /* UniqueID */ @@ -564,7 +573,6 @@ passwd_modify_extop(Slapi_PBlock *pb) tag = ber_peek_tag(ber, &len); } - /* identify userID field by tags */ if (tag == LDAP_EXTOP_PASSMOD_TAG_USERID) { rc = 0;
0
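For reference, the new test drives this fix through python-ldap's password modify extended operation (RFC 3062). A minimal client-side sketch, assuming a reachable instance, an existing test entry and made-up credentials (none of these values come from the commit):

import ldap

# Hypothetical connection details; replace with a real instance and credentials.
conn = ldap.initialize('ldaps://localhost:636')
conn.simple_bind_s('cn=Directory Manager', 'password')

# Password modify extended operation: with the fix, a root DN / password admin
# bind is no longer rejected by password history or other policy checks.
conn.passwd_s('uid=extop,ou=people,dc=example,dc=com', 'password2', 'password')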
b110f3bf60b676468f8add11556e3d517c73f127
389ds/389-ds-base
Issue 5640 - Update logconv for new logging format Description: Some of the "closed" messages have been replaced by "disconnect". The tool needs to handle these changes. relates: https://github.com/389ds/389-ds-base/issues/5640 Reviewed by: spichugi(Thanks!)
commit b110f3bf60b676468f8add11556e3d517c73f127 Author: Mark Reynolds <[email protected]> Date: Mon Feb 20 13:17:30 2023 -0500 Issue 5640 - Update logconv for new logging format Description: Some of the "closed" messages have been replaced by "disconnect" The tool needs to handles these changes relates: https://github.com/389ds/389-ds-base/issues/5640 Reviewed by: spichugi(Thanks!) diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl index a048fe609..5ba91e99c 100755 --- a/ldap/admin/src/logconv.pl +++ b/ldap/admin/src/logconv.pl @@ -2249,7 +2249,7 @@ sub parseLineNormal $fdTaken++; if ($1 > $highestFdTaken){ $highestFdTaken = $1; } } - if (m/ fd=/ && m/closed/i){ + if (m/ fd=/ && m/ (closed|disconnect) /i){ ($connID) = $_ =~ /conn=(\d*)\s/; handleConnClose($connID); } @@ -2362,7 +2362,7 @@ sub parseLineNormal } $isVlvNotes = 0; } - if (m/ closed error 32/i){ + if (m/ (closed|disconnect) error 32/i){ $brokenPipeCount++; if (m/- T1/){ $hashes->{rc}->{"T1"}++; } elsif (m/- T2/){ $hashes->{rc}->{"T2"}++; } @@ -2377,7 +2377,7 @@ sub parseLineNormal elsif (m/- U1/){ $hashes->{rc}->{"U1"}++; } else { $hashes->{rc}->{"other"}++; } } - if (m/ closed error 131/i || m/ closed error -5961/i){ + if (m/ (closed|disconnect) error 131/i || m/ (closed|disconnect) error -5961/i){ $connResetByPeerCount++; if (m/- T1/){ $hashes->{src}->{"T1"}++; } elsif (m/- T2/){ $hashes->{src}->{"T2"}++; } @@ -2392,7 +2392,7 @@ sub parseLineNormal elsif (m/- U1/){ $hashes->{src}->{"U1"}++; } else { $hashes->{src}->{"other"}++; } } - if (m/ closed error 11/i){ + if (m/ (closed|disconnect) error 11/i){ $resourceUnavailCount++; if (m/- T1/){ $hashes->{rsrc}->{"T1"}++; } elsif (m/- T2/){ $hashes->{rsrc}->{"T2"}++; }
0
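logconv.pl itself is Perl, but the essence of the change above, counting both "closed" and "disconnect" access log messages as connection closes, can be illustrated with a small self-contained Python sketch (the sample log lines are made up, not taken from a real access log):

import re

def closed_conn_id(line):
    # Mimics the updated logconv.pl check: both 'closed' and 'disconnect'
    # messages on an fd line count as a connection close.
    if ' fd=' in line and re.search(r' (closed|disconnect) ', line, re.IGNORECASE):
        m = re.search(r'conn=(\d+)\s', line)
        return m.group(1) if m else None
    return None

# Made-up sample lines in the old and new wording.
print(closed_conn_id('conn=12 op=-1 fd=64 closed - B1'))      # '12'
print(closed_conn_id('conn=13 op=-1 fd=65 Disconnect - T1'))  # '13'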
1370c060cc834212acc4304d87a73c167b6bb296
389ds/389-ds-base
Issue 3555 - UI - Fix audit issue with npm - babel/traverse (#5959) Description: Run npm audit fix to address the vulnerability in babel/traverse. Relates: https://github.com/389ds/389-ds-base/issues/3555 Reviewed by: @progier389 (Thanks!)
commit 1370c060cc834212acc4304d87a73c167b6bb296 Author: Simon Pichugin <[email protected]> Date: Wed Oct 18 09:04:59 2023 -0700 Issue 3555 - UI - Fix audit issue with npm - babel/traverse (#5959) Description: Run npm audit fix to address the vulnerability in babel/traverse. Relates: https://github.com/389ds/389-ds-base/issues/3555 Reviewed by: @progier389 (Thanks!) diff --git a/src/cockpit/389-console/package-lock.json b/src/cockpit/389-console/package-lock.json index b3cc90f60..06d6da308 100644 --- a/src/cockpit/389-console/package-lock.json +++ b/src/cockpit/389-console/package-lock.json @@ -79,12 +79,13 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.5.tgz", - "integrity": "sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==", + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", "dev": true, "dependencies": { - "@babel/highlight": "^7.22.5" + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" }, "engines": { "node": ">=6.9.0" @@ -132,13 +133,13 @@ } }, "node_modules/@babel/generator": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.22.5.tgz", - "integrity": "sha512-+lcUbnTRhd0jOewtFSedLyiPsD5tswKkbgcezOqqWFUVNEwoUTlpPOBmvhG7OXWLR4jMdv0czPGH5XbflnD1EA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", "dev": true, "peer": true, "dependencies": { - "@babel/types": "^7.22.5", + "@babel/types": "^7.23.0", "@jridgewell/gen-mapping": "^0.3.2", "@jridgewell/trace-mapping": "^0.3.17", "jsesc": "^2.5.1" @@ -181,9 +182,9 @@ } }, "node_modules/@babel/helper-environment-visitor": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz", - "integrity": "sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", "dev": true, "peer": true, "engines": { @@ -191,14 +192,14 @@ } }, "node_modules/@babel/helper-function-name": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz", - "integrity": "sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", "dev": true, "peer": true, "dependencies": { - "@babel/template": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" }, "engines": { "node": ">=6.9.0" @@ -274,9 +275,9 @@ } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.5.tgz", - "integrity": "sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==", + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", "dev": true, "peer": true, "dependencies": { @@ -297,9 +298,9 @@ } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.5.tgz", - "integrity": "sha512-aJXu+6lErq8ltp+JhkJUfk1MTGyuA4v7f3pA+BJ5HLfNC6nAQ0Cpi9uOquUj8Hehg0aUiHzWQbOVJGao6ztBAQ==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", "dev": true, "engines": { "node": ">=6.9.0" @@ -331,13 +332,13 @@ } }, "node_modules/@babel/highlight": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.5.tgz", - "integrity": "sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", "dev": true, "dependencies": { - "@babel/helper-validator-identifier": "^7.22.5", - "chalk": "^2.0.0", + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", "js-tokens": "^4.0.0" }, "engines": { @@ -345,9 +346,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.22.5.tgz", - "integrity": "sha512-DFZMC9LJUG9PLOclRC32G63UXwzqS2koQC8dkx+PLdmt1xSePYpbT/NbsrJy8Q/muXz7o/h/d4A7Fuyixm559Q==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", "dev": true, "peer": true, "bin": { @@ -410,35 +411,35 @@ } }, "node_modules/@babel/template": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.5.tgz", - "integrity": "sha512-X7yV7eiwAxdj9k94NEylvbVHLiVG1nvzCV2EAowhxLTwODV1jl9UzZ48leOC0sH7OnuHrIkllaBgneUykIcZaw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", "dev": true, "peer": true, "dependencies": { - "@babel/code-frame": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/types": "^7.22.5" + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.22.5.tgz", - "integrity": "sha512-7DuIjPgERaNo6r+PZwItpjCZEa5vyw4eJGufeLxrPdBXBoLcCJCIasvK6pK/9DVNrLZTLFhUGqaC6X/PA007TQ==", + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + 
"integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", "dev": true, "peer": true, "dependencies": { - "@babel/code-frame": "^7.22.5", - "@babel/generator": "^7.22.5", - "@babel/helper-environment-visitor": "^7.22.5", - "@babel/helper-function-name": "^7.22.5", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.5", - "@babel/parser": "^7.22.5", - "@babel/types": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", "debug": "^4.1.0", "globals": "^11.1.0" }, @@ -447,14 +448,14 @@ } }, "node_modules/@babel/types": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.22.5.tgz", - "integrity": "sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==", + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", "dev": true, "peer": true, "dependencies": { "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", "to-fast-properties": "^2.0.0" }, "engines": {
0
8fe7586640565b5d7866e1482a37f9a5db1a7583
389ds/389-ds-base
Issue 6068 - Add dscontainer stop function Bug Description: There is currently no stop function in dscontainer. One would be useful for testing and debugging, and for custom container setups where dscontainer is started during the Docker build to apply custom configuration and then needs to be stopped gracefully afterwards. Discussed in https://github.com/389ds/389-ds-base/discussions/6058. Fix Description: Add a simple stop() function to dscontainer that gracefully stops the ns-slapd process. Fixes: https://github.com/389ds/389-ds-base/issues/6068 Co-authored-by: Viktor Ashirov <[email protected]>
commit 8fe7586640565b5d7866e1482a37f9a5db1a7583 Author: Ryan Slominski <[email protected]> Date: Wed Jan 31 09:41:19 2024 -0500 Issue 6068 - Add dscontainer stop function Bug Description: There currently is not a stop function in dscontainer. It would be nice to have for use cases such as testing/debugging, plus custom container setups run during the Docker build in which dscontainer is started to do some custom configs, then later a stop function would be nice to gracefully stop dscontainer. Discussed in https://github.com/389ds/389-ds-base/discussions/6058. Fix Description: A simple stop() function added to dscontainer that gracefully stops the ns-slapd process. Fixes: https://github.com/389ds/389-ds-base/issues/6068 Co-authored-by: Viktor Ashirov <[email protected]> diff --git a/src/lib389/cli/dscontainer b/src/lib389/cli/dscontainer index 1fa3d23b7..688121c99 100755 --- a/src/lib389/cli/dscontainer +++ b/src/lib389/cli/dscontainer @@ -34,8 +34,7 @@ import subprocess import argparse, argcomplete from argparse import RawTextHelpFormatter - -from lib389 import DirSrv +from lib389 import DirSrv, pid_exists, pid_from_file from lib389.cli_base import setup_script_logger from lib389.instance.setup import SetupDs from lib389.instance.options import General2Base, Slapd2Base @@ -58,6 +57,9 @@ from lib389.idm.directorymanager import DirectoryManager # is always available! log = setup_script_logger("container-init", True) +# PID FILE +PID_FILE = "/data/run/slapd-localhost.pid" + # Handle any dead child process signals we receive. Wait for them to terminate, or # if they are not found, move on. @@ -337,9 +339,7 @@ binddn = cn=Directory Manager ds_proc = subprocess.Popen([ "%s/ns-slapd" % paths.sbin_dir, "-D", paths.config_dir, - # This container version doesn't actually use or need the pidfile to track - # the process. - # "-i", "/data/run/slapd-localhost.pid", + "-i", PID_FILE, "-d", loglevel, ], stdout=None, stderr=None, env=os.environ.copy()) @@ -425,6 +425,18 @@ def begin_healthcheck(ds_proc, log_exception): return (True, False) +def stop(): + stop_timeout = os.getenv("DS_STOP_TIMEOUT", 60) + count = int(stop_timeout) + pid = pid_from_file(PID_FILE) + os.kill(pid, signal.SIGTERM) + while pid_exists(pid) and count > 0: + time.sleep(1) + count -= 1 + if pid_exists(pid): + os.kill(pid, signal.SIGKILL) + + if __name__ == '__main__': # Before all else, we are INIT so setup sigchild signal.signal(signal.SIGCHLD, _sigchild_handler) @@ -462,6 +474,9 @@ container host. parser.add_argument('-r', '--runit', help="Actually run the instance! You understand what that means ...", action='store_true', default=False, dest='runit') + parser.add_argument('-s', '--stop', + help="Stop the instance", + action='store_true', default=False, dest='stop') parser.add_argument('-H', '--healthcheck', help="Start a healthcheck inside of the container for an instance. You should understand what this means ...", action='store_true', default=False, dest='healthcheck') @@ -472,6 +487,8 @@ container host. if args.runit: begin_magic() + elif args.stop: + stop() elif args.healthcheck: if begin_healthcheck(None, False) == (False, True): sys.exit(0)
0
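The stop() helper added above follows a common graceful-shutdown pattern: send SIGTERM first, poll until the process exits or a timeout elapses, then SIGKILL as a last resort. A standalone sketch of that pattern (not the dscontainer code itself; pid_exists below is a local stand-in for the lib389 helper of the same name):

import os
import signal
import time

def pid_exists(pid):
    # Stand-in for lib389's pid_exists: probe the process with signal 0.
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        return False
    except PermissionError:
        return True
    return True

def graceful_stop(pid, timeout=60):
    # Ask politely, wait up to `timeout` seconds, then force-kill.
    os.kill(pid, signal.SIGTERM)
    for _ in range(timeout):
        if not pid_exists(pid):
            return
        time.sleep(1)
    if pid_exists(pid):
        os.kill(pid, signal.SIGKILL)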
563b0a42e0b707d86edae8792e0a86e63ed7e90e
389ds/389-ds-base
Resolves: bug 232910 Description: The ACI targetattr list parser is whitespace-sensitive. Fix Description: I made it too sensitive. The parser should allow simple unquoted strings; however, if a value begins with a quote, it must also end with a quote.
commit 563b0a42e0b707d86edae8792e0a86e63ed7e90e Author: Rich Megginson <[email protected]> Date: Fri Oct 19 19:01:16 2007 +0000 Resolves: bug 232910 Description: ACI targetattr list parser is whitespace sensitive Fix Description: I made it too sensitive. The parser should allow simple unquoted strings. However, if it begins with a quote, it must end with a quote. diff --git a/ldap/servers/plugins/acl/aclparse.c b/ldap/servers/plugins/acl/aclparse.c index 8d012a657..70def8d9e 100644 --- a/ldap/servers/plugins/acl/aclparse.c +++ b/ldap/servers/plugins/acl/aclparse.c @@ -1234,14 +1234,21 @@ __aclp__init_targetattr (aci_t *aci, char *attr_val) __acl_strip_leading_space(&s); __acl_strip_trailing_space(s); len = strlen(s); - if (*s == '"' && s[len-1] == '"') { - s[len-1] = '\0'; - s++; - } else { - slapi_log_error(SLAPI_LOG_FATAL, plugin_name, - "__aclp__init_targetattr: Error: The statement does not begin and end with a \": [%s]\n", - s); - return ACL_SYNTAX_ERR; + /* Simple targetattr statements may not be quoted e.g. + targetattr=* or targetattr=userPassword + if it begins with a quote, it must end with one as well + */ + if (*s == '"') { + s++; /* skip leading quote */ + if (s[len-1] == '"') { + s[len-1] = '\0'; /* trim trailing quote */ + } else { + /* error - if it begins with a quote, it must end with a quote */ + slapi_log_error(SLAPI_LOG_FATAL, plugin_name, + "__aclp__init_targetattr: Error: The statement does not begin and end with a \": [%s]\n", + attr_val); + return ACL_SYNTAX_ERR; + } } str = s;
0
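The rule this fix restores, that simple targetattr values may be unquoted but a value opening with a double quote must also close with one, can be sketched outside of C as follows (a simplified illustration, not the aclparse.c code):

def strip_targetattr_quotes(value):
    # Return the value with surrounding quotes removed, or raise if the
    # value opens a quote without closing it (mirrors the C logic above).
    value = value.strip()
    if value.startswith('"'):
        if len(value) >= 2 and value.endswith('"'):
            return value[1:-1]
        raise ValueError('targetattr value begins with " but does not end with ": %r' % value)
    return value  # simple unquoted value, e.g. * or userPassword

assert strip_targetattr_quotes('"userPassword"') == 'userPassword'
assert strip_targetattr_quotes('*') == '*'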
32ab01f55684859213bfebf8190b82ba84f374c5
389ds/389-ds-base
minor fixes for bdb 4.2/4.3 and mozldap
commit 32ab01f55684859213bfebf8190b82ba84f374c5 Author: Rich Megginson <[email protected]> Date: Thu Oct 25 17:16:08 2012 -0600 minor fixes for bdb 4.2/4.3 and mozldap diff --git a/ldap/servers/plugins/rootdn_access/rootdn_access.c b/ldap/servers/plugins/rootdn_access/rootdn_access.c index ad1e125fa..53239153a 100644 --- a/ldap/servers/plugins/rootdn_access/rootdn_access.c +++ b/ldap/servers/plugins/rootdn_access/rootdn_access.c @@ -46,6 +46,7 @@ #include <nspr.h> #include <time.h> #include <ctype.h> +#include <string.h> /* * Add an entry like the following to dse.ldif to enable this plugin: diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index 26007f46d..5e53de928 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -136,7 +136,7 @@ #define LOG_FLUSH(env, lsn) (env)->log_flush((env), (lsn)) #define LOCK_DETECT(env, flags, atype, aborted) \ (env)->lock_detect((env), (flags), (atype), (aborted)) -#if 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR >= 4000 /* db4.4 or later */ +#if 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR >= 4400 /* db4.4 or later */ #define DB_ENV_SET_TAS_SPINS(env, tas_spins) \ (env)->mutex_set_tas_spins((env), (tas_spins)) #else /* < 4.4 */ diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index a70910fd0..bdff36676 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -340,12 +340,16 @@ filter_stuff_func(void *arg, const char *val, PRUint32 slen) buf = escaped_filter.bv_val; } #else + char *val2 = NULL; buf = slapi_ch_calloc(sizeof(char), filter_len*3 + 1); - if(do_escape_string(val, filter_len, buf, special_filter) == NULL){ + val2 = do_escape_string(val, filter_len, buf, special_filter); + if(val2 == NULL){ LDAPDebug(LDAP_DEBUG_TRACE, "slapi_filter_sprintf: failed to escape filter value(%s)\n",val,0,0); ctx->next_arg_needs_esc_norm = 0; slapi_ch_free_string(&buf); return -1; + } else if (val == val2) { /* value did not need escaping and was just returned */ + strcpy(buf, val); /* just use value as-is - len did not change */ } else { filter_len = strlen(buf); }
0
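The dblayer.c hunk corrects a version-threshold bug: BDB versions are compared via the combined code 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR, so "db 4.4 or later" corresponds to >= 4400, while the old >= 4000 check already matched db 4.0. A tiny sketch of that encoding:

def bdb_version_code(major, minor):
    # Combined version code used by the dblayer.c preprocessor check.
    return 1000 * major + 100 * minor

assert bdb_version_code(4, 0) == 4000   # would wrongly pass the old ">= 4000" test
assert bdb_version_code(4, 2) == 4200
assert bdb_version_code(4, 4) == 4400   # the intended ">= 4400" threshold for mutex_set_tas_spins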
98c88d0520ec7ce9324917d388dceda3737ae4d8
389ds/389-ds-base
Ticket 49055 - Refactor create_test.py Description: create_test.py now takes the recent changes to tickets, suites and topology fixtures into account. - If you choose a topology that already exists, create_test.py just imports the suitable fixture. - If you choose a topology that does not exist yet, create_test.py generates it for you, so you can move it to lib389/topologies.py later. Also, remove the dirsrvtests/cmd directory, because it conflicts with another Python module and lib389 already provides this functionality. https://fedorahosted.org/389/ticket/49055 Reviewed by: wibrown (Thanks!)
commit 98c88d0520ec7ce9324917d388dceda3737ae4d8 Author: Simon Pichugin <[email protected]> Date: Thu Jan 5 16:01:54 2017 +0100 Ticket 49055 - Refactor create_test.py Description: Now create_test.py works considering new changes in tickets, suites and topology fixtures. - If you choose a topology that already exists, create_test.py will just import the suitable fixture. - And if you choose a non existing topology, create_test.py will make it for you. So you can move it to lib389/topologies.py later. Also, remove dirsrvtests/cmd dir, because it conflicts with another Python module and lib389 already has this functionality. https://fedorahosted.org/389/ticket/49055 Reviewed by: wibrown (Thanks!) diff --git a/dirsrvtests/cmd/__init__.py b/dirsrvtests/cmd/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/dirsrvtests/cmd/dsadm/__init__.py b/dirsrvtests/cmd/dsadm/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/dirsrvtests/cmd/dsadm/dsadm.py b/dirsrvtests/cmd/dsadm/dsadm.py deleted file mode 100755 index 247a295e5..000000000 --- a/dirsrvtests/cmd/dsadm/dsadm.py +++ /dev/null @@ -1,543 +0,0 @@ -#! /usr/bin/python2 -# -# --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2015 Red Hat, Inc. -# All rights reserved. -# -# License: GPL (version 3 or any later version). -# See LICENSE for details. -# --- END COPYRIGHT BLOCK --- - -# Authors: -# Thierry Bordaz <[email protected]> - -import sys -import os -import argparse -import pdb -import tempfile -import time -import pwd -import grp -import platform -import socket -import shutil -from subprocess import Popen, PIPE, STDOUT -import string - -SETUP_DS = "/sbin/setup-ds.pl" -REMOVE_DS = "/sbin/remove-ds.pl" -INITCONFIGDIR = ".dirsrv" -SCRIPT_START = "start-slapd" -SCRIPT_STOP = "stop-slapd" -SCRIPT_RESTART = "restart-slapd" -ENVIRON_SERVERID = '389-SERVER-ID' -ENVIRON_USER = '389-USER' -ENVIRON_GROUP = '389-GROUP' -ENVIRON_DIRECTORY = '389-DIRECTORY' -ENVIRON_PORT = '389-PORT' -ENVIRON_SECURE_PORT = '389-SECURE-PORT' -DEFAULT_PORT_ROOT = str(389) -DEFAULT_PORT_NON_ROOT = str(1389) -DEFAULT_SECURE_PORT_ROOT = str(636) -DEFAULT_SECURE_PORT_NON_ROOT = str(1636) -DEFAULT_USER = 'nobody' -DEFAULT_GROUP = 'nobody' -DEFAULT_ROOT_DN = 'cn=Directory Manager' -DEFAULT_HOSTNAME = socket.gethostname() - - - -def validate_user(user): - ''' - If a user is provided it returns its username - else it returns the current username. - It checks that the userId or userName exists - - :param: user (optional) can be a userName or userId - :return: userName of the provided user, if none is provided, it returns current user name - ''' - assert(user) - if user.isdigit(): - try: - username = pwd.getpwuid(int(user)).pw_name - except KeyError: - raise KeyError('Unknown userId %d' % user) - return username - else: - try: - pwd.getpwnam(user).pw_uid - except KeyError: - raise KeyError('Unknown userName %s' % user) - return user - -def get_default_user(): - user = os.environ.get(ENVIRON_USER, None) - if not user: - user = os.getuid() - return str(user) - -def get_default_group(): - ''' - If a group is provided it returns its groupname - else it returns the current groupname. 
- It checks that the groupId or groupName exists - - :param: group (optional) can be a groupName or groupId - :return: groupName of the provided group, if none is provided, it returns current group name - ''' - group = os.environ.get(ENVIRON_GROUP, None) - if not group: - return pwd.getpwuid(os.getuid()).pw_name - return group - -def validate_group(group): - assert(group) - if str(group).isdigit(): - try: - groupname = grp.getgrgid(group).gr_name - return groupname - except: - raise KeyError('Unknown groupId %d' % group) - else: - try: - groupname = grp.getgrnam(group).gr_name - return groupname - except: - raise KeyError('Unknown groupName %s' % group) - -def test_get_group(): - try: - grpname = get_default_group() - print('get_group: %s' % grpname) - except: - raise - print("Can not find user group") - pass - try: - grpname = get_default_group(group='tbordaz') - print('get_group: %s' % grpname) - except: - raise - print("Can not find user group") - pass - try: - grpname = get_default_group(group='coucou') - print('get_group: %s' % grpname) - except: - print("Can not find user group coucou") - pass - try: - grpname = get_default_group('thierry') - print('get_group: %s' % grpname) - except: - raise - print("Can not find user group thierry") - pass - try: - grpname = get_default_group(1000) - print('get_group: %s' % grpname) - except: - raise - print("Can not find user group 1000") - pass - try: - grpname = get_default_group(20532) - print('get_group: %s' % grpname) - except: - raise - print("Can not find user group 20532") - pass - try: - grpname = get_default_group(123) - print('get_group: %s' % grpname) - except: - print("Can not find user group 123") - pass - -def get_default_port(): - port = os.environ.get(ENVIRON_PORT, None) - if port: - return port - - if os.getuid() == 0: - return DEFAULT_PORT_ROOT - else: - return DEFAULT_PORT_NON_ROOT - -def validate_port(port): - assert port - if not port.isdigit() or int(port) <= 0 : - raise Exception("port number is invalid: %s" % port) - -def get_default_directory(): - directory = os.environ.get(ENVIRON_DIRECTORY, None) - if not directory: - directory = os.getcwd() - return directory - -def validate_directory(directory): - assert directory - if not os.path.isdir(directory): - raise Exception("Supplied directory path is not a directory") - - if not os.access(directory, os.W_OK): - raise Exception("Supplied directory is not writable") - -def get_default_serverid(): - serverid = os.environ.get(ENVIRON_SERVERID, None) - if not serverid: - serverid = socket.gethostname().split('.')[0] - return serverid - -def validate_serverid(serverid): - if not serverid: - raise Exception("Server id is not defined") - return serverid - - -def get_inst_dir(serverid): - assert serverid - home = os.getenv("HOME") - inst_initconfig_file = "%s/%s/dirsrv-%s" % (home, INITCONFIGDIR, serverid) - if not os.path.isfile(inst_initconfig_file): - raise Exception("%s config file not found" % inst_initconfig_file) - f = open(inst_initconfig_file, "r") - for line in f: - if line.startswith("INST_DIR"): - inst_dir = line.split("=")[1] - inst_dir = inst_dir.replace("\r", "") - inst_dir = inst_dir.replace("\n", "") - return inst_dir - -def sanity_check(): - if os.getuid() == 0: - raise Exception("Not tested for root user.. sorry") - - home = os.getenv("HOME") - inst_initconfig_dir = "%s/%s" % (home, INITCONFIGDIR) - if not os.path.isdir(inst_initconfig_dir): - raise Exception("Please create the directory \'%s\' and retry." 
% inst_initconfig_dir ) - -class DSadmCmd(object): - def __init__(self): - self.version = '0.1' - - def _start_subparser(self, subparsers): - start_parser = subparsers.add_parser( - 'start', - help='Start a Directory Server Instance') - start_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?', - metavar='SERVER-ID', - help='Server Identifier (Default: %s) ' % get_default_serverid()) - start_parser.set_defaults(func=self.start_action) - - def _stop_subparser(self, subparsers): - start_parser = subparsers.add_parser( - 'stop', - help='Stop a Directory Server Instance') - start_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?', - metavar='SERVER-ID', - help='Server Identifier (Default: %s) ' % get_default_serverid()) - start_parser.set_defaults(func=self.stop_action) - - def _restart_subparser(self, subparsers): - start_parser = subparsers.add_parser( - 'restart', - help='Retart a Directory Server Instance') - start_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?', - metavar='SERVER-ID', - help='Server Identifier (Default: %s) ' % get_default_serverid()) - start_parser.set_defaults(func=self.restart_action) - - def _delete_subparser(self, subparsers): - delete_parser = subparsers.add_parser( - 'delete', - help='Delete a Directory Server Instance') - delete_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?', - metavar='SERVER-ID', - help='Server Identifier (Default: %s) ' % get_default_serverid()) - delete_parser.add_argument('-debug', '--debug', dest='debug_level', type=int, nargs='?', - metavar='DEBUG_LEVEL', - help='Debug level (Default: 0)') - delete_parser.set_defaults(func=self.delete_action) - - def _create_subparser(self, subparsers): - create_parser = subparsers.add_parser( - 'create', - help='Create a Directory Server Instance') - create_parser.add_argument('-I', '--server-id', dest='server_id', type=str, nargs='?', - metavar='SERVER-ID', - help='Server Identifier (Default: %s) ' % get_default_serverid()) - create_parser.add_argument('-s', '--suffix', dest='suffix', type=str, nargs='?', - metavar='SUFFIX-DN', - help='Suffix (Default: create no suffix)') - create_parser.add_argument('-p', '--port', dest='port', type=int, nargs='?', - metavar='NON-SECURE-PORT', - help='Normal Port to listen (Default: %s(root)/%s(non-root)) ' % (DEFAULT_PORT_ROOT, DEFAULT_PORT_NON_ROOT)) - - create_parser.add_argument('-P', '--secure-port', dest='secure_port', type=int, nargs='?', - metavar='SECURE-PORT', - help='Secure Port to listen (Default: %s(root)/%s(non-root))' % (DEFAULT_SECURE_PORT_ROOT, DEFAULT_SECURE_PORT_NON_ROOT)) - - create_parser.add_argument('-D', '--rootDN', dest='root_dn', type=str, nargs='?', - metavar='ROOT-DN', - help='Uses DN as Directory Manager DN (Default: \'%s\')' % (DEFAULT_ROOT_DN)) - - create_parser.add_argument('-u', '--user-name', dest='user_name', type=str, nargs='?', - metavar='USER-NAME', - help='User name of the instance owner (Default: %s)' % DEFAULT_USER) - - create_parser.add_argument('-g', '--group-name', dest='group_name', type=str, nargs='?', - metavar='GROUP-NAME', - help='Group name of the instance owner (Default: %s)' % DEFAULT_GROUP) - - create_parser.add_argument('-d', '--directory-path', dest='directory_path', type=str, nargs='?', - metavar='DIRECTORY-PATH', - help='Installation directory path (Default: %s)' % get_default_directory()) - create_parser.add_argument('-debug', '--debug', dest='debug_level', type=int, nargs='?', - 
metavar='DEBUG_LEVEL', - help='Debug level (Default: 0)') - create_parser.add_argument('-k', '--keep_template', dest='keep_template', type=str, nargs='?', - help='Keep template file') - - create_parser.set_defaults(func=self.create_action) - - # - # common function for start/stop/restart actions - # - def script_action(self, args, script, action_str): - args = vars(args) - serverid = args.get('server_id', None) - if not serverid: - serverid = get_default_serverid() - - script_file = "%s/%s" % (get_inst_dir(serverid), script) - if not os.path.isfile(script_file): - raise Exception("%s not found" % script_file) - - if not os.access(script_file, os.X_OK): - raise Exception("%s not executable" % script_file) - - env = os.environ.copy() - prog = [ script_file ] - pipe = Popen(prog, cwd=os.getcwd(), env=env, - stdin=PIPE, stdout=PIPE, stderr=STDOUT) - child_stdin = pipe.stdin - child_stdout = pipe.stdout - for line in child_stdout: - sys.stdout.write(line) - child_stdout.close() - child_stdin.close() - - rc = pipe.wait() - if rc == 0: - print("Directory %s %s" % (serverid, action_str)) - else: - print("Failure: directory %s not %s (%s)" % (serverid, action_str, rc)) - return - - def start_action(self, args): - self.script_action(args, SCRIPT_START, "started") - - - def stop_action(self, args): - self.script_action(args, SCRIPT_STOP, "stopped") - - - def restart_action(self, args): - - self.script_action(args, SCRIPT_RESTART, "restarted") - - def delete_action(self, args): - args = vars(args) - serverid = args.get('server_id', None) - if not serverid: - serverid = get_default_serverid() - - #prepare the remove-ds options - debug_level = args.get('debug_level', None) - if debug_level: - debug_str = ['-d'] - for i in range(1, int(debug_level)): - debug_str.append('d') - debug_str = ''.join(debug_str) - - env = os.environ.copy() - prog = [REMOVE_DS] - if debug_level: - prog.append(debug_str) - prog.append("-i") - prog.append("slapd-%s" % serverid) - - # run the REMOVE_DS command and print the possible output - pipe = Popen(prog, cwd=os.getcwd(), env=env, - stdin=PIPE, stdout=PIPE, stderr=STDOUT) - child_stdin = pipe.stdin - child_stdout = pipe.stdout - for line in child_stdout: - if debug_level: - sys.stdout.write(line) - child_stdout.close() - child_stdin.close() - - rc = pipe.wait() - if rc == 0: - print("Directory server \'%s\' successfully deleted" % serverid) - else: - print("Fail to delete directory \'%s\': %d" % (serverid, rc)) - return - - # - # used by create subcommand to build the template file - # - def _create_setup_ds_file(self, args, user=None, group=None): - # Get/checks the argument with the following order - # - parameter - # - Environment - # - default - serverid = args.get('server_id', None) - if not serverid: - serverid = get_default_serverid() - serverid = validate_serverid(serverid) - - username = args.get('user_name', None) - if not username: - username = get_default_user() - username = validate_user(username) - - groupname = args.get('group_name', None) - if not groupname: - groupname = get_default_group() - groupname = validate_group(groupname) - - directoryname = args.get('directory_path', None) - if not directoryname: - directoryname = get_default_directory() - validate_directory(directoryname) - - portnumber = args.get('port', None) - if not portnumber: - portnumber = get_default_port() - validate_port(portnumber) - - suffix = args.get('suffix', None) - - tempf = tempfile.NamedTemporaryFile(delete=False) - - tempf.write('[General]\n') - tempf.write('FullMachineName=%s\n' 
% DEFAULT_HOSTNAME) - tempf.write('SuiteSpotUserID=%s\n' % username) - tempf.write('SuiteSpotGroup=%s\n' % groupname) - tempf.write('ServerRoot=%s\n' % directoryname) - tempf.write('\n') - tempf.write('[slapd]\n') - tempf.write('ServerPort=1389\n') - tempf.write('ServerIdentifier=%s\n' % serverid) - if suffix: - tempf.write('Suffix=%s\n' % suffix) - tempf.write('RootDN=cn=Directory Manager\n') - tempf.write('RootDNPwd=Secret12\n') - tempf.write('sysconfdir=%s/etc\n' % directoryname) - tempf.write('localstatedir=%s/var\n' % directoryname) - tempf.write('inst_dir=%s/lib/dirsrv/slapd-%s\n'% (directoryname, serverid)) - tempf.write('config_dir=%s/etc/dirsrv/slapd-%s\n' % (directoryname, serverid)) - tempf.close() - - keep_template = args.get('keep_template', None) - if keep_template: - shutil.copy(tempf.name, keep_template) - - - return tempf - - # - # It silently creates an instance. - # After creation the instance is started - # - def create_action(self, args): - args = vars(args) - - # retrieve the serverid here just to log the final status - serverid = args.get('server_id', None) - if not serverid: - serverid = get_default_serverid() - - # prepare the template file - tempf = self._create_setup_ds_file(args) - - #prepare the setup-ds options - debug_level = args.get('debug_level', None) - if debug_level: - debug_str = ['-d'] - for i in range(1, int(debug_level)): - debug_str.append('d') - debug_str = ''.join(debug_str) - - # - # run the SETUP_DS command and print the possible output - # - env = os.environ.copy() - prog = [SETUP_DS] - if debug_level: - prog.append(debug_str) - prog.append("--silent") - prog.append("--file=%s" % tempf.name) - tempf.close() - - pipe = Popen(prog, cwd=os.getcwd(), env=env, - stdin=PIPE, stdout=PIPE, stderr=STDOUT) - child_stdin = pipe.stdin - child_stdout = pipe.stdout - for line in child_stdout: - if debug_level: - sys.stdout.write(line) - child_stdout.close() - child_stdin.close() - - os.unlink(tempf.name) - rc = pipe.wait() - if rc == 0: - print("Directory server \'%s\' successfully created" % serverid) - else: - print("Fail to create directory \'%s\': %d" % (serverid, rc)) - return - - # - # parser of the main command. It contains subcommands - # - def get_parser(self, argv): - - - parser = argparse.ArgumentParser( - description='Managing a local directory server instance') - - subparsers = parser.add_subparsers( - metavar='SUBCOMMAND', - help='The action to perform') - - #pdb.set_trace() - # subcommands - self._create_subparser(subparsers) - self._delete_subparser(subparsers) - self._start_subparser(subparsers) - self._stop_subparser(subparsers) - self._restart_subparser(subparsers) - - # Sanity check that the debug level is valid - args = vars(parser.parse_args(argv)) - debug_level = args.get('debug_level', None) - if debug_level and (int(debug_level) < 1 or int(debug_level > 5)): - raise Exception("invalid debug level: range 1..5") - - return parser - - def main(self, argv): - sanity_check() - parser = self.get_parser(argv) - args = parser.parse_args(argv) - args.func(args) - return - -if __name__ == '__main__': - DSadmCmd().main(sys.argv[1:]) diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py index 423d9d22d..887580e2f 100755 --- a/dirsrvtests/create_test.py +++ b/dirsrvtests/create_test.py @@ -8,13 +8,13 @@ # See LICENSE for details. 
# --- END COPYRIGHT BLOCK --- -import sys import optparse +import sys +from lib389 import topologies """This script generates a template test script that handles the non-interesting parts of a test script: -- topology fixture (only for tickets), - for suites we have predefined fixtures in lib389/topologies.py +- topology fixture that doesn't exist in in lib389/topologies.py - test function (to be completed by the user), - run-isolated function """ @@ -81,6 +81,38 @@ def writeFinalizer(): TEST.write('\n\n') +def get_existing_topologies(inst, masters, hubs, consumers): + """Check if the requested topology exists""" + + if inst: + if inst == 1: + i = 'st' + else: + i = 'i{}'.format(inst) + else: + i = '' + if masters: + ms = 'm{}'.format(masters) + else: + ms = '' + if hubs: + hs = 'h{}'.format(hubs) + else: + hs = '' + if consumers: + cs = 'c{}'.format(consumers) + else: + cs = '' + + my_topology = 'topology_{}{}{}{}'.format(i, ms, hs, cs) + + # Returns True in the first element of a list, if topology was found + if my_topology in dir(topologies): + return [True, my_topology] + else: + return [False, my_topology] + + desc = 'Script to generate an initial lib389 test script. ' + \ 'This generates the topology, test, final, and run-isolated functions.' @@ -90,7 +122,7 @@ if len(sys.argv) > 0: # Script options parser.add_option('-t', '--ticket', dest='ticket', default=None) parser.add_option('-s', '--suite', dest='suite', default=None) - parser.add_option('-i', '--instances', dest='inst', default=None) + parser.add_option('-i', '--instances', dest='inst', default='0') parser.add_option('-m', '--masters', dest='masters', default='0') parser.add_option('-h', '--hubs', dest='hubs', default='0') parser.add_option('-c', '--consumers', dest='consumers', default='0') @@ -139,27 +171,29 @@ if len(sys.argv) > 0: if args.inst: if not args.inst.isdigit() or \ int(args.inst) > 99 or \ - int(args.inst) < 1: + int(args.inst) < 0: print('Invalid value for "--instances", it must be a number ' + 'greater than 0 and not greater than 99') displayUsage() if int(args.inst) > 0: if int(args.masters) > 0 or \ - int(args.hubs) > 0 or \ - int(args.consumers) > 0: + int(args.hubs) > 0 or \ + int(args.consumers) > 0: print('You can not mix "--instances" with replication.') displayUsage() # Extract usable values - masters = int(args.masters) - hubs = int(args.hubs) - consumers = int(args.consumers) ticket = args.ticket suite = args.suite - if not args.inst: + if not args.inst and not args.masters and not args.hubs and not args.consumers: instances = 1 + my_topology = [True, 'topology_st'] else: instances = int(args.inst) + masters = int(args.masters) + hubs = int(args.hubs) + consumers = int(args.consumers) + my_topology = get_existing_topologies(instances, masters, hubs, consumers) filename = args.filename # Create/open the new test script file @@ -176,23 +210,28 @@ if len(sys.argv) > 0: exit(1) # Write the imports - TEST.write('import os\nimport sys\nimport time\nimport ldap\n' + + if my_topology[0]: + topology_import = 'from lib389.topologies import {}\n'.format(my_topology[1]) + else: + topology_import = '' + + TEST.write('import time\nimport ldap\n' + 'import logging\nimport pytest\n') TEST.write('from lib389 import DirSrv, Entry, tools, tasks\nfrom ' + 'lib389.tools import DirSrvTools\nfrom lib389._constants ' + 'import *\nfrom lib389.properties import *\n' + - 'from lib389.tasks import *\nfrom lib389.utils import *\n\n') - - # Add topology function for a ticket only. 
- # Suites have presetuped fixtures in lib389/topologies.py - if ticket: - TEST.write('DEBUGGING = False\n\n') - TEST.write('if DEBUGGING:\n') - TEST.write(' logging.getLogger(__name__).setLevel(logging.DEBUG)\n') - TEST.write('else:\n') - TEST.write(' logging.getLogger(__name__).setLevel(logging.INFO)\n') - TEST.write('log = logging.getLogger(__name__)\n\n\n') - + 'from lib389.tasks import *\nfrom lib389.utils import *\n' + + '{}\n'.format(topology_import)) + + TEST.write('DEBUGGING = os.getenv("DEBUGGING", default=False)\n') + TEST.write('if DEBUGGING:\n') + TEST.write(' logging.getLogger(__name__).setLevel(logging.DEBUG)\n') + TEST.write('else:\n') + TEST.write(' logging.getLogger(__name__).setLevel(logging.INFO)\n') + TEST.write('log = logging.getLogger(__name__)\n\n\n') + + # Add topology function for non existing (in lib389/topologies.py) topologies only + if not my_topology[0]: # Write the replication or standalone classes repl_deployment = False @@ -247,7 +286,7 @@ if len(sys.argv) > 0: # Write the 'topology function' TEST.write('@pytest.fixture(scope="module")\n') - TEST.write('def topology(request):\n') + TEST.write('def {}(request):\n'.format(my_topology[1])) if repl_deployment: TEST.write(' """Create Replication Deployment"""\n') @@ -363,9 +402,9 @@ if len(sys.argv) > 0: "defaultProperties[REPLICATION_TRANSPORT]}\n") TEST.write(' m' + str(master_idx) + '_m' + str(idx) + '_agmt = master' + str(master_idx) + - '.agreement.create(suffix=SUFFIX, host=master' + - str(idx) + '.host, port=master' + str(idx) + - '.port, properties=properties)\n') + '.agreement.create(suffix=SUFFIX, host=master' + + str(idx) + '.host, port=master' + str(idx) + + '.port, properties=properties)\n') TEST.write(' if not m' + str(master_idx) + '_m' + str(idx) + '_agmt:\n') TEST.write(' log.fatal("Fail to create a master -> ' + @@ -504,7 +543,7 @@ if len(sys.argv) > 0: for idx in range(hubs): idx += 1 TEST.write(' master1.agreement.init(SUFFIX, HOST_HUB_' + - str(idx) + ', PORT_HUB_' + str(idx) + ')\n') + str(idx) + ', PORT_HUB_' + str(idx) + ')\n') TEST.write(' master1.waitForReplInit(m1_h' + str(idx) + '_agmt)\n') for idx in range(consumers): @@ -561,7 +600,7 @@ if len(sys.argv) > 0: TEST.write(', hub' + str(idx + 1)) for idx in range(consumers): TEST.write(', consumer' + str(idx + 1)) - TEST.write(')\n\n') + TEST.write(')\n\n\n') # Standalone servers else: @@ -612,12 +651,11 @@ if len(sys.argv) > 0: if idx == 1: continue TEST.write(', standalone' + str(idx)) - TEST.write(')\n\n') - TEST.write('\n') + TEST.write(')\n\n\n') # Write the test function if ticket: - TEST.write('def test_ticket' + ticket + '(topology):\n') + TEST.write('def test_ticket{}({}):\n'.format(ticket, my_topology[1])) if repl_deployment: TEST.write(' """Write your replication test here.\n\n') TEST.write(' To access each DirSrv instance use: ' + @@ -626,7 +664,7 @@ if len(sys.argv) > 0: ',...\n\n') TEST.write(' Also, if you need any testcase initialization,\n') TEST.write(' please, write additional fixture for that' + - '(include ' + 'finalizer).\n') + '(including finalizer).\n') else: TEST.write(' """Write your testcase here...\n\n') TEST.write(' Also, if you need any testcase initialization,\n') @@ -634,20 +672,11 @@ if len(sys.argv) > 0: '(include finalizer).\n') TEST.write(' """\n\n') else: - TEST.write('def test_something(topology_XX):\n') + TEST.write('def test_something({}):\n'.format(my_topology[1])) TEST.write(' """Write a single test here...\n\n') TEST.write(' Also, if you need any test suite initialization,\n') - TEST.write(' 
please, write additional fixture for that(include finalizer).\n' + - ' Topology for suites are predefined in lib389/topologies.py.\n\n' - ' Choose one of the options:\n' - ' 1) topology_st for standalone\n' - ' topology.standalone\n' - ' 2) topology_m2 for two masters\n' - ' topology.ms["master{1,2}"]\n' - ' each master has agreements\n' - ' topology.ms["master{1,2}_agmts"][m{1,2}_m{2,1}]\n' - ' 3) topology_m4 for four masters\n' - ' the same as topology_m2 but has more masters and agreements\n' + TEST.write(' please, write additional fixture for that (including finalizer).\n' + ' Topology for suites are predefined in lib389/topologies.py.\n' ' """\n\n') TEST.write(' if DEBUGGING:\n')
0
5beb93d42efb807838c09c5fab898876876f8d09
389ds/389-ds-base
Trac Ticket #340 - Change on SLAPI_MODRDN_NEWSUPERIOR is not evaluated in acl https://fedorahosted.org/389/ticket/340 Bug Description: When a modrdn operation was executed, only the newrdn change was passed to the acl plugin. Also, the change was used only for the acl search, not for the acl targets of the items in the acl cache. Fix Description: This patch also passes the newsuperior update to the acl plugin, and the modrdn updates are applied to the acl targets in the acl cache.
commit 5beb93d42efb807838c09c5fab898876876f8d09 Author: Noriko Hosoi <[email protected]> Date: Fri Sep 21 12:35:18 2012 -0700 Trac Ticket #340 - Change on SLAPI_MODRDN_NEWSUPERIOR is not evaluated in acl https://fedorahosted.org/389/ticket/340 Bug Description: When modrdn operation was executed, only newrdn change was passed to the acl plugin. Also, the change was used only for the acl search, but not for the acl target in the items in the acl cache. Fix Description: This patch also passes the newsuperior update to the acl plugin. And the modrdn updates are applied to the acl target in the acl cache. diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c index 15e474e4a..338940415 100644 --- a/ldap/servers/plugins/acl/acl.c +++ b/ldap/servers/plugins/acl/acl.c @@ -170,9 +170,9 @@ acl_access_allowed_modrdn( * Test if have access to make the first rdn of dn in entry e. */ -static int check_rdn_access( Slapi_PBlock *pb, Slapi_Entry *e, const char *dn, - int access) { - +static int +check_rdn_access( Slapi_PBlock *pb, Slapi_Entry *e, const char *dn, int access) +{ char **dns; char **rdns; int retCode = LDAP_INSUFFICIENT_ACCESS; @@ -655,7 +655,8 @@ cleanup_and_ret: } -static void print_access_control_summary( char *source, int ret_val, char *clientDn, +static void +print_access_control_summary( char *source, int ret_val, char *clientDn, struct acl_pblock *aclpb, char *right, char *attr, @@ -1524,11 +1525,12 @@ acl_check_mods( * **************************************************************************/ extern void -acl_modified (Slapi_PBlock *pb, int optype, char *n_dn, void *change) +acl_modified (Slapi_PBlock *pb, int optype, Slapi_DN *e_sdn, void *change) { struct berval **bvalue; char **value; int rv=0; /* returned value */ + const char* n_dn; char* new_RDN; char* parent_DN; char* new_DN; @@ -1537,10 +1539,12 @@ acl_modified (Slapi_PBlock *pb, int optype, char *n_dn, void *change) int j; Slapi_Attr *attr = NULL; Slapi_Entry *e = NULL; - Slapi_DN *e_sdn; aclUserGroup *ugroup = NULL; - e_sdn = slapi_sdn_new_normdn_byval ( n_dn ); + if (NULL == e_sdn) { + return; + } + n_dn = slapi_sdn_get_dn(e_sdn); /* Before we proceed, Let's first check if we are changing any groups. 
** If we are, then we need to change the signature */ @@ -1768,45 +1772,64 @@ acl_modified (Slapi_PBlock *pb, int optype, char *n_dn, void *change) } break; - }/* case op is modify*/ + }/* case op is modify*/ - case SLAPI_OPERATION_MODRDN: - - new_RDN = (char*) change; - slapi_log_error (SLAPI_LOG_ACL, plugin_name, - "acl_modified (MODRDN %s => \"%s\"\n", - n_dn, new_RDN); + case SLAPI_OPERATION_MODRDN: + { + char **rdn_parent; + rdn_parent = (char **)change; + new_RDN = rdn_parent[0]; + parent_DN = rdn_parent[1]; /* compute new_DN: */ - parent_DN = slapi_dn_parent (n_dn); - if (parent_DN == NULL) { - new_DN = new_RDN; + if (NULL == parent_DN) { + parent_DN = slapi_dn_parent(n_dn); + } + if (NULL == parent_DN) { + if (NULL == new_RDN) { + slapi_log_error (SLAPI_LOG_ACL, plugin_name, + "acl_modified (MODRDN %s => \"no change\"\n", + n_dn); + break; + } else { + new_DN = new_RDN; + } } else { - new_DN = slapi_create_dn_string("%s,%s", new_RDN, parent_DN); + if (NULL == new_RDN) { + Slapi_RDN *rdn= slapi_rdn_new(); + slapi_sdn_get_rdn(e_sdn, rdn); + new_DN = slapi_create_dn_string("%s,%s", slapi_rdn_get_rdn(rdn), + parent_DN); + slapi_rdn_free(&rdn); + } else { + new_DN = slapi_create_dn_string("%s,%s", new_RDN, parent_DN); + } } + slapi_log_error (SLAPI_LOG_ACL, plugin_name, + "acl_modified (MODRDN %s => \"%s\"\n", n_dn, new_RDN); /* Change the acls */ - acllist_acicache_WRITE_LOCK(); + acllist_acicache_WRITE_LOCK(); /* acllist_moddn_aci_needsLock expects normalized new_DN, * which is no need to be case-ignored */ acllist_moddn_aci_needsLock ( e_sdn, new_DN ); acllist_acicache_WRITE_UNLOCK(); /* deallocat the parent_DN */ - if (parent_DN != NULL) { - slapi_ch_free ( (void **) &new_DN ); - slapi_ch_free ( (void **) &parent_DN ); + if (parent_DN != NULL) { + slapi_ch_free_string(&new_DN); + if (parent_DN != rdn_parent[1]) { + slapi_ch_free_string(&parent_DN); + } } break; - - default: + } /* case op is modrdn */ + default: /* print ERROR */ break; } /*optype switch */ - - slapi_sdn_free ( &e_sdn ); - } + /*************************************************************************** * * acl__scan_for_acis diff --git a/ldap/servers/plugins/acl/acl.h b/ldap/servers/plugins/acl/acl.h index 4fa3e3f99..28c38e72f 100644 --- a/ldap/servers/plugins/acl/acl.h +++ b/ldap/servers/plugins/acl/acl.h @@ -796,7 +796,8 @@ int acl_read_access_allowed_on_attr ( Slapi_PBlock *pb, Slapi_Entry *e, char struct berval *val, int access); void acl_set_acllist (Slapi_PBlock *pb, int scope, char *base); void acl_gen_err_msg(int access, char *edn, char *attr, char **errbuf); -void acl_modified ( Slapi_PBlock *pb, int optype, char *dn, void *change); +void acl_modified (Slapi_PBlock *pb, int optype, Slapi_DN *e_sdn, void *change); + int acl_access_allowed_disjoint_resource( Slapi_PBlock *pb, Slapi_Entry *e, char *attr, struct berval *val, int access ); int acl_access_allowed_main ( Slapi_PBlock *pb, Slapi_Entry *e, char **attrs, @@ -866,7 +867,7 @@ void acllist_print_tree ( Avlnode *root, int *depth, char *start, char *side); AciContainer *acllist_get_aciContainer_new ( ); void acllist_done_aciContainer ( AciContainer *); -aclUserGroup* aclg_find_userGroup (char *n_dn); +aclUserGroup* aclg_find_userGroup (const char *n_dn); void aclg_regen_ugroup_signature( aclUserGroup *ugroup); void aclg_markUgroupForRemoval ( aclUserGroup *u_group ); void aclg_reader_incr_ugroup_refcnt(aclUserGroup* u_group); diff --git a/ldap/servers/plugins/acl/aclgroup.c b/ldap/servers/plugins/acl/aclgroup.c index c69429383..2231304b4 100644 --- 
a/ldap/servers/plugins/acl/aclgroup.c +++ b/ldap/servers/plugins/acl/aclgroup.c @@ -213,7 +213,7 @@ aclg_reset_userGroup ( struct acl_pblock *aclpb ) */ aclUserGroup* -aclg_find_userGroup(char *n_dn) +aclg_find_userGroup(const char *n_dn) { aclUserGroup *u_group = NULL; int i; diff --git a/ldap/servers/plugins/acl/acllist.c b/ldap/servers/plugins/acl/acllist.c index 9b5363a80..e8198af37 100644 --- a/ldap/servers/plugins/acl/acllist.c +++ b/ldap/servers/plugins/acl/acllist.c @@ -600,7 +600,6 @@ void acllist_init_scan (Slapi_PBlock *pb, int scope, const char *base) { Acl_PBlock *aclpb; - int i; AciContainer *root; char *basedn = NULL; int index; @@ -671,11 +670,6 @@ acllist_init_scan (Slapi_PBlock *pb, int scope, const char *base) aclpb->aclpb_state &= ~ACLPB_SEARCH_BASED_ON_LIST ; acllist_acicache_READ_UNLOCK(); - - i = 0; - while ( i < aclpb_max_selected_acls && aclpb->aclpb_base_handles_index[i] != -1 ) { - i++; - } } /* @@ -893,34 +887,50 @@ acllist_acicache_WRITE_LOCK( ) int acllist_moddn_aci_needsLock ( Slapi_DN *oldsdn, char *newdn ) { - - AciContainer *aciListHead; AciContainer *head; + aci_t *acip; + const char *oldndn; /* first get the container */ aciListHead = acllist_get_aciContainer_new ( ); slapi_sdn_free(&aciListHead->acic_sdn); - aciListHead->acic_sdn = oldsdn; - + aciListHead->acic_sdn = oldsdn; if ( NULL == (head = (AciContainer *) avl_find( acllistRoot, aciListHead, - (IFP) __acllist_aciContainer_node_cmp ) ) ) { + (IFP) __acllist_aciContainer_node_cmp ) ) ) { slapi_log_error ( SLAPI_PLUGIN_ACL, plugin_name, - "Can't find the acl in the tree for moddn operation:olddn%s\n", - slapi_sdn_get_ndn ( oldsdn )); + "Can't find the acl in the tree for moddn operation:olddn%s\n", + slapi_sdn_get_ndn ( oldsdn )); aciListHead->acic_sdn = NULL; __acllist_free_aciContainer ( &aciListHead ); - return 1; + return 1; } - - /* Now set the new DN */ - slapi_sdn_done ( head->acic_sdn ); - slapi_sdn_set_normdn_byval ( head->acic_sdn, newdn ); - + /* Now set the new DN */ + slapi_sdn_set_normdn_byval(head->acic_sdn, newdn); + + /* If necessary, reset the target DNs, as well. 
*/ + oldndn = slapi_sdn_get_ndn(oldsdn); + for (acip = head->acic_list; acip; acip = acip->aci_next) { + const char *ndn = slapi_sdn_get_ndn(acip->aci_sdn); + char *p = PL_strstr(ndn, oldndn); + if (p) { + if (p == ndn) { + /* target dn is identical, replace it with new DN*/ + slapi_sdn_set_normdn_byval(acip->aci_sdn, newdn); + } else { + /* target dn is a descendent of olddn, merge it with new DN*/ + char *mynewdn; + *p = '\0'; + mynewdn = slapi_ch_smprintf("%s%s", ndn, newdn); + slapi_sdn_set_normdn_passin(acip->aci_sdn, mynewdn); + } + } + } + aciListHead->acic_sdn = NULL; __acllist_free_aciContainer ( &aciListHead ); diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c index 11e56a9e9..b79d0f270 100644 --- a/ldap/servers/slapd/dn.c +++ b/ldap/servers/slapd/dn.c @@ -2097,7 +2097,7 @@ slapi_sdn_set_normdn_byval(Slapi_DN *sdn, const char *normdn) slapi_sdn_done(sdn); sdn->flag = slapi_setbit_uchar(sdn->flag, FLAG_DN); if(normdn == NULL) { - sdn->dn = slapi_ch_strdup(normdn); + sdn->dn = NULL; sdn->ndn_len = 0; } else { sdn->dn = slapi_ch_strdup(normdn); diff --git a/ldap/servers/slapd/plugin_acl.c b/ldap/servers/slapd/plugin_acl.c index b8781560b..3bc3f21e1 100644 --- a/ldap/servers/slapd/plugin_acl.c +++ b/ldap/servers/slapd/plugin_acl.c @@ -134,11 +134,10 @@ int plugin_call_acl_mods_update ( Slapi_PBlock *pb, int optype ) { struct slapdplugin *p; - char *dn; int rc = 0; - void *change = NULL; - Slapi_Entry *te = NULL; - Slapi_DN *sdn = NULL; + void *change = NULL; + Slapi_Entry *te = NULL; + Slapi_DN *sdn = NULL; Operation *operation; slapi_pblock_get (pb, SLAPI_OPERATION, &operation); @@ -146,7 +145,7 @@ plugin_call_acl_mods_update ( Slapi_PBlock *pb, int optype ) (void)slapi_pblock_get( pb, SLAPI_TARGET_SDN, &sdn ); switch ( optype ) { - case SLAPI_OPERATION_MODIFY: + case SLAPI_OPERATION_MODIFY: (void)slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &change ); break; case SLAPI_OPERATION_ADD: @@ -158,11 +157,27 @@ plugin_call_acl_mods_update ( Slapi_PBlock *pb, int optype ) } break; case SLAPI_OPERATION_MODRDN: + { + void *mychange[2]; + char *newrdn = NULL; + Slapi_DN *psdn = NULL; + char *pdn = NULL; + /* newrdn: "change" is normalized but not case-ignored */ /* The acl plugin expects normalized newrdn, but no need to be case- * ignored. */ - (void)slapi_pblock_get( pb, SLAPI_MODRDN_NEWRDN, &change ); + (void)slapi_pblock_get( pb, SLAPI_MODRDN_NEWRDN, &newrdn ); + (void)slapi_pblock_get( pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, &psdn ); + if (psdn) { + pdn = (char *)slapi_sdn_get_dn(psdn); + } else { + (void)slapi_pblock_get( pb, SLAPI_MODRDN_NEWSUPERIOR, &pdn ); + } + mychange[0] = newrdn; + mychange[1] = pdn; + change = mychange; break; + } } if (NULL == sdn) { @@ -172,10 +187,9 @@ plugin_call_acl_mods_update ( Slapi_PBlock *pb, int optype ) } /* call the global plugins first and then the backend specific */ - dn = (char*)slapi_sdn_get_ndn(sdn); /* jcm - Had to cast away const */ for ( p = get_plugin_list(PLUGIN_LIST_ACL); p != NULL; p = p->plg_next ) { if (plugin_invoke_plugin_sdn(p, SLAPI_PLUGIN_ACL_MODS_UPDATE, pb, sdn)){ - rc = (*p->plg_acl_mods_update)(pb, optype, dn, change ); + rc = (*p->plg_acl_mods_update)(pb, optype, sdn, change ); if ( rc != LDAP_SUCCESS ) break; } }
0
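The fix above rewrites the target DNs of cached acis when the entry they point at is renamed: a target equal to the old DN is replaced wholesale, and a target that is a descendant keeps its leading RDNs while its old-DN suffix is swapped for the new DN. A minimal standalone C sketch of that rewrite, using plain C strings instead of the server's Slapi_DN and PL_strstr handling (the helper name and sample DNs are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Rewrite a cached target DN after a modrdn: "olddn" -> "newdn".
 * Returns a freshly allocated string, or NULL if the target is unrelated. */
static char *
rewrite_target_dn(const char *target, const char *olddn, const char *newdn)
{
    const char *p = strstr(target, olddn);

    if (p == NULL) {
        return NULL;            /* target does not reference the renamed entry */
    }
    if (p == target) {
        return strdup(newdn);   /* target is the renamed entry itself */
    }
    /* target is a descendant: keep the leading RDNs, swap the old suffix */
    size_t keep = (size_t)(p - target);
    char *out = malloc(keep + strlen(newdn) + 1);
    if (out) {
        memcpy(out, target, keep);
        strcpy(out + keep, newdn);
    }
    return out;
}

int
main(void)
{
    const char *olddn = "ou=people,dc=example,dc=com";
    const char *newdn = "ou=staff,dc=example,dc=com";
    const char *targets[] = {
        "ou=people,dc=example,dc=com",
        "uid=jdoe,ou=people,dc=example,dc=com",
        "ou=groups,dc=example,dc=com",
    };

    for (size_t i = 0; i < sizeof(targets) / sizeof(targets[0]); i++) {
        char *updated = rewrite_target_dn(targets[i], olddn, newdn);
        printf("%s -> %s\n", targets[i], updated ? updated : "(unchanged)");
        free(updated);
    }
    return 0;
}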
4b41a02484db645a593b9d6ac6c4e062dd374395
389ds/389-ds-base
Ticket 49388 - repl-monitor - matches null string many times in regex Bug Description: When using a wildcard (*) for the hostname, some of the regexes used for parsing the various configurations throw warnings. Fix Description: When a wildcard is detected, reset the hostnode variable to an empty string. https://pagure.io/389-ds-base/issue/49388 Reviewed by: firstyear (Thanks!)
commit 4b41a02484db645a593b9d6ac6c4e062dd374395 Author: Mark Reynolds <[email protected]> Date: Mon Oct 2 16:19:47 2017 -0400 Ticket 49388 - repl-monitor - matches null string many times in regex Bug Description: When using a wildcard(*) for the hostname, some of the regex's for parsing the various configurations throws out warnings. Fix Description: When a wildcard is detected reset the hostnode variable to nothing. https://pagure.io/389-ds-base/issue/49388 Reviewed by: firstyear(Thanks!) diff --git a/ldap/admin/src/scripts/repl-monitor.pl.in b/ldap/admin/src/scripts/repl-monitor.pl.in index a3efa8e6e..97c1462a5 100755 --- a/ldap/admin/src/scripts/repl-monitor.pl.in +++ b/ldap/admin/src/scripts/repl-monitor.pl.in @@ -1053,6 +1053,10 @@ sub add_server # Remove the domain name from the host name my ($hostnode) = $host; $hostnode = $1 if $host =~ /^(.+?)\./; + if ($hostnode eq "*") { + # handle wild card correctly for regex + $hostnode = ""; + } # new host:port if (!$binddn || $binddn eq "" || $binddn eq "*" ||
0
df1abddb5c8e2253fc601ed242a87e6165e6912c
389ds/389-ds-base
Issue 5170 - RFE - improve filter logging to assist debugging (#5301) Bug Description: To help with this issue, improve filter logging for future reports. Fix Description: Improve the logging. fixes: https://github.com/389ds/389-ds-base/issues/5170 Author: William Brown <[email protected]> Review by: @progier389
commit df1abddb5c8e2253fc601ed242a87e6165e6912c Author: Firstyear <[email protected]> Date: Thu May 19 09:54:38 2022 +1000 Issue 5170 - RFE - improve filter logging to assist debugging (#5301) Bug Description: To help with this issue, improve filter logging for future reports Fix Description: Improve the logging. fixes: https://github.com/389ds/389-ds-base/issues/5170 Author: William Brown <[email protected]> Review by: @progier389 diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c index 7c1cf5bae..2d19bb8cf 100644 --- a/ldap/servers/slapd/back-ldbm/filterindex.c +++ b/ldap/servers/slapd/back-ldbm/filterindex.c @@ -185,10 +185,10 @@ ava_candidates( Operation *pb_op; Connection *pb_conn; - slapi_log_err(SLAPI_LOG_TRACE, "ava_candidates", "=>\n"); + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "=>\n"); if (slapi_filter_get_ava(f, &type, &bval) != 0) { - slapi_log_err(SLAPI_LOG_TRACE, "ava_candidates", "slapi_filter_get_ava failed\n"); + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "slapi_filter_get_ava failed\n"); return (NULL); } @@ -197,8 +197,7 @@ ava_candidates( slapi_pblock_get(pb, SLAPI_CONNECTION, &pb_conn); slapi_attr_init(&sattr, type); -#ifdef LDAP_ERROR_LOGGING - if (loglevel_is_set(LDAP_DEBUG_TRACE)) { + if (slapi_is_loglevel_set(SLAPI_LOG_FILTER)) { char *op = NULL; char buf[BUFSIZ]; @@ -216,10 +215,8 @@ ava_candidates( op = "~="; break; } - slapi_log_err(SLAPI_LOG_TRACE, "ava_candidates", " %s%s%s\n", type, op, - encode(bval, buf)); + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", " %s%s%s\n", type, op, encode(bval, buf)); } -#endif switch (ftype) { case LDAP_FILTER_GE: @@ -229,14 +226,16 @@ ava_candidates( * is on strict, we reject in search.c, if we ar off, the flag will NOT * be set on the filter at all! */ + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "WARNING - filter contains an INVALID attribute!\n"); slapi_pblock_set_flag_operation_notes(pb, SLAPI_OP_NOTE_FILTER_INVALID); } if (f->f_flags & SLAPI_FILTER_INVALID_ATTR_UNDEFINE) { + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "REJECTING invalid filter per policy!\n"); idl = idl_alloc(0); } else { idl = range_candidates(pb, be, type, bval, NULL, err, &sattr, allidslimit); } - slapi_log_err(SLAPI_LOG_TRACE, "ava_candidates", "<= %lu\n", + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "<= idl len %lu\n", (u_long)IDL_NIDS(idl)); goto done; break; @@ -247,14 +246,16 @@ ava_candidates( * is on strict, we reject in search.c, if we ar off, the flag will NOT * be set on the filter at all! 
*/ + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "WARNING - filter contains an INVALID attribute!\n"); slapi_pblock_set_flag_operation_notes(pb, SLAPI_OP_NOTE_FILTER_INVALID); } if (f->f_flags & SLAPI_FILTER_INVALID_ATTR_UNDEFINE) { + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "REJECTING invalid filter per policy!\n"); idl = idl_alloc(0); } else { idl = range_candidates(pb, be, type, NULL, bval, err, &sattr, allidslimit); } - slapi_log_err(SLAPI_LOG_TRACE, "ava_candidates", "<= %lu\n", + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "<= idl len %lu\n", (u_long)IDL_NIDS(idl)); goto done; break; @@ -265,7 +266,7 @@ ava_candidates( indextype = (char *)indextype_APPROX; break; default: - slapi_log_err(SLAPI_LOG_TRACE, "ava_candidates", "<= invalid filter\n"); + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "<= invalid filter\n"); goto done; break; } @@ -303,9 +304,11 @@ ava_candidates( * is on strict, we reject in search.c, if we ar off, the flag will NOT * be set on the filter at all! */ + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "WARNING - filter contains an INVALID attribute!\n"); slapi_pblock_set_flag_operation_notes(pb, SLAPI_OP_NOTE_FILTER_INVALID); } if (f->f_flags & SLAPI_FILTER_INVALID_ATTR_UNDEFINE) { + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "REJECTING invalid filter per policy!\n"); idl = idl_alloc(0); } else { slapi_attr_assertion2keys_ava_sv(&sattr, &tmp, (Slapi_Value ***)&ivals, LDAP_FILTER_EQUALITY_FAST); @@ -338,9 +341,11 @@ ava_candidates( * is on strict, we reject in search.c, if we ar off, the flag will NOT * be set on the filter at all! */ + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "WARNING - filter contains an INVALID attribute!\n"); slapi_pblock_set_flag_operation_notes(pb, SLAPI_OP_NOTE_FILTER_INVALID); } if (f->f_flags & SLAPI_FILTER_INVALID_ATTR_UNDEFINE) { + slapi_log_err(SLAPI_LOG_FILTER, "ava_candidates", "REJECTING invalid filter per policy!\n"); idl = idl_alloc(0); } else { slapi_value_init_berval(&sv, bval); @@ -894,6 +899,7 @@ list_candidates( * and allids - we should not process anymore, and fallback to full * table scan at this point. */ + slapi_log_err(SLAPI_LOG_TRACE, "list_candidates", "OR shortcut condition - must apply filter test\n"); sr->sr_flags |= SR_FLAG_MUST_APPLY_FILTER_TEST; goto apply_set_op; } @@ -903,6 +909,7 @@ list_candidates( * If we encounter a zero length idl, we bail now because this can never * result in a meaningful result besides zero. */ + slapi_log_err(SLAPI_LOG_TRACE, "list_candidates", "AND shortcut condition - must apply filter test\n"); sr->sr_flags |= SR_FLAG_MUST_APPLY_FILTER_TEST; goto apply_set_op; } @@ -932,8 +939,7 @@ apply_set_op: idl = idl_set_intersect(idl_set, be); } - slapi_log_err(SLAPI_LOG_TRACE, "list_candidates", "<= %lu\n", - (u_long)IDL_NIDS(idl)); + slapi_log_err(SLAPI_LOG_TRACE, "list_candidates", "<= idl len %lu\n", (u_long)IDL_NIDS(idl)); out: idl_set_destroy(idl_set); if (is_and) {
0
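The logging changes above only build the human-readable filter string when the filter log level is actually enabled, so the formatting cost is not paid on every search. A hedged C sketch of that guard pattern, with a stand-in level check in place of the real slapi_is_loglevel_set()/slapi_log_err() calls:

#include <stdio.h>
#include <string.h>

/* Stand-in for the server's log-level machinery; names are illustrative. */
#define LOG_LEVEL_FILTER 0x1
static unsigned int g_loglevel = LOG_LEVEL_FILTER;

static int
loglevel_is_set(unsigned int level)
{
    return (g_loglevel & level) != 0;
}

static void
log_filter_candidates(const char *type, const char *op, const char *value)
{
    /* Only pay for the formatting work when the filter log level is on. */
    if (loglevel_is_set(LOG_LEVEL_FILTER)) {
        char buf[256];
        snprintf(buf, sizeof(buf), "%s%s%s", type, op, value);
        fprintf(stderr, "ava_candidates - %s\n", buf);
    }
}

int
main(void)
{
    log_filter_candidates("uid", "=", "user_0000001");
    g_loglevel = 0;                 /* level disabled: no formatting, no output */
    log_filter_candidates("cn", ">=", "aaa");
    return 0;
}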
1bbef77aab28c2f6d89de810811b08267d0bb542
389ds/389-ds-base
Issue 4861 - Improve instructions in custom.conf for memory leak detection Description: Extend instructions in /usr/lib/systemd/system/[email protected]/custom.conf to provide guides on how to use valgrind and AddressSanitizer. Fixes: https://github.com/389ds/389-ds-base/issues/4861 Reviewed by: @mreynolds389 (Thanks!)
commit 1bbef77aab28c2f6d89de810811b08267d0bb542 Author: Viktor Ashirov <[email protected]> Date: Thu Aug 5 23:05:51 2021 +0200 Issue 4861 - Improve instructions in custom.conf for memory leak detection Description: Extend instructions in /usr/lib/systemd/system/[email protected]/custom.conf to provide guides on how to use valgrind and AddressSanitizer. Fixes: https://github.com/389ds/389-ds-base/issues/4861 Reviewed by: @mreynolds389 (Thanks!) diff --git a/wrappers/systemd.template.service.custom.conf.in b/wrappers/systemd.template.service.custom.conf.in index 1c9241095..884bc0e83 100644 --- a/wrappers/systemd.template.service.custom.conf.in +++ b/wrappers/systemd.template.service.custom.conf.in @@ -52,6 +52,66 @@ TimeoutStopSec=600 # Preload jemalloc Environment=LD_PRELOAD=@libdir@/@package_name@/lib/libjemalloc.so.2 -# Uncomment to enable leak checking using jemalloc's heap profiler -# https://github.com/jemalloc/jemalloc/wiki/Use-Case%3A-Leak-Checking -#Environment=MALLOC_CONF=prof_leak:true,lg_prof_sample:0,prof_final:true,prof_prefix:/var/run/dirsrv/jeprof +################################################## +# Heap profiling with jemalloc # +################################################## +# Generated files will be named /run/dirsrv/jeprof*.heap +# Uncomment *one* of the following lines to enable leak checking using jemalloc's heap profiler. +# See https://github.com/jemalloc/jemalloc/wiki/Use-Case%3A-Leak-Checking for more details. +#Environment=MALLOC_CONF=prof:true,prof_leak:true,lg_prof_sample:19,prof_final:true,prof_prefix:/run/dirsrv/jeprof +# +# +# Additionally print stats in a human readable form: +#Environment=MALLOC_CONF=prof:true,prof_leak:true,lg_prof_sample:19,prof_final:true,stats_print:true,prof_prefix:/run/dirsrv/jeprof +# +# +# Or in a machine readable form (JSON) +#Environment=MALLOC_CONF=prof:true,prof_leak:true,lg_prof_sample:19,prof_final:true,stats_print:true,stats_print_opts:J,prof_prefix:/run/dirsrv/jeprof +# +# +################################################## +# Leak detection with Valgrind # +################################################## +# Generated files will be named /run/dirsrv/ns-slapd-INSTANCE_NAME.valgrind.PID +# Make sure valgrind is installed and debuginfo is present for 389-ds-base and 389-ds-base-libs. +# E.g. on Fedora/RHEL: +# # dnf install valgrind -y +# # debuginfo-install 389-ds-base 389-ds-base-libs -y +# +# Uncomment the following lines. Empty keys reset their values so we can override them. +#TimeoutStartSec=3600 +#TimeoutStopSec=3600 +#Environment= +#ExecStartPre= +#ExecStart= +#ExecStart=/usr/bin/valgrind --tool=memcheck --num-callers=40 --leak-check=full --show-leak-kinds=all --track-origins=yes --log-file=/run/dirsrv/ns-slapd-%i.valgrind.%%p /usr/sbin/ns-slapd -D /etc/dirsrv/slapd-%i -i /run/dirsrv/slapd-%i.pid +# +# +################################################## +# Leak detection with AddressSanitizer # +################################################## +# Generated files will be named /run/dirsrv/ns-slapd-INSTANCE_NAME.asan.PID +# Make sure libasan is installed and debuginfo is present for 389-ds-base and 389-ds-base-libs. +# E.g. on Fedora/RHEL: +# # dnf install libasan -y +# # debuginfo-install 389-ds-base 389-ds-base-libs -y +# +# To get the exact library name to use with LD_PRELOAD, run +# # rpm -ql libasan | grep libasan +# +# On versions of systemd=>246 you also need to ensure that `sysctl fs.suid_dumpable` is set to 1. 
+# (add fs.suid_dumpable=1 to /etc/sysctl.d/99-sysctl.conf and run `sysctl -p`) +# +# You also might need to temporary disable SELinux: +# # setenforce 0 +# Don't forget to enable it back after you're done! +# # setenforce 1 +# or create a custom SELinux policy to allow ptrace() for ns-slapd process. +# +# Uncomment the following lines. Empty keys reset their values so we can override them. +#TimeoutStartSec=3600 +#TimeoutStopSec=3600 +#Environment= +#ExecStartPre= +#Environment=LD_PRELOAD=/usr/lib64/libasan.so.6 +#Environment=ASAN_OPTIONS=log_path=/run/dirsrv/ns-slapd-%i.asan:print_stacktrace=1:detect_leaks=1:exit_code=0:fast_unwind_on_malloc=0
0
0be073f9d70d0d88127d550a24f5c735a8c04253
389ds/389-ds-base
Ticket #48784 - Make the SSL version set to the client library configurable. Description: The value to set to LDAP_OPT_X_TLS_PROTOCOL_MIN is hardcoded: optval = LDAP_OPT_X_TLS_PROTOCOL_SSL3; ldap_set_option(ld, LDAP_OPT_X_TLS_PROTOCOL_MIN, &optval); Changing the code to retrieve the supported SSL min version and set it. https://fedorahosted.org/389/ticket/48784 Reviewed by [email protected] (Thank you, William!)
commit 0be073f9d70d0d88127d550a24f5c735a8c04253 Author: Noriko Hosoi <[email protected]> Date: Fri Apr 1 09:52:17 2016 -0700 Ticket #48784 - Make the SSL version set to the client library configurable. Description: The value to set to LDAP_OPT_X_TLS_PROTOCOL_MIN is hardcoded: optval = LDAP_OPT_X_TLS_PROTOCOL_SSL3; ldap_set_option(ld, LDAP_OPT_X_TLS_PROTOCOL_MIN, &optval); Changing the code to retrieve the supported SSL min version and set it. https://fedorahosted.org/389/ticket/48784 Reviewed by [email protected] (Thank you, William!) diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c index 3851be5c8..8a54cb9c2 100644 --- a/ldap/servers/slapd/ldaputil.c +++ b/ldap/servers/slapd/ldaputil.c @@ -70,6 +70,11 @@ static PRCallOnceType ol_init_callOnce = {0,0}; static PRLock *ol_init_lock = NULL; +#if defined(USE_OPENLDAP) +extern void getSSLVersionRangeOL(int *min, int *max); +extern int getSSLVersionRange(char **min, char **max); +#endif + static PRStatus internal_ol_init_init(void) { @@ -572,35 +577,38 @@ setup_ol_tls_conn(LDAP *ld, int clientauth) int rc = 0; if (config_get_ssl_check_hostname()) { - ssl_strength = LDAP_OPT_X_TLS_HARD; + ssl_strength = LDAP_OPT_X_TLS_HARD; } else { - /* verify certificate only */ - ssl_strength = LDAP_OPT_X_TLS_NEVER; + /* verify certificate only */ + ssl_strength = LDAP_OPT_X_TLS_NEVER; } if ((rc = ldap_set_option(ld, LDAP_OPT_X_TLS_REQUIRE_CERT, &ssl_strength))) { - slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", - "failed: unable to set REQUIRE_CERT option to %d\n", ssl_strength); + slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", + "failed: unable to set REQUIRE_CERT option to %d\n", ssl_strength); } /* tell it where our cert db is */ if ((rc = ldap_set_option(ld, LDAP_OPT_X_TLS_CACERTDIR, certdir))) { - slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", - "failed: unable to set CACERTDIR option to %s\n", certdir); + slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", + "failed: unable to set CACERTDIR option to %s\n", certdir); } slapi_ch_free_string(&certdir); #if defined(LDAP_OPT_X_TLS_PROTOCOL_MIN) - optval = LDAP_OPT_X_TLS_PROTOCOL_SSL3; + getSSLVersionRangeOL(&optval, NULL); if ((rc = ldap_set_option(ld, LDAP_OPT_X_TLS_PROTOCOL_MIN, &optval))) { - slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", - "failed: unable to set minimum TLS protocol level to SSL3\n"); + char *minstr = NULL; + (void)getSSLVersionRange(&minstr, NULL); + slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", + "failed: unable to set minimum TLS protocol level to %s\n", minstr); + slapi_ch_free_string(&minstr); } #endif /* LDAP_OPT_X_TLS_PROTOCOL_MIN */ if (clientauth) { - rc = slapd_SSL_client_auth(ld); - if (rc) { - slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", - "failed: unable to setup connection for TLS/SSL EXTERNAL client cert authentication - %d\n", rc); - } + rc = slapd_SSL_client_auth(ld); + if (rc) { + slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", + "failed: unable to setup connection for TLS/SSL EXTERNAL client cert authentication - %d\n", rc); + } } /* have to do this last - this creates the new TLS handle and sets/copies @@ -608,8 +616,8 @@ setup_ol_tls_conn(LDAP *ld, int clientauth) that optval is zero, meaning create a context for a client */ optval = 0; if ((rc = ldap_set_option(ld, LDAP_OPT_X_TLS_NEWCTX, &optval))) { - slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", - "failed: unable to create new TLS context - %d\n", rc); + slapi_log_error(SLAPI_LOG_FATAL, "setup_ol_tls_conn", + "failed: unable to 
create new TLS context - %d\n", rc); } return rc; diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c index 38efc73d0..544c9bcf3 100644 --- a/ldap/servers/slapd/ssl.c +++ b/ldap/servers/slapd/ssl.c @@ -380,21 +380,100 @@ getSSLVersionInfo(int *ssl2, int *ssl3, int *tls1) int getSSLVersionRange(char **min, char **max) { - if (!slapd_ssl_listener_is_initialized()) { + if (!min && !max) { return -1; } - if ((NULL == min) || (NULL == max)) { + if (!slapd_ssl_listener_is_initialized()) { + if (min) { + *min = slapi_getSSLVersion_str(LDAP_OPT_X_TLS_PROTOCOL_TLS1_0, NULL, 0); + } + if (max) { + *max = slapi_getSSLVersion_str(LDAP_OPT_X_TLS_PROTOCOL_TLS1_2, NULL, 0); + } return -1; } #if defined(NSS_TLS10) return -1; /* not supported */ #else /* NSS_TLS11 or newer */ - *min = slapi_getSSLVersion_str(slapdNSSVersions.min, NULL, 0); - *max = slapi_getSSLVersion_str(slapdNSSVersions.max, NULL, 0); + if (min) { + *min = slapi_getSSLVersion_str(slapdNSSVersions.min, NULL, 0); + } + if (max) { + *max = slapi_getSSLVersion_str(slapdNSSVersions.max, NULL, 0); + } return 0; #endif } +#if defined(USE_OPENLDAP) +void +getSSLVersionRangeOL(int *min, int *max) +{ + /* default range values */ + if (min) { + *min = LDAP_OPT_X_TLS_PROTOCOL_TLS1_0; + } + if (max) { + *max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; + } + if (!slapd_ssl_listener_is_initialized()) { + return; + } +#if defined(NSS_TLS10) + *max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_0; + return; +#else /* NSS_TLS11 or newer */ + if (min) { + switch (slapdNSSVersions.min) { + case SSL_LIBRARY_VERSION_3_0: + *min = LDAP_OPT_X_TLS_PROTOCOL_SSL3; + break; + case SSL_LIBRARY_VERSION_TLS_1_0: + *min = LDAP_OPT_X_TLS_PROTOCOL_TLS1_0; + break; + case SSL_LIBRARY_VERSION_TLS_1_1: + *min = LDAP_OPT_X_TLS_PROTOCOL_TLS1_1; + break; + case SSL_LIBRARY_VERSION_TLS_1_2: + *min = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; + break; + default: + if (slapdNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_2) { + *min = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 + 1; + } else { + *min = LDAP_OPT_X_TLS_PROTOCOL_SSL3; + } + break; + } + } + if (max) { + switch (slapdNSSVersions.max) { + case SSL_LIBRARY_VERSION_3_0: + *max = LDAP_OPT_X_TLS_PROTOCOL_SSL3; + break; + case SSL_LIBRARY_VERSION_TLS_1_0: + *max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_0; + break; + case SSL_LIBRARY_VERSION_TLS_1_1: + *max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_1; + break; + case SSL_LIBRARY_VERSION_TLS_1_2: + *max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; + break; + default: + if (slapdNSSVersions.max > SSL_LIBRARY_VERSION_TLS_1_2) { + *max = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 + 1; + } else { + *max = LDAP_OPT_X_TLS_PROTOCOL_SSL3; + } + break; + } + } + return; +#endif +} +#endif /* USE_OPENLDAP */ + static void _conf_init_ciphers() {
0
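The change above stops hardcoding SSL3 and instead derives the minimum protocol before handing it to ldap_set_option(). A hedged sketch of that client-side call against OpenLDAP's libldap; here the minimum is simply TLS 1.2 as an example value rather than the result of getSSLVersionRangeOL(), and the URL is illustrative:

#include <stdio.h>
#include <ldap.h>   /* OpenLDAP client library */

int
main(void)
{
    LDAP *ld = NULL;
    int rc = ldap_initialize(&ld, "ldaps://localhost:636"); /* illustrative URL */

    if (rc != LDAP_SUCCESS) {
        fprintf(stderr, "ldap_initialize failed: %s\n", ldap_err2string(rc));
        return 1;
    }

    /* Instead of hardcoding SSL3, pick the minimum from configuration;
     * TLS 1.2 is used here purely as an example value. */
    int minversion = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2;
    rc = ldap_set_option(ld, LDAP_OPT_X_TLS_PROTOCOL_MIN, &minversion);
    if (rc != LDAP_OPT_SUCCESS) {
        fprintf(stderr, "unable to set minimum TLS protocol level\n");
    }

    ldap_unbind_ext_s(ld, NULL, NULL);
    return rc == LDAP_OPT_SUCCESS ? 0 : 1;
}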
f91b5826a57304011f7a027d05a20a0ed05bf3ea
389ds/389-ds-base
Ticket #48048 - Fix coverity issues - 2015/2/24 Coverity defect 13067 - Explicit null dereferenced (FORWARD_NULL) Description: Added NULL check for op. modified: cb_forward_operation in cb_utils.c
commit f91b5826a57304011f7a027d05a20a0ed05bf3ea Author: Noriko Hosoi <[email protected]> Date: Tue Feb 24 14:05:25 2015 -0800 Ticket #48048 - Fix coverity issues - 2015/2/24 Coverity defect 13067 - Explicit null dereferenced (FORWARD_NULL) Description: Added NULL check for op. modified: cb_forward_operation in cb_utils.c diff --git a/ldap/servers/plugins/chainingdb/cb_utils.c b/ldap/servers/plugins/chainingdb/cb_utils.c index f35943b16..5ab650991 100644 --- a/ldap/servers/plugins/chainingdb/cb_utils.c +++ b/ldap/servers/plugins/chainingdb/cb_utils.c @@ -127,21 +127,25 @@ struct berval ** referrals2berval(char ** referrals) { ** We also check max hop count for loop detection for both internal ** and external operations */ - -int cb_forward_operation(Slapi_PBlock * pb ) { - - Slapi_Operation *op=NULL; - Slapi_Backend *be; - struct slapi_componentid *cid = NULL; - char *pname; - cb_backend_instance *cb; - int retcode; - LDAPControl **ctrls=NULL; - - slapi_pblock_get (pb, SLAPI_OPERATION, &op); +int +cb_forward_operation(Slapi_PBlock * pb) +{ + Slapi_Operation *op = NULL; + Slapi_Backend *be; + struct slapi_componentid *cid = NULL; + char *pname; + cb_backend_instance *cb; + int retcode; + LDAPControl **ctrls=NULL; + + slapi_pblock_get (pb, SLAPI_OPERATION, &op); + if (NULL == op) { + slapi_log_error(SLAPI_LOG_PLUGIN, CB_PLUGIN_SUBSYSTEM, "No operation is set.\n"); + return LDAP_UNWILLING_TO_PERFORM; + } /* Loop detection */ - slapi_pblock_get( pb, SLAPI_REQCONTROLS, &ctrls ); + slapi_pblock_get( pb, SLAPI_REQCONTROLS, &ctrls ); if ( NULL != ctrls ) { struct berval *ctl_value=NULL;
0
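The pattern in this fix is to never dereference a pointer fetched from the pblock without checking it first. A hedged, self-contained C sketch of that defensive check, with local stand-in types in place of Slapi_PBlock, slapi_pblock_get() and LDAP_UNWILLING_TO_PERFORM:

#include <stdio.h>

/* Stand-in types and constants; a real plugin would use the slapi-plugin.h API. */
typedef struct operation { const char *name; } Operation;
typedef struct pblock { Operation *op; } PBlock;
#define RC_UNWILLING_TO_PERFORM 53

static void
pblock_get_operation(PBlock *pb, Operation **op)
{
    *op = pb ? pb->op : NULL;
}

static int
forward_operation(PBlock *pb)
{
    Operation *op = NULL;

    pblock_get_operation(pb, &op);
    if (op == NULL) {
        /* Bail out instead of dereferencing a NULL operation. */
        fprintf(stderr, "No operation is set.\n");
        return RC_UNWILLING_TO_PERFORM;
    }
    printf("forwarding operation: %s\n", op->name);
    return 0;
}

int
main(void)
{
    Operation search = { "search" };
    PBlock with_op = { &search };
    PBlock without_op = { NULL };

    forward_operation(&with_op);
    return forward_operation(&without_op) == RC_UNWILLING_TO_PERFORM ? 0 : 1;
}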
db124a2711576de2dd0009528d60b2860e815210
389ds/389-ds-base
Ticket 47421 - memory leaks in set_krb5_creds Bug Description: Valgrind shows memory leaks in set_krb5_creds() Fix description: krb5_unparse_name() allocates and returns the principal name, but the function was called twice without freeing the first returned value. https://fedorahosted.org/389/ticket/47421 Reviewed by: richm (Thanks!)
commit db124a2711576de2dd0009528d60b2860e815210 Author: Mark Reynolds <[email protected]> Date: Thu Jul 18 15:50:36 2013 -0400 Ticket 47421 - memory leaks in set_krb5_creds Bug Description: Valgrind shows memory leaks in set_krb5_creds() Fix description: krb5_unparse_name() allocates/returns the principle name. But we can call this function twice without freeing the first returned value. https://fedorahosted.org/389/ticket/47421 Reviewed by: richm(Thanks!) diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c index ae07c4426..f9117671d 100644 --- a/ldap/servers/slapd/ldaputil.c +++ b/ldap/servers/slapd/ldaputil.c @@ -1950,6 +1950,7 @@ set_krb5_creds( } } + slapi_ch_free_string(&princ_name); if ((rc = krb5_unparse_name(ctx, princ, &princ_name))) { slapi_log_error(SLAPI_LOG_FATAL, logname, "Unable to get name of principal: "
0
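A hedged sketch of the leak fixed above: krb5_unparse_name() allocates the string it returns, so the first result has to be released before the pointer is reused for a second call. The principal string is illustrative and error handling is trimmed to the essentials:

#include <stdio.h>
#include <krb5.h>

int
main(void)
{
    krb5_context ctx = NULL;
    krb5_principal princ = NULL;
    char *princ_name = NULL;

    if (krb5_init_context(&ctx) != 0) {
        return 1;
    }
    /* The principal string is purely illustrative. */
    if (krb5_parse_name(ctx, "ldap/[email protected]", &princ) != 0) {
        krb5_free_context(ctx);
        return 1;
    }

    if (krb5_unparse_name(ctx, princ, &princ_name) == 0) {
        printf("first unparse: %s\n", princ_name);
    }

    /* Without this free, the second call below would leak the first buffer. */
    krb5_free_unparsed_name(ctx, princ_name);
    princ_name = NULL;

    if (krb5_unparse_name(ctx, princ, &princ_name) == 0) {
        printf("second unparse: %s\n", princ_name);
    }

    krb5_free_unparsed_name(ctx, princ_name);
    krb5_free_principal(ctx, princ);
    krb5_free_context(ctx);
    return 0;
}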
43c6ff2e7801ff6bbc03961b3161dd60aebf707a
389ds/389-ds-base
Ticket #47835 - Coverity: 12687..12692 12687 - Unbounded source buffer Description: To solve "Passing string argv[0] of unknown size to usage, which expects a string of a particular size", get ARG_MAX and pass it to slapi_ch_strndup. Reviewed by [email protected] (Thanks, Rich!) https://fedorahosted.org/389/ticket/47835
commit 43c6ff2e7801ff6bbc03961b3161dd60aebf707a Author: Noriko Hosoi <[email protected]> Date: Tue Jul 1 11:35:37 2014 -0700 Ticket #47835 - Coverity: 12687..12692 12687 - Unbounded source buffer Description: To solve "Passing string argv[0] of unknown size to usage, which expects a string of a particular size", get ARG_MAX and pass it to slapi_ch_strndup. Reviewed by [email protected] (Thanks, Rich!) https://fedorahosted.org/389/ticket/47835 diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c index 023fadea8..bbfcd0eb6 100644 --- a/ldap/servers/slapd/tools/dbscan.c +++ b/ldap/servers/slapd/tools/dbscan.c @@ -1077,16 +1077,17 @@ is_changelog(char *filename) static void usage(char *argv0) { - char *copy = strdup(argv0); + long arg_max = sysconf(_SC_ARG_MAX); + char *copy = strndup(argv0, arg_max); char *p0 = NULL, *p1 = NULL; - if (NULL != copy) { + if (copy && (strlen(copy) < arg_max)) { /* the full path is not needed in the usages */ - p0 = strrchr(argv0, '/'); - if (NULL != p0) { + p0 = strrchr(copy, '/'); + if (p0) { *p0 = '\0'; p0++; } else { - p0 = argv0; + p0 = copy; } p1 = strrchr(p0, '-'); /* get rid of -bin from the usage */ if (NULL != p1) { @@ -1124,6 +1125,9 @@ static void usage(char *argv0) printf(" # display summary of objectclass.db4\n"); printf(" %s -f objectclass.db4\n", p0); printf("\n"); + if (copy) { + free(copy); + } exit(1); }
0
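A hedged sketch of the bounded-copy pattern from the fix above: query _SC_ARG_MAX with sysconf() and pass it to strndup() so an oversized argv[0] is never copied unbounded. The fallback limit is an assumption for systems where sysconf() reports no limit:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
    (void)argc;

    long arg_max = sysconf(_SC_ARG_MAX);
    if (arg_max <= 0) {
        arg_max = 4096;   /* assumed fallback when the limit is indeterminate */
    }

    /* Bound the copy so an unexpectedly long argv[0] cannot be duplicated unbounded. */
    char *copy = strndup(argv[0], (size_t)arg_max);
    if (copy == NULL) {
        return 1;
    }

    /* Strip the directory part, as the usage text only needs the program name. */
    char *base = strrchr(copy, '/');
    printf("usage: %s [options]\n", base ? base + 1 : copy);

    free(copy);
    return 0;
}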
938fb3478ba5c0f985f79d84876d643e9453d15c
389ds/389-ds-base
Ticket 50784 - performance testing scripts Bug Description: Everyone loves things to be fast, so to understand how to achieve that we need metrics and observability into the server to know what to change. Fix Description: This adds some Python-based test runners able to set up and trigger ldclt against remote hosts. For those remote hosts, this adds support for activating systemtap probes (--enable-systemtap), which can then be hooked by the scripts in profiling/stap/ to get histograms of function latency and timing. This also adds the debug info needed to use mutrace (http://0pointer.de/blog/projects/mutrace.html) to identify lock contention. https://pagure.io/389-ds-base/issue/50784 Author: William Brown <[email protected]> Review by: tbordaz, mreynolds (Thanks!)
commit 938fb3478ba5c0f985f79d84876d643e9453d15c Author: William Brown <[email protected]> Date: Fri Feb 2 11:02:19 2018 +1000 Ticket 50784 - performance testing scripts Bug Description: Everyone loves things to be fast, so to understand how to achieve that we need metrics and observability into the server to know what to change. Fix Description: This adds some python based test runnenrs able to setup and trigger ldclt against remote hosts. For those remove hosts, this adds support for them to have systemtap probes activated (--enable-systemtap), which can then be hooked by the scripts in profiling/stap/ to get histograms of function latency and timing. This also adds the needed debug info to use mutrace (http://0pointer.de/blog/projects/mutrace.html) to identify lock contention https://pagure.io/389-ds-base/issue/50784 Author: William Brown <[email protected]> Review by: tbordaz, mreynolds (Thanks!) diff --git a/Makefile.am b/Makefile.am index 026687e4e..6409d0881 100644 --- a/Makefile.am +++ b/Makefile.am @@ -29,6 +29,7 @@ SYSTEMD_DEFINES = @systemd_defs@ CMOCKA_INCLUDES = $(CMOCKA_CFLAGS) PROFILING_DEFINES = @profiling_defs@ +SYSTEMTAP_DEFINES = @systemtap_defs@ NSPR_INCLUDES = $(NSPR_CFLAGS) # Rust inclusions. @@ -142,7 +143,7 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd # Now that we have all our defines in place, setup the CPPFLAGS # These flags are the "must have" for all components -AM_CPPFLAGS = $(DEBUG_DEFINES) $(PROFILING_DEFINES) $(RUST_DEFINES) +AM_CPPFLAGS = $(DEBUG_DEFINES) $(PROFILING_DEFINES) $(SYSTEMTAP_DEFINES) $(RUST_DEFINES) AM_CFLAGS = $(DEBUG_CFLAGS) $(GCCSEC_CFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) AM_CXXFLAGS = $(DEBUG_CXXFLAGS) $(GCCSEC_CFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) # Flags for Directory Server diff --git a/configure.ac b/configure.ac index 982bae835..cf62bcff0 100644 --- a/configure.ac +++ b/configure.ac @@ -113,8 +113,8 @@ AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug features (de AC_MSG_RESULT($enable_debug) if test "$enable_debug" = yes ; then debug_defs="-DDEBUG -DMCC_DEBUG" - debug_cflags="-g3 -O0" - debug_cxxflags="-g3 -O0" + debug_cflags="-g3 -O0 -rdynamic" + debug_cxxflags="-g3 -O0 -rdynamic" debug_rust_defs="-C debuginfo=2" cargo_defs="" rust_target_dir="debug" @@ -253,6 +253,18 @@ fi AC_SUBST([profiling_defs]) AC_SUBST([profiling_links]) +AC_MSG_CHECKING(for --enable-systemtap) +AC_ARG_ENABLE(systemtap, AS_HELP_STRING([--enable-systemtap], [Enable systemtap probe features (default: no)]), + [], [ enable_systemtap=no ]) +AC_MSG_RESULT($enable_systemtap) +if test "$enable_systemtap" = yes ; then + systemtap_defs="-DSYSTEMTAP" +else + systemtap_defs="" +fi +AC_SUBST([systemtap_defs]) + + # these enables are for optional or experimental features AC_MSG_CHECKING(for --enable-pam-passthru) AC_ARG_ENABLE(pam-passthru, diff --git a/dirsrvtests/tests/perf/search_performance_test.py b/dirsrvtests/tests/perf/search_performance_test.py new file mode 100644 index 000000000..bad54f4e3 --- /dev/null +++ b/dirsrvtests/tests/perf/search_performance_test.py @@ -0,0 +1,161 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 William Brown <[email protected]> +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +# Performance tests look different to others, they require some extra +# environmental settings. 
+ +import ldap +import os +from lib389 import DirSrv +from lib389._constants import DEFAULT_SUFFIX + +from lib389.topologies import topology_st as topology + +from lib389.idm.domain import Domain +from lib389.idm.group import Groups +from lib389.idm.user import nsUserAccounts +from lib389.backend import Backends + +from lib389.ldclt import Ldclt +import time + +# We want to write a CSV such as: +# category,1 thread,4 thread,8 thread,16 thread +# testcategory,500,800,1000,2000 +# testcategory2,500,800,1000,2000 +TEST_MARKER = 'configured: search_performance_test.py' +# GROUP_MAX = 4000 +# USER_MAX = 6000 + +GROUP_MAX = 4000 +USER_MAX = 6000 + +TARGET_HOST = os.environ.get('PERF_TARGET_HOST', 'localhost') +TARGET_PORT = os.environ.get('PERF_TARGET_PORT', '389') + +def assert_data_present(inst): + # Do we have the backend marker? + d = Domain(inst, DEFAULT_SUFFIX) + try: + desc = d.get_attr_val_utf8('description') + if desc == TEST_MARKER: + return + except: + # Just reset everything. + pass + # Reset the backends + bes = Backends(inst) + try: + be = bes.get(DEFAULT_SUFFIX) + be.delete() + except: + pass + + be = bes.create(properties={ + 'nsslapd-suffix': DEFAULT_SUFFIX, + 'cn': 'userRoot', + }) + be.create_sample_entries('001004002') + + # Load our data + # We can't use dbgen as that relies on local access :( + + # Add 40,000 groups + groups = Groups(inst, DEFAULT_SUFFIX) + for i in range(1,GROUP_MAX): + rdn = 'group_{0:07d}'.format(i) + groups.create(properties={ + 'cn': rdn, + }) + + # Add 60,000 users + users = nsUserAccounts(inst, DEFAULT_SUFFIX) + for i in range(1,USER_MAX): + rdn = 'user_{0:07d}'.format(i) + users.create(properties={ + 'uid': rdn, + 'cn': rdn, + 'displayName': rdn, + 'uidNumber' : '%s' % i, + 'gidNumber' : '%s' % i, + 'homeDirectory' : '/home/%s' % rdn, + 'userPassword': rdn, + }) + + # Add the marker + d.replace('description', TEST_MARKER) + # Done! + +# Single uid +# 1000 uid +# 4000 uid +# 5000 uid +# 10,000 uid + +# & of single uid +# & of two 1000 uid sets +# & of two 4000 uid sets +# & of two 5000 uid sets +# & of two 10,000 uid sets + +# | of single uid +# | of two 1000 uid sets +# | of two 4000 uid sets +# | of two 5000 uid sets +# | of two 10,000 uid sets + +# & of user and group + +# | of user and group + +def _do_search_performance(inst, thread_count): + # Configure thread count + # Restart + print("Configuring with %s threads ..." % thread_count) + time.sleep(1) + inst.config.set('nsslapd-threadnumber', str(thread_count)) + inst.restart() + ld = Ldclt(inst) + out = ld.search_loadtest(DEFAULT_SUFFIX, "(uid=user_XXXXXXX)", min=1, max=USER_MAX) + return out + +# Need a check here +def test_user_search_performance(): + inst = DirSrv(verbose=True) + inst.remote_simple_allocate( + f"ldaps://{TARGET_HOST}", + password="password" + ) + # Need a better way to set this. + inst.host = TARGET_HOST + inst.port = TARGET_PORT + inst.open(reqcert=ldap.OPT_X_TLS_NEVER) + assert_data_present(inst) + r1 = _do_search_performance(inst, 1) + # r2 = _do_search_performance(inst, 4) + # r3 = _do_search_performance(inst, 6) + # r4 = _do_search_performance(inst, 8) + # r5 = _do_search_performance(inst, 12) + r6 = _do_search_performance(inst, 16) + # print("category,t1,t4,t6,t8,t12,t16") + # print("search,%s,%s,%s,%s,%s,%s" % (r1, r2, r3, r4, r5, r6)) + +def test_group_search_performance(): + pass + +## TODO +# Tweak cache levels +# turbo mode +# ldclt threads = 2x server? 
+# add perf logs to each test + + + + diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index bfcf57475..8c62a4b22 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -31,6 +31,10 @@ #include <pwd.h> /* getpwnam */ #define _PSEP '/' +#ifdef SYSTEMTAP +#include <sys/sdt.h> +#endif + /************************************************************************** * GLOBALS, defines, and ... *************************************************************************/ @@ -2556,6 +2560,10 @@ vslapd_log_access(char *fmt, va_list ap) int32_t rc = LDAP_SUCCESS; time_t tnl; +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, vslapd_log_access__entry); +#endif + /* We do this sooner, because that we we can use the message in other calls */ if ((vlen = vsnprintf(vbuf, SLAPI_LOG_BUFSIZ, fmt, ap)) == -1) { log__error_emergency("vslapd_log_access, Unable to format message", 1, 0); @@ -2602,8 +2610,16 @@ vslapd_log_access(char *fmt, va_list ap) rc = -1; } +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, vslapd_log_access__prepared); +#endif + log_append_buffer2(tnl, loginfo.log_access_buffer, buffer, blen, vbuf, vlen); +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, vslapd_log_access__buffer); +#endif + return (rc); } diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c index d06d49b0f..9fe78655c 100644 --- a/ldap/servers/slapd/opshared.c +++ b/ldap/servers/slapd/opshared.c @@ -16,6 +16,10 @@ #include "log.h" #include "slap.h" +#ifdef SYSTEMTAP +#include <sys/sdt.h> +#endif + #define PAGEDRESULTS_PAGE_END 1 #define PAGEDRESULTS_SEARCH_END 2 @@ -271,6 +275,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) be_list[0] = NULL; referral_list[0] = NULL; +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, op_shared_search__entry); +#endif + /* get search parameters */ slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &base); slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn); @@ -636,6 +644,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) } } +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, op_shared_search__prepared); +#endif + nentries = 0; rc = -1; /* zero backends would mean failure */ while (be) { @@ -940,6 +952,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) be = next_be; /* this be won't be used for PAGED_RESULTS */ } +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, op_shared_search__backends); +#endif + /* if referrals were sent back by the mapping tree * add them to the list of referral in the pblock instead * of searching the backend @@ -1036,6 +1052,10 @@ free_and_return_nolock: slapi_ch_free_string(&proxydn); slapi_ch_free_string(&proxystr); + +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, op_shared_search__return); +#endif } /* Returns 1 if this processing on this entry is finished diff --git a/ldap/servers/slapd/search.c b/ldap/servers/slapd/search.c index 6cdb27601..b4ddf8897 100644 --- a/ldap/servers/slapd/search.c +++ b/ldap/servers/slapd/search.c @@ -30,6 +30,10 @@ #include "pratom.h" #include "snmp_collator.h" +#ifdef SYSTEMTAP +#include <sys/sdt.h> +#endif + static void log_search_access(Slapi_PBlock *pb, const char *base, int scope, const char *filter, const char *msg); void @@ -57,6 +61,9 @@ do_search(Slapi_PBlock *pb) Connection *pb_conn = NULL; slapi_log_err(SLAPI_LOG_TRACE, "do_search", "=>\n"); +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, do_search__entry); +#endif slapi_pblock_get(pb, SLAPI_OPERATION, &operation); ber = operation->o_ber; @@ -373,6 +380,11 @@ do_search(Slapi_PBlock *pb) slapi_pblock_set(pb, SLAPI_SEARCH_SIZELIMIT, &sizelimit); slapi_pblock_set(pb, 
SLAPI_SEARCH_TIMELIMIT, &timelimit); + + /* + * op_shared_search defines STAP_PROBE for __entry and __return, + * so these can be used to delineate the start and end here. + */ op_shared_search(pb, psearch ? 0 : 1 /* send result */); slapi_pblock_get(pb, SLAPI_PLUGIN_OPRETURN, &rc); @@ -402,6 +414,10 @@ free_and_return: } slapi_ch_free_string(&rawbase); } + +#ifdef SYSTEMTAP + STAP_PROBE(ns-slapd, do_search__return); +#endif } static void diff --git a/profiling/stap/probe_do_search_detail.stp b/profiling/stap/probe_do_search_detail.stp new file mode 100644 index 000000000..673dff1e0 --- /dev/null +++ b/profiling/stap/probe_do_search_detail.stp @@ -0,0 +1,64 @@ +#!/bin/env stap + +global do_search_full +global do_search_prepared +global do_search_complete +global do_search_finalise + +global entry_times% +global prepared_times% +global search_times% +global finalise_times% + +// do_search__entry +// do_search__prepared +// do_search__op_shared_search_complete +// do_search__return + +probe process(@1).mark("do_search__entry") { + entry_times[tid()] = gettimeofday_us() + prepared_times[tid()] = gettimeofday_us() +} + +probe process(@1).mark("op_shared_search__entry") { + do_search_prepared <<< gettimeofday_us() - prepared_times[tid()] + delete prepared_times[tid()] + + search_times[tid()] = gettimeofday_us() +} + +probe process(@1).mark("op_shared_search__return") { + do_search_complete <<< gettimeofday_us() - search_times[tid()] + delete search_times[tid()] + + finalise_times[tid()] = gettimeofday_us() +} + +probe process(@1).mark("do_search__return") { + do_search_finalise <<< gettimeofday_us() - finalise_times[tid()] + delete finalise_times[tid()] + + do_search_full <<< gettimeofday_us() - entry_times[tid()] + delete entry_times[tid()] +} + +function report() { + printf("Distribution of do_search_full latencies (in nanoseconds) for %d samples\n", @count(do_search_full)) + printf("max/avg/min: %d/%d/%d\n", @max(do_search_full), @avg(do_search_full), @min(do_search_full)) + print(@hist_log(do_search_full)) + + printf("Distribution of do_search_prepared latencies (in nanoseconds) for %d samples\n", @count(do_search_prepared)) + printf("max/avg/min: %d/%d/%d\n", @max(do_search_prepared), @avg(do_search_prepared), @min(do_search_prepared)) + print(@hist_log(do_search_prepared)) + + printf("Distribution of do_search_complete latencies (in nanoseconds) for %d samples\n", @count(do_search_complete)) + printf("max/avg/min: %d/%d/%d\n", @max(do_search_complete), @avg(do_search_complete), @min(do_search_complete)) + print(@hist_log(do_search_complete)) + + printf("Distribution of do_search_finalise latencies (in nanoseconds) for %d samples\n", @count(do_search_finalise)) + printf("max/avg/min: %d/%d/%d\n", @max(do_search_finalise), @avg(do_search_finalise), @min(do_search_finalise)) + print(@hist_log(do_search_finalise)) +} + +probe end { report() } + diff --git a/profiling/stap/probe_log_access_detail.stp b/profiling/stap/probe_log_access_detail.stp new file mode 100644 index 000000000..38d506e7c --- /dev/null +++ b/profiling/stap/probe_log_access_detail.stp @@ -0,0 +1,51 @@ +#!/bin/env stap + +global log_access_full +global log_access_prepared +global log_access_complete + +global entry_times% +global prepared_times% +global finalise_times% + +// vslapd_log_access__entry +// vslapd_log_access__prepared +// vslapd_log_access__buffer + + +probe process(@1).mark("vslapd_log_access__entry") { + entry_times[tid()] = gettimeofday_us() + prepared_times[tid()] = gettimeofday_us() +} + +probe 
process(@1).mark("vslapd_log_access__prepared") { + log_access_prepared <<< gettimeofday_us() - prepared_times[tid()] + delete prepared_times[tid()] + + finalise_times[tid()] = gettimeofday_us() +} + +probe process(@1).mark("vslapd_log_access__buffer") { + log_access_complete <<< gettimeofday_us() - finalise_times[tid()] + delete finalise_times[tid()] + + log_access_full <<< gettimeofday_us() - entry_times[tid()] + delete entry_times[tid()] +} + +function report() { + printf("Distribution of log_access_full latencies (in nanoseconds) for %d samples\n", @count(log_access_full)) + printf("max/avg/min: %d/%d/%d\n", @max(log_access_full), @avg(log_access_full), @min(log_access_full)) + print(@hist_log(log_access_full)) + + printf("Distribution of log_access_prepared latencies (in nanoseconds) for %d samples\n", @count(log_access_prepared)) + printf("max/avg/min: %d/%d/%d\n", @max(log_access_prepared), @avg(log_access_prepared), @min(log_access_prepared)) + print(@hist_log(log_access_prepared)) + + printf("Distribution of log_access_complete latencies (in nanoseconds) for %d samples\n", @count(log_access_complete)) + printf("max/avg/min: %d/%d/%d\n", @max(log_access_complete), @avg(log_access_complete), @min(log_access_complete)) + print(@hist_log(log_access_complete)) +} + +probe end { report() } + diff --git a/profiling/stap/probe_op_shared_search.stp b/profiling/stap/probe_op_shared_search.stp new file mode 100644 index 000000000..2fca6c31e --- /dev/null +++ b/profiling/stap/probe_op_shared_search.stp @@ -0,0 +1,64 @@ +#!/bin/env stap + +global op_shared_search_full +global op_shared_search_prepared +global op_shared_search_complete +global op_shared_search_finalise + +global entry_times% +global prepared_times% +global search_times% +global finalise_times% + +// op_shared_search__entry +// op_shared_search__prepared +// op_shared_search__backends +// op_shared_search__complete + +probe process(@1).mark("op_shared_search__entry") { + entry_times[tid()] = gettimeofday_us() + prepared_times[tid()] = gettimeofday_us() +} + +probe process(@1).mark("op_shared_search__prepared") { + op_shared_search_prepared <<< gettimeofday_us() - prepared_times[tid()] + delete prepared_times[tid()] + + search_times[tid()] = gettimeofday_us() +} + +probe process(@1).mark("op_shared_search__backends") { + op_shared_search_complete <<< gettimeofday_us() - search_times[tid()] + delete search_times[tid()] + + finalise_times[tid()] = gettimeofday_us() +} + +probe process(@1).mark("op_shared_search__return") { + op_shared_search_finalise <<< gettimeofday_us() - finalise_times[tid()] + delete finalise_times[tid()] + + op_shared_search_full <<< gettimeofday_us() - entry_times[tid()] + delete entry_times[tid()] +} + +function report() { + printf("Distribution of op_shared_search_full latencies (in nanoseconds) for %d samples\n", @count(op_shared_search_full)) + printf("max/avg/min: %d/%d/%d\n", @max(op_shared_search_full), @avg(op_shared_search_full), @min(op_shared_search_full)) + print(@hist_log(op_shared_search_full)) + + printf("Distribution of op_shared_search_prepared latencies (in nanoseconds) for %d samples\n", @count(op_shared_search_prepared)) + printf("max/avg/min: %d/%d/%d\n", @max(op_shared_search_prepared), @avg(op_shared_search_prepared), @min(op_shared_search_prepared)) + print(@hist_log(op_shared_search_prepared)) + + printf("Distribution of op_shared_search_complete latencies (in nanoseconds) for %d samples\n", @count(op_shared_search_complete)) + printf("max/avg/min: %d/%d/%d\n", 
@max(op_shared_search_complete), @avg(op_shared_search_complete), @min(op_shared_search_complete)) + print(@hist_log(op_shared_search_complete)) + + printf("Distribution of op_shared_search_finalise latencies (in nanoseconds) for %d samples\n", @count(op_shared_search_finalise)) + printf("max/avg/min: %d/%d/%d\n", @max(op_shared_search_finalise), @avg(op_shared_search_finalise), @min(op_shared_search_finalise)) + print(@hist_log(op_shared_search_finalise)) +} + +probe end { report() } + diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 4f4705e91..7ab8674c9 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -1021,7 +1021,7 @@ class DirSrv(SimpleLDAPObject, object): self.log.debug("Using external ca certificate %s", certdir) self.set_option(ldap.OPT_X_TLS_CACERTDIR, ensure_str(certdir)) - if certdir or starttls: + if certdir or starttls or uri.startswith('ldaps://'): try: # Note this sets LDAP.OPT not SELF. Because once self has opened # it can NOT change opts on reused (ie restart) @@ -1119,6 +1119,10 @@ class DirSrv(SimpleLDAPObject, object): @raise ValueError ''' + if not self.isLocal: + self.log.error("This is a remote instance!") + input('Press Enter when the instance has started ...') + return if self.status() is True: return @@ -1186,6 +1190,11 @@ class DirSrv(SimpleLDAPObject, object): @raise ValueError ''' + if not self.isLocal: + self.log.error("This is a remote instance!") + input('Press Enter when the instance has stopped ...') + return + if self.status() is False: return diff --git a/src/lib389/lib389/ldclt.py b/src/lib389/lib389/ldclt.py index 0410650f0..9b104146b 100644 --- a/src/lib389/lib389/ldclt.py +++ b/src/lib389/lib389/ldclt.py @@ -7,7 +7,7 @@ # --- END COPYRIGHT BLOCK --- import subprocess -from lib389.utils import format_cmd_list +from lib389.utils import format_cmd_list, ensure_str """ This class will allow general usage of ldclt. @@ -99,16 +99,28 @@ loginShell: /bin/false self.log.debug("ldclt loadtest ...") self.log.debug(format_cmd_list(cmd)) try: - result = subprocess.check_output(cmd) + result = ensure_str(subprocess.check_output(cmd)) # If verbose, capture / log the output. except subprocess.CalledProcessError as e: print(format_cmd_list(cmd)) print(result) raise(e) self.log.debug(result) - return result + # The output looks like: + # ldclt[44308]: Average rate: 4017.60/thr (4017.60/sec), total: 40176 + # ldclt[44308]: Number of samples achieved. Bye-bye... + # ldclt[44308]: All threads are dead - exit. + # ldclt[44308]: Global average rate: 40604.00/thr (4060.40/sec), total: 406040 + # ldclt[44308]: Global number times "no activity" reports: never + # ldclt[44308]: Global no error occurs during this session. + # So we want the "global avg rate" per second. 
+ section = None + for line in result.splitlines(): + if 'Global average rate' in line: + section = line.split('(')[1].split(')')[0].split('/')[0] + return section - def bind_loadtest(self, subtree, min=1000, max=9999, rounds=3): + def bind_loadtest(self, subtree, min=1000, max=9999, rounds=10): # The bind users will be uid=userXXXX digits = len('%s' % max) cmd = [ @@ -128,9 +140,9 @@ loginShell: /bin/false '-e', 'bindonly', ] - self._run_ldclt(cmd) + return self._run_ldclt(cmd) - def search_loadtest(self, subtree, fpattern, min=1000, max=9999, rounds=3): + def search_loadtest(self, subtree, fpattern, min=1000, max=9999, rounds=10): digits = len('%s' % max) cmd = [ '%s/ldclt' % self.ds.get_bin_dir(), @@ -151,4 +163,4 @@ loginShell: /bin/false '-e', 'randomattrlist=cn:uid:ou', ] - self._run_ldclt(cmd) + return self._run_ldclt(cmd) diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py index e5506431f..2e409eac2 100644 --- a/src/lib389/lib389/topologies.py +++ b/src/lib389/lib389/topologies.py @@ -320,7 +320,6 @@ def topology_st_gssapi(request): return topology - @pytest.fixture(scope="module") def topology_i2(request): """Create two instance DS deployment"""
0
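The ldclt change in the record above extracts the per-second figure from the "Global average rate" line of the load-test output. A minimal standalone Python sketch of that parsing, assuming the output format quoted in the diff's own comments (real ldclt output may vary between versions):

def parse_ldclt_rate(output):
    """Return the global per-second rate reported by ldclt, or None if absent."""
    for line in output.splitlines():
        if 'Global average rate' in line:
            # e.g. "ldclt[44308]: Global average rate: 40604.00/thr (4060.40/sec), total: 406040"
            return float(line.split('(')[1].split(')')[0].split('/')[0])
    return None

sample = 'ldclt[44308]: Global average rate: 40604.00/thr (4060.40/sec), total: 406040'
assert parse_ldclt_rate(sample) == 4060.40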
6098e5d18fae7beaef024c8314928e55de47bb4e
389ds/389-ds-base
185811 - Don't check localuser config on Windows
commit 6098e5d18fae7beaef024c8314928e55de47bb4e Author: Nathan Kinder <[email protected]> Date: Sun Mar 19 18:36:40 2006 +0000 185811 - Don't check localuser config on Windows diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 072fd5f73..022a126ce 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -3473,6 +3473,7 @@ log__open_errorlogfile(int logfile_state, int locked) slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); +#ifndef _WIN32 if ( slapdFrontendConfig->localuser != NULL ) { if ( (pw = getpwnam( slapdFrontendConfig->localuser )) == NULL ) return LOG_UNABLE_TO_OPENFILE; @@ -3480,6 +3481,7 @@ log__open_errorlogfile(int logfile_state, int locked) else { return LOG_UNABLE_TO_OPENFILE; } +#endif if (!locked) LOG_ERROR_LOCK_WRITE( );
0
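The commit above wraps the localuser/getpwnam lookup in #ifndef _WIN32 because that lookup does not exist on Windows. A rough Python analogy of the same platform guard, using the POSIX-only pwd module; the helper name is invented for illustration and is not part of the server code:

import os

def local_user_exists(username):
    # pwd (the getpwnam equivalent) is only available on POSIX, so skip the check on Windows.
    if os.name == 'nt':
        return True
    import pwd
    try:
        pwd.getpwnam(username)
        return True
    except KeyError:
        return False

print(local_user_exists('dirsrv'))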
0ae55bcd61497c2fa7aaade40fe4ff6c91216325
389ds/389-ds-base
Ticket #47875 - dirsrv not running with old openldap Description: AC_SUBST(ldap_lib_ldif) was missing from configure.ac for @ldap_lib_dir@. https://fedorahosted.org/389/ticket/47875 Reviewed by [email protected] (Thank you, Rich!!)
commit 0ae55bcd61497c2fa7aaade40fe4ff6c91216325 Author: Noriko Hosoi <[email protected]> Date: Fri Aug 15 16:13:46 2014 -0700 Ticket #47875 - dirsrv not running with old openldap Description: AC_SUBST(ldap_lib_ldif) was missing from configure.ac for @ldap_lib_dir@. https://fedorahosted.org/389/ticket/47875 Reviewed by [email protected] (Thank you, Rich!!) diff --git a/configure.ac b/configure.ac index 16ab6a93c..0f1ddf698 100644 --- a/configure.ac +++ b/configure.ac @@ -709,6 +709,7 @@ AC_SUBST(openldap_inc) AC_SUBST(openldap_lib) AC_SUBST(openldap_libdir) AC_SUBST(openldap_bindir) +AC_SUBST(ldap_lib_ldif) AC_SUBST(ldaptool_bindir) AC_SUBST(ldaptool_opts) AC_SUBST(plainldif_opts)
0
324cf9f38f57108eda0965dfe9415f39ecf63a26
389ds/389-ds-base
Ticket 48280 - enable logging of internal ops in the audit log Bug Description: many plugins add and modify entries, but these changes are not visible in the audit log. It can be very useful to have these changes available. Fix Description: This feature actually already existed: nsslapd-logAccess nsslapd-logAudit On a plugin, will turn on the respective logs. This change brings the audit log in line with access to respect the value of nsslapd-plugin-logging Such that when plugin-logging is enabled globally in cn=config, plugins will log based on the values in: nsslapd-accesslog-logging-enabled nsslapd-auditlog-logging-enabled nsslapd-auditfaillog-logging-enabled https://fedorahosted.org/389/ticket/48280 Author: wibrown Review by: ???
commit 324cf9f38f57108eda0965dfe9415f39ecf63a26 Author: William Brown <[email protected]> Date: Fri Dec 4 14:47:46 2015 +1000 Ticket 48280 - enable logging of internal ops in the audit log Bug Description: many plugins add and modify entries, but these changes are not visible in the audit log. It can be very useful to have these changes available. Fix Description: This feature actually already existed: nsslapd-logAccess nsslapd-logAudit On a plugin, will turn on the respective logs. This change brings the audit log in line with access to respect the value of nsslapd-plugin-logging Such that when plugin-logging is enabled globally in cn=config, plugins will log based on the values in: nsslapd-accesslog-logging-enabled nsslapd-auditlog-logging-enabled nsslapd-auditfaillog-logging-enabled https://fedorahosted.org/389/ticket/48280 Author: wibrown Review by: ??? diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c index 0e99095c6..4ab644bc4 100644 --- a/ldap/servers/slapd/plugin.c +++ b/ldap/servers/slapd/plugin.c @@ -3808,7 +3808,7 @@ int plugin_build_operation_action_bitmap (int input_actions, const struct slapdp if (plugin->plg_conf.plgc_log_access || config_get_plugin_logging()) result_actions |= OP_FLAG_ACTION_LOG_ACCESS; - if (plugin->plg_conf.plgc_log_audit) + if (plugin->plg_conf.plgc_log_audit || config_get_plugin_logging()) result_actions |= OP_FLAG_ACTION_LOG_AUDIT; /*
0
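The commit above makes plugin audit logging follow nsslapd-plugin-logging together with the per-log *-logging-enabled switches. A minimal python-ldap sketch that turns those switches on in cn=config; the attribute names come from the commit message, while the URL and credentials are placeholders:

import ldap

conn = ldap.initialize('ldap://localhost:389')            # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')    # placeholder credentials

# Let plugin-generated operations be logged according to the global log switches.
conn.modify_s('cn=config', [
    (ldap.MOD_REPLACE, 'nsslapd-plugin-logging', b'on'),
    (ldap.MOD_REPLACE, 'nsslapd-accesslog-logging-enabled', b'on'),
    (ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', b'on'),
])
conn.unbind_s()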
0f38410ae6fcd873b4ef9be0d98c2bd26e15c5b9
389ds/389-ds-base
Issue 4498 - BUG - entryuuid replication may not work (#4503) Bug Description: EntryUUID can be duplicated in replication, due to a missing check in assign_uuid Fix Description: Add a test case to determine how this occurs, and add the correct check for existing entryUUID. fixes: https://github.com/389ds/389-ds-base/issues/4498 Author: William Brown <[email protected]> Review by: @mreynolds389
commit 0f38410ae6fcd873b4ef9be0d98c2bd26e15c5b9 Author: Firstyear <[email protected]> Date: Thu Dec 17 08:22:23 2020 +1000 Issue 4498 - BUG - entryuuid replication may not work (#4503) Bug Description: EntryUUID can be duplicated in replication, due to a missing check in assign_uuid Fix Description: Add a test case to determine how this occurs, and add the correct check for existing entryUUID. fixes: https://github.com/389ds/389-ds-base/issues/4498 Author: William Brown <[email protected]> Review by: @mreynolds389 diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py new file mode 100644 index 000000000..770690c5a --- /dev/null +++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py @@ -0,0 +1,77 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown <[email protected]> +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import logging +from lib389.topologies import topology_m2 as topo_m2 +from lib389.idm.user import nsUserAccounts +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389._constants import * +from lib389.replica import ReplicationManager + +default_paths = Paths() + +pytestmark = pytest.mark.tier1 + [email protected](not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") + +def test_entryuuid_with_replication(topo_m2): + """ Check that entryuuid works with replication + + :id: a5f15bf9-7f63-473a-840c-b9037b787024 + + :setup: two node mmr + + :steps: + 1. Create an entry on one server + 2. Wait for replication + 3. Assert it is on the second + + :expectedresults: + 1. Success + 1. Success + 1. 
Success + """ + + server_a = topo_m2.ms["master1"] + server_b = topo_m2.ms["master2"] + server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) + server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) + + repl = ReplicationManager(DEFAULT_SUFFIX) + + account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000) + euuid_a = account_a.get_attr_vals_utf8('entryUUID') + print("🧩 %s" % euuid_a) + assert(euuid_a is not None) + assert(len(euuid_a) == 1) + + repl.wait_for_replication(server_a, server_b) + + account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000") + euuid_b = account_b.get_attr_vals_utf8('entryUUID') + print("🧩 %s" % euuid_b) + + server_a.config.loglevel(vals=(ErrorLog.DEFAULT,)) + server_b.config.loglevel(vals=(ErrorLog.DEFAULT,)) + + assert(euuid_b is not None) + assert(len(euuid_b) == 1) + assert(euuid_b == euuid_a) + + account_b.set("description", "update") + repl.wait_for_replication(server_b, server_a) + + euuid_c = account_a.get_attr_vals_utf8('entryUUID') + print("🧩 %s" % euuid_c) + assert(euuid_c is not None) + assert(len(euuid_c) == 1) + assert(euuid_c == euuid_a) + diff --git a/src/plugins/entryuuid/src/lib.rs b/src/plugins/entryuuid/src/lib.rs index 92977db05..0197c5e83 100644 --- a/src/plugins/entryuuid/src/lib.rs +++ b/src/plugins/entryuuid/src/lib.rs @@ -30,6 +30,16 @@ slapi_r_search_callback_mapfn!(entryuuid, entryuuid_fixup_cb, entryuuid_fixup_ma fn assign_uuid(e: &mut EntryRef) { let sdn = e.get_sdnref(); + // 🚧 safety barrier 🚧 + if e.contains_attr("entryUUID") { + log_error!( + ErrorLevel::Trace, + "assign_uuid -> entryUUID exists, skipping dn {}", + sdn.to_dn_string() + ); + return; + } + // We could consider making these lazy static. let config_sdn = Sdn::try_from("cn=config").expect("Invalid static dn"); let schema_sdn = Sdn::try_from("cn=schema").expect("Invalid static dn"); @@ -66,7 +76,15 @@ impl SlapiPlugin3 for EntryUuid { } fn betxn_pre_add(pb: &mut PblockRef) -> Result<(), PluginError> { - log_error!(ErrorLevel::Trace, "betxn_pre_add"); + if pb.get_is_replicated_operation() { + log_error!( + ErrorLevel::Trace, + "betxn_pre_add -> replicated operation, will not change" + ); + return Ok(()); + } + + log_error!(ErrorLevel::Trace, "betxn_pre_add -> start"); let mut e = pb.get_op_add_entryref().map_err(|_| PluginError::Pblock)?; assign_uuid(&mut e); diff --git a/src/slapi_r_plugin/src/constants.rs b/src/slapi_r_plugin/src/constants.rs index cc7d6d446..17282d719 100644 --- a/src/slapi_r_plugin/src/constants.rs +++ b/src/slapi_r_plugin/src/constants.rs @@ -169,6 +169,8 @@ pub(crate) enum PblockType { AddEntry = 60, /// SLAPI_BACKEND Backend = 130, + /// SLAPI_IS_REPLICATED_OPERATION + IsReplicationOperation = 142, /// SLAPI_PLUGIN_MR_NAMES MRNames = 624, /// SLAPI_PLUGIN_SYNTAX_NAMES diff --git a/src/slapi_r_plugin/src/pblock.rs b/src/slapi_r_plugin/src/pblock.rs index b34fa573c..6b686aa53 100644 --- a/src/slapi_r_plugin/src/pblock.rs +++ b/src/slapi_r_plugin/src/pblock.rs @@ -311,4 +311,11 @@ impl PblockRef { pub fn get_op_result(&mut self) -> i32 { self.get_value_i32(PblockType::OpResult).unwrap_or(-1) } + + pub fn get_is_replicated_operation(&mut self) -> bool { + let i = self.get_value_i32(PblockType::IsReplicationOperation).unwrap_or(0); + // Because rust returns the result of the last evaluation, we can + // just return if not equal 0. + i != 0 + } }
0
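The test added above asserts that an entry carries exactly one entryUUID and that it stays identical on both suppliers after replication. A stripped-down python-ldap version of that comparison; the URLs, credentials and entry DN are placeholders for two replicated instances:

import ldap

def get_entryuuid(uri, dn):
    conn = ldap.initialize(uri)
    conn.simple_bind_s('cn=Directory Manager', 'password')   # placeholder credentials
    # entryUUID is operational, so it has to be requested explicitly.
    result = conn.search_s(dn, ldap.SCOPE_BASE, '(objectClass=*)', ['entryUUID'])
    conn.unbind_s()
    attrs = {k.lower(): v for k, v in result[0][1].items()}
    return attrs.get('entryuuid', [])

dn = 'uid=test_user_2000,ou=people,dc=example,dc=com'         # placeholder entry
uuid_a = get_entryuuid('ldap://supplier1:389', dn)            # placeholder hosts
uuid_b = get_entryuuid('ldap://supplier2:389', dn)
assert len(uuid_a) == 1 and uuid_a == uuid_b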
e9917234c404bec862bd9df32682ab178641928e
389ds/389-ds-base
Ticket 48820 - Move Encryption and RSA to the new object types Fix Description: Make Encryption and RSA types on the new objects. To make this work, rejig some of the create code to move creation to the single item. https://fedorahosted.org/389/ticket/48820 Author: wibrown Review by: spichugi (Thanks!)
commit e9917234c404bec862bd9df32682ab178641928e Author: William Brown <[email protected]> Date: Mon May 30 13:22:39 2016 +1000 Ticket 48820 - Move Encryption and RSA to the new object types Fix Description: Make an Encyrption and RSA types on the new objects. To make this work, rejig some of the create code to move creation to the single item. https://fedorahosted.org/389/ticket/48820 Author: wibrown Review by: spichugi (Thanks!) diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 0468d59c2..c897cee2f 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -339,8 +339,8 @@ class DirSrv(SimpleLDAPObject): from lib389.index import Index from lib389.aci import Aci from lib389.monitor import Monitor - if MAJOR < 3: - from lib389.nss_ssl import NssSsl + from lib389.nss_ssl import NssSsl + from lib389.config import RSA from lib389.dirsrv_log import DirsrvAccessLog, DirsrvErrorLog from lib389.ldclt import Ldclt @@ -358,8 +358,9 @@ class DirSrv(SimpleLDAPObject): self.aci = Aci(self) self.monitor = Monitor(self) # Do we have a certdb path? - if MAJOR < 3: - self.nss_ssl = NssSsl(self) + #if MAJOR < 3: + self.nss_ssl = NssSsl(self) + self.rsa = RSA(self) self.ds_access_log = DirsrvAccessLog(self) self.ds_error_log = DirsrvErrorLog(self) self.ldclt = Ldclt(self) diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py index 2845b8422..011baa1d1 100644 --- a/src/lib389/lib389/_mapped_object.py +++ b/src/lib389/lib389/_mapped_object.py @@ -63,24 +63,29 @@ class DSLogging(object): class DSLdapObject(DSLogging): + # TODO: Automatically create objects when they are requested to have properties added def __init__(self, instance, dn=None, batch=False): """ """ self._instance = instance super(DSLdapObject, self).__init__(self._instance.verbose) # This allows some factor objects to be overriden - self._dn = '' + self._dn = None if dn is not None: self._dn = dn self._batch = batch - self._naming_attr = None self._protected = True + # Used in creation + self._create_objectclasses = [] + self._rdn_attribute = None + self._must_attributes = None + self._basedn = "" def __unicode__(self): val = self._dn - if self._naming_attr: - val = self.get(self._naming_attr) + if self._rdn_attribute: + val = self.get(self._rdn_attribute) return ensure_str(val) def __str__(self): @@ -98,6 +103,7 @@ class DSLdapObject(DSLogging): def get(self, key): """Get an attribute under dn""" self._log.debug("%s get(%r)" % (self._dn, key)) + # We might need to add a state check for NONE dn. if self._instance.state != DIRSRV_STATE_ONLINE: ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") # In the future, I plan to add a mode where if local == true, we can use @@ -123,6 +129,51 @@ class DSLdapObject(DSLogging): if not self._protected: pass + def _validate(self, tdn, properties): + """ + Used to validate a create request. + This way, it can be over-ridden without affecting + the create types + + It also checks that all the values in _must_attribute exist + in some form in the dictionary + + It has the useful trick of returning the dn, so subtypes + can use extra properties to create the dn's here for this. + """ + if properties is None: + raise ldap.UNWILLING_TO_PERFORM('Invalid request to create. Properties cannot be None') + if type(properties) != dict: + raise ldap.UNWILLING_TO_PERFORM("properties must be a dictionary") + + # I think this needs to be made case insensitive + # How will this work with the dictionary? 
+ for attr in self._must_attributes: + if properties.get(attr, None) is None: + raise ldap.UNWILLING_TO_PERFORM('Attribute %s must not be None' % attr) + + # We may need to map over the data in the properties dict to satisfy python-ldap + # + # Do we need to do extra dn validation here? + return (tdn, properties) + + def create(self, dn, properties=None): + assert(len(self._create_objectclasses) > 0) + self._log.debug('Creating %s : %s' % (dn, properties)) + # Make sure these aren't none. + # Create the dn based on the various properties. + (dn, valid_props) = self._validate(dn, properties) + # Check if the entry exists or not? .add_s is going to error anyway ... + self._log.debug('Validated %s : %s' % (dn, properties)) + + e = Entry(dn) + e.update({'objectclass' : self._create_objectclasses}) + e.update(valid_props) + # We rely on exceptions here to indicate failure to the parent. + self._instance.add_s(e) + # If it worked, we need to fix our instance dn + self._dn = dn + # A challenge of this, is how do we manage indexes? They have two naming attribunes.... @@ -132,14 +183,11 @@ class DSLdapObjects(DSLogging): self._instance = instance super(DSLdapObjects, self).__init__(self._instance.verbose) self._objectclasses = [] - self._create_objectclasses = [] self._filterattrs = [] self._list_attrlist = ['dn'] self._basedn = "" self._batch = batch self._scope = ldap.SCOPE_SUBTREE - self._rdn_attribute = None - self._must_attributes = None def list(self): # Filter based on the objectclasses and the basedn @@ -181,17 +229,10 @@ class DSLdapObjects(DSLogging): raise ldap.UNWILLING_TO_PERFORM("Too many objects matched selection criteria %s" % selector) return self._childobject(instance=self._instance, dn=results[0].dn, batch=self._batch) + def _validate(self, rdn, properties): """ - Used to validate a create request. - This way, it can be over-ridden without affecting - the create types - - It also checks that all the values in _must_attribute exist - in some form in the dictionary - - It has the useful trick of returning the dn, so subtypes - can use extra properties to create the dn's here for this. + Validate the factor part of the creation """ if properties is None: raise ldap.UNWILLING_TO_PERFORM('Invalid request to create. Properties cannot be None') @@ -211,32 +252,15 @@ class DSLdapObjects(DSLogging): if type(rdn) != str: raise ldap.UNWILLING_TO_PERFORM("rdn %s must be a utf8 string (str)", rdn) - for attr in self._must_attributes: - if properties.get(attr, None) is None: - raise ldap.UNWILLING_TO_PERFORM('Attribute %s must not be None' % attr) - - # We may need to map over the data in the properties dict to satisfy python-ldap - # to do str -> bytes - # - # Do we need to fix anything here in the rdn_attribute? - dn = '%s=%s,%s' % (self._rdn_attribute, rdn, self._basedn) - # Do we need to do extra dn validation here? - return (dn, rdn, properties) - def create(self, rdn=None, properties=None): - assert(len(self._create_objectclasses) > 0) - self._log.debug('Creating %s : %s' % (rdn, properties)) - # Make sure these aren't none. - # Create the dn based on the various properties. - (dn, rdn, valid_props) = self._validate(rdn, properties) - # Check if the entry exists or not? .add_s is going to error anyway ... - self._log.debug('Validated %s : %s' % (dn, properties)) - - e = Entry(dn) - e.update({'objectclass' : self._create_objectclasses}) - e.update(valid_props) - self._instance.add_s(e) - - # Now return the created instance. 
- return self._childobject(instance=self._instance, dn=dn, batch=self._batch) + # Create the object + # Should we inject the rdn to properties? + co = self._childobject(instance=self._instance, batch=self._batch) + # Make the rdn naming attr avaliable + self._rdn_attribute = co._rdn_attribute + (rdn, properties) = self._validate(rdn, properties) + # Do we need to fix anything here in the rdn_attribute? + dn = '%s=%s,%s' % (co._rdn_attribute, rdn, self._basedn) + # Now actually commit the creation req + return co.create(dn, properties) diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py index aba4d40d5..9ed2a0422 100644 --- a/src/lib389/lib389/backend.py +++ b/src/lib389/lib389/backend.py @@ -380,24 +380,12 @@ class BackendLegacy(object): class Backend(DSLdapObject): def __init__(self, instance, dn=None, batch=False): super(Backend, self).__init__(instance, dn, batch) - self._naming_attr = 'cn' + self._rdn_attribute = 'cn' + self._must_attributes = ['nsslapd-suffix', 'cn'] def create_sample_entries(self): self._log.debug('Creating sample entries ....') -# This only does ldbm backends. Chaining backends are a special case -# of this, so they can be subclassed off. -class Backends(DSLdapObjects): - def __init__(self, instance, batch=False): - super(Backends, self).__init__(instance=instance, batch=False) - self._objectclasses = [BACKEND_OBJECTCLASS_VALUE] - self._create_objectclasses = self._objectclasses + ['top', 'extensibleObject' ] - self._filterattrs = ['cn', 'nsslapd-suffix', 'nsslapd-directory'] - self._basedn = DN_LDBM - self._childobject = Backend - self._rdn_attribute = 'cn' - self._must_attributes = ['nsslapd-suffix', 'cn'] - def _validate(self, rdn, properties): # We always need to call the super validate first. This way we can # guarantee that properties is a dictionary. @@ -420,12 +408,21 @@ class Backends(DSLdapObjects): return (dn, rdn, nprops) def create(self, rdn=None, properties=None): - # properties for a backend might contain a key called BACKEND_SAMPLE_ENTRIES - # We need to pop this value out, and pass it to our new instance. sample_entries = properties.pop(BACKEND_SAMPLE_ENTRIES, False) - be_inst = super(Backends, self).create(rdn, properties) + super(Backend, self).create(rdn, properties) if sample_entries is True: be_inst.create_sample_entries() - return be_inst + +# This only does ldbm backends. Chaining backends are a special case +# of this, so they can be subclassed off. +class Backends(DSLdapObjects): + def __init__(self, instance, batch=False): + super(Backends, self).__init__(instance=instance, batch=False) + self._objectclasses = [BACKEND_OBJECTCLASS_VALUE] + self._create_objectclasses = self._objectclasses + ['top', 'extensibleObject' ] + self._filterattrs = ['cn', 'nsslapd-suffix', 'nsslapd-directory'] + self._basedn = DN_LDBM + self._childobject = Backend + diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py index d4c00a7d6..d35efc97e 100644 --- a/src/lib389/lib389/config.py +++ b/src/lib389/lib389/config.py @@ -106,6 +106,7 @@ class Config(DSLdapObject): 'nsSSLPersonalitySSL': 'Server-Cert' } """ + self._log.debug("config.enable_ssl is deprecated! 
Use RSA, Encryption instead!") self._log.debug("configuring SSL with secargs:%r" % secargs) secargs = secargs or {} @@ -151,3 +152,49 @@ class Config(DSLdapObject): fields = 'nsslapd-security nsslapd-ssl-check-hostname'.split() return self.conn.getEntry(DN_CONFIG, attrlist=fields) + + +class Encryption(DSLdapObject): + """ + Manage "cn=encryption,cn=config" tree, including: + - ssl ciphers + - ssl / tls levels + """ + def __init__(self, conn, batch=False): + """@param conn - a DirSrv instance """ + super(Encryption, self).__init__(instance=conn, batch=batch) + self._dn = 'cn=encryption,%s' % DN_CONFIG + # Once created, don't allow it's removal + self._protected = True + + +class RSA(DSLdapObject): + """ + Manage the "cn=RSA,cn=encryption,cn=config" object + - Set the certificate name + - Database path + - ssl token name + """ + def __init__(self, conn, batch=False): + """@param conn - a DirSrv instance """ + super(RSA, self).__init__(instance=conn, batch=batch) + self._dn = 'cn=RSA,cn=encryption,%s' % DN_CONFIG + self._create_objectclasses = ['top', 'nsEncryptionModule'] + self._rdn_attribute = 'cn' + self._must_attributes = ['cn'] + # Once we create it, don't remove it + self._protected = True + + def _validate(self, tdn, properties): + (dn, valid_props) = super(RSA, self)._validate(tdn, properties) + # Ensure that dn matches self._dn + assert(self._dn == dn) + return (dn, valid_props) + + def create(self, dn=None, properties={'cn': 'RSA'}): + # Is this the best way to for the dn? + if dn is not None: + self._log.debug("dn on cn=Rsa create request is not None. This is a mistake.") + super(RSA, self).create(dn=self._dn, properties=properties) + +
0
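The refactor above moves _create_objectclasses, _rdn_attribute and _must_attributes onto the single DSLdapObject so that create() can validate and assemble the entry itself. A toy Python sketch of that shape, independent of lib389 and with invented class names, showing only the pattern (a real create() would add the entry over LDAP instead of returning a dict):

class LdapObjectSketch:
    def __init__(self, dn=None):
        self._dn = dn
        self._create_objectclasses = []   # filled in by subclasses
        self._rdn_attribute = None
        self._must_attributes = []

    def _validate(self, dn, properties):
        # Reject creates that are missing any mandatory attribute.
        if properties is None or not isinstance(properties, dict):
            raise ValueError('properties must be a dictionary')
        for attr in self._must_attributes:
            if properties.get(attr) is None:
                raise ValueError('Attribute %s must not be None' % attr)
        return dn, properties

    def create(self, dn, properties=None):
        assert self._create_objectclasses
        dn, props = self._validate(dn, properties)
        entry = {'objectclass': list(self._create_objectclasses)}
        entry.update(props)
        # A real implementation would add_s() the entry here; we just record the dn.
        self._dn = dn
        return entry

class RsaSketch(LdapObjectSketch):
    def __init__(self):
        super().__init__(dn='cn=RSA,cn=encryption,cn=config')
        self._create_objectclasses = ['top', 'nsEncryptionModule']
        self._rdn_attribute = 'cn'
        self._must_attributes = ['cn']

print(RsaSketch().create('cn=RSA,cn=encryption,cn=config', {'cn': 'RSA'}))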
d1ca0fece890c93f01592a279a26a26b1d5e1877
389ds/389-ds-base
Ticket 49554 - Update Makefile for README.md Description: The Makefile needed to be updated for README.md https://pagure.io/389-ds-base/issue/49554 Reviewed by: mreynolds(one line commit rule)
commit d1ca0fece890c93f01592a279a26a26b1d5e1877 Author: Mark Reynolds <[email protected]> Date: Wed Jan 31 14:27:51 2018 -0500 Ticket 49554 - Update Makefile for README.md Description: The Makefile needed to be updated for README.md https://pagure.io/389-ds-base/issue/49554 Reviewed by: mreynolds(one line commit rule) diff --git a/Makefile.am b/Makefile.am index 9fae072a3..a47201a54 100644 --- a/Makefile.am +++ b/Makefile.am @@ -617,7 +617,7 @@ dist_noinst_DATA = \ $(srcdir)/rpm/389-ds-base.spec.in \ $(srcdir)/rpm/389-ds-base-devel.README \ $(srcdir)/rpm/389-ds-base-git.sh \ - $(srcdir)/README \ + $(srcdir)/README.md \ $(srcdir)/LICENSE \ $(srcdir)/LICENSE.* \ $(srcdir)/VERSION.sh \
0
39ba12b4170d3d7ff65735dbac544c034eabe690
389ds/389-ds-base
Ticket #47422 - With 1.3.04 and subtree-renaming OFF, when a user is deleted after restarting the server, the same entry can't be added Bug description: 1) As reported by baburaje12, regardless of the nsslapd-subtree-rename-switch, "entrydn" was not stored in the id2entry db. The attribute value had to be stored in the db file if the switch was off. Attribute values to avoid storing in the db file are maintained in an array protected_attrs_all statically. "Entrydn" should be dynamic depending on the switch. 2) When the switch is off, import was skipping to generate the parentid index, which leads to skipping to create the entrydn, as well. Fix description: 1) Instead of keeping "entrydn" in the protected_attrs_all statically, this patch introduces an api set_attr_to_protected_list to add or remove "entrydn" based upon the value of nsslapd-subtree-rename-switch. 2) The condition to create a parentid index is fixed to always create it if the nsslapd-subtree-rename-switch is off.
commit 39ba12b4170d3d7ff65735dbac544c034eabe690 Author: Noriko Hosoi <[email protected]> Date: Thu Oct 10 10:38:34 2013 -0700 Ticket #47422 - With 1.3.04 and subtree-renaming OFF, when a user is deleted after restarting the server, the same entry can't be added Bug description: 1) As reported by baburaje12, regardless of the nsslapd-subtree- rename-switch, "entrydn" was not stored in the id2entry db. The attribute value had to be stored in the db file if the switch was off. Attribute values to avoid storing in the db file are maintained in an array protected_attrs_all statically. "Entrydn" should be dynamic depending on the switch. 2) When the switch is off, import was skipping to generate the parentid index, which leads to skipping to create the entrydn, as well. Fix description: 1) Instead of keeping "entrydn" in the protected_attrs_all statically, this patch introduces an api set_attr_to_protected_list to add or remove "entrydn" based upon the value of nsslapd-subtree-rename- switch. 2) The condition to create a parentid index is fixed to always create it if the nsslapd-subtree-rename-switch is off. https://fedorahosted.org/389/ticket/47422 Reviewed by rmeggins (Thank you, Rich!) diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c index 3a3aab8d1..9191ee3f9 100644 --- a/ldap/servers/slapd/back-ldbm/import-threads.c +++ b/ldap/servers/slapd/back-ldbm/import-threads.c @@ -2713,9 +2713,10 @@ next: goto error; } - if (entryrdn_get_switch() /* subtree-rename: on */ && + if ((entryrdn_get_switch() /* subtree-rename: on */ && !slapi_entry_flag_is_set(fi->entry->ep_entry, - SLAPI_ENTRY_FLAG_TOMBSTONE)) { + SLAPI_ENTRY_FLAG_TOMBSTONE)) || + !entryrdn_get_switch()) { /* parentid index * (we have to do this here, because the parentID is dependent on * looking up by entrydn/entryrdn.) diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c index d381166ed..2550bc921 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c @@ -136,6 +136,15 @@ void entryrdn_set_switch(int val) { entryrdn_switch = val; + + if (entryrdn_switch) { /* entryrdn on */ + /* Don't store entrydn in the db */ + set_attr_to_protected_list(SLAPI_ATTR_ENTRYDN, 0); + } else { /* entryrdn off */ + /* Store entrydn in the db */ + set_attr_to_protected_list(SLAPI_ATTR_ENTRYDN, 1); + } + return; } diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index 549f29467..59f2340e4 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -76,9 +76,27 @@ static void entry_vattr_free_nolock(Slapi_Entry *e); /* protected attributes which are not included in the flattened entry, * which will be stored in the db. */ -static char *protected_attrs_all [] = {PSEUDO_ATTR_UNHASHEDUSERPASSWORD, - SLAPI_ATTR_ENTRYDN, - NULL}; +static char **protected_attrs_all = NULL; + +/* + * add or delete attr to or from protected_attr_all list depending on the flag. 
+ * flag: 0 -- add + * 1 -- delete + */ +void +set_attr_to_protected_list(char *attr, int flag) +{ + if (charray_inlist(protected_attrs_all, attr)) { /* attr is in the list */ + if (flag) { /* delete */ + charray_remove(protected_attrs_all, attr, 1); + } + } else { /* attr is not in the list */ + if (!flag) { /* add */ + charray_add(&protected_attrs_all, slapi_ch_strdup(attr)); + } + } +} + #if defined(USE_OLD_UNHASHED) static char *forbidden_attrs [] = {PSEUDO_ATTR_UNHASHEDUSERPASSWORD, NULL}; diff --git a/ldap/servers/slapd/init.c b/ldap/servers/slapd/init.c index 3bf0ea24e..e7c8ab8e5 100644 --- a/ldap/servers/slapd/init.c +++ b/ldap/servers/slapd/init.c @@ -83,6 +83,8 @@ slapd_init() exit( -1 ); } + /* Add PSEUDO_ATTR_UNHASHEDUSERPASSWORD to the protected attribute list */ + set_attr_to_protected_list(PSEUDO_ATTR_UNHASHEDUSERPASSWORD, 0); #ifndef HAVE_TIME_R if ((time_func_mutex = PR_NewLock()) == NULL ) { LDAPDebug( LDAP_DEBUG_ANY, diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h index f459e9d43..2c60cc433 100644 --- a/ldap/servers/slapd/slapi-private.h +++ b/ldap/servers/slapd/slapi-private.h @@ -337,6 +337,7 @@ int entry_apply_mods( Slapi_Entry *e, LDAPMod **mods ); int is_type_protected(const char *type); int entry_apply_mods_ignore_error( Slapi_Entry *e, LDAPMod **mods, int ignore_error ); int slapi_entries_diff(Slapi_Entry **old_entries, Slapi_Entry **new_entries, int testall, const char *logging_prestr, const int force_update, void *plg_id); +void set_attr_to_protected_list(char *attr, int flag); /* entrywsi.c */ CSN* entry_assign_operation_csn ( Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *parententry );
0
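The fix above makes "entrydn" membership in the protected-attribute list follow the subtree-rename switch instead of being hard-coded. A small Python sketch of that toggle, using a set in place of the C charray; the names are illustrative only:

protected_attrs = set()   # stand-in for protected_attrs_all

def set_attr_protected(attr, protect):
    """Add attr to the protected list when protect is True, drop it otherwise."""
    if protect:
        protected_attrs.add(attr)
    else:
        protected_attrs.discard(attr)

def on_entryrdn_switch(enabled):
    # subtree-rename on: entrydn can be derived, keep it out of the stored entry.
    # subtree-rename off: entrydn must be written to id2entry, so unprotect it.
    set_attr_protected('entrydn', enabled)

on_entryrdn_switch(True)
print('entrydn' in protected_attrs)    # True  -> not flattened into the db entry
on_entryrdn_switch(False)
print('entrydn' in protected_attrs)    # False -> stored in id2entry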
92a3f9920211ec66bbe531cf3ba1d3e77abca34b
389ds/389-ds-base
Bug 750625 - Fix Coverity (11062) Resource leak https://bugzilla.redhat.com/show_bug.cgi?id=750625 plugins/chainingdb/cb_bind.c (chainingdb_bind) Bug Description: Variable "sdn" going out of scope leaks the storage it points to. If SLAPI_BIND_TARGET_SDN is not given (anonymous bind) to cb_bind, sizeof(Slapi_DN) leaks. Fix Description: Free sdn if the structure Slapi_DN is locally allocated.
commit 92a3f9920211ec66bbe531cf3ba1d3e77abca34b Author: Noriko Hosoi <[email protected]> Date: Thu Nov 3 10:08:26 2011 -0700 Bug 750625 - Fix Coverity (11062) Resource leak https://bugzilla.redhat.com/show_bug.cgi?id=750625 plugins/chainingdb/cb_bind.c (chainingdb_bind) Bug Description: Variable "sdn" going out of scope leaks the storage it points to. If SLAPI_BIND_TARGET_SDN is not given (anonymous bind) to cb_bind, sizeof(Slapi_DN) leaks. Fix Description: Free sdn if the structure Slapi_DN is locally allocated. diff --git a/ldap/servers/plugins/chainingdb/cb_bind.c b/ldap/servers/plugins/chainingdb/cb_bind.c index edc45f3ac..18e526af5 100644 --- a/ldap/servers/plugins/chainingdb/cb_bind.c +++ b/ldap/servers/plugins/chainingdb/cb_bind.c @@ -238,6 +238,7 @@ chainingdb_bind( Slapi_PBlock *pb ) Slapi_Backend *be; const char *dn = NULL; Slapi_DN *sdn = NULL; + Slapi_DN *mysdn = NULL; int method; struct berval *creds, **urls; char *matcheddn,*errmsg; @@ -246,20 +247,20 @@ chainingdb_bind( Slapi_PBlock *pb ) int freectrls=1; int bind_retry; - if ( LDAP_SUCCESS != (rc = cb_forward_operation(pb) )) { - cb_send_ldap_result( pb, rc, NULL, "Chaining forbidden", 0, NULL ); - return SLAPI_BIND_FAIL; - } + if ( LDAP_SUCCESS != (rc = cb_forward_operation(pb) )) { + cb_send_ldap_result( pb, rc, NULL, "Chaining forbidden", 0, NULL ); + return SLAPI_BIND_FAIL; + } ctrls=NULL; /* don't add proxy auth control. use this call to check for supported */ /* controls only. */ - if ( LDAP_SUCCESS != ( rc = cb_update_controls( pb, NULL, &ctrls, 0 )) ) { - cb_send_ldap_result( pb, rc, NULL, NULL, 0, NULL ); + if ( LDAP_SUCCESS != ( rc = cb_update_controls( pb, NULL, &ctrls, 0 )) ) { + cb_send_ldap_result( pb, rc, NULL, NULL, 0, NULL ); if (ctrls) ldap_controls_free(ctrls); - return SLAPI_BIND_FAIL; - } + return SLAPI_BIND_FAIL; + } if (ctrls) ldap_controls_free(ctrls); @@ -272,30 +273,32 @@ chainingdb_bind( Slapi_PBlock *pb ) cb = cb_get_instance(be); if ( NULL == sdn ) { - sdn = slapi_sdn_new_ndn_byval(""); + sdn = mysdn = slapi_sdn_new_ndn_byval(""); } dn = slapi_sdn_get_ndn(sdn); - /* always allow noauth simple binds */ - if (( method == LDAP_AUTH_SIMPLE) && creds->bv_len == 0 ) { - return( SLAPI_BIND_ANONYMOUS ); - } + /* always allow noauth simple binds */ + if (( method == LDAP_AUTH_SIMPLE) && creds->bv_len == 0 ) { + slapi_sdn_free(&mysdn); + return( SLAPI_BIND_ANONYMOUS ); + } - cb_update_monitor_info(pb,cb,SLAPI_OPERATION_BIND); + cb_update_monitor_info(pb,cb,SLAPI_OPERATION_BIND); matcheddn=errmsg=NULL; - allocated_errmsg = 0; + allocated_errmsg = 0; resctrls=NULL; urls=NULL; /* Check wether the chaining BE is available or not */ - if ( cb_check_availability( cb, pb ) == FARMSERVER_UNAVAILABLE ){ - return -1; - } + if ( cb_check_availability( cb, pb ) == FARMSERVER_UNAVAILABLE ){ + slapi_sdn_free(&mysdn); + return -1; + } - slapi_rwlock_rdlock(cb->rwl_config_lock); + slapi_rwlock_rdlock(cb->rwl_config_lock); bind_retry=cb->bind_retry; - slapi_rwlock_unlock(cb->rwl_config_lock); + slapi_rwlock_unlock(cb->rwl_config_lock); rc = cb_sasl_bind_s(pb, cb->bind_pool, bind_retry, dn, method, mechanism, creds, reqctrls, &matcheddn, &errmsg, @@ -322,17 +325,18 @@ chainingdb_bind( Slapi_PBlock *pb ) } } - if ( urls != NULL ) { - cb_free_bervals( urls ); - } - if ( freectrls && ( resctrls != NULL )) { - ldap_controls_free( resctrls ); - } - slapi_ch_free((void **)& matcheddn ); - if ( allocated_errmsg && errmsg != NULL ) { - slapi_ch_free((void **)& errmsg ); - } + if ( urls != NULL ) { + cb_free_bervals( urls ); + } + 
if ( freectrls && ( resctrls != NULL )) { + ldap_controls_free( resctrls ); + } + slapi_ch_free((void **)& matcheddn ); + if ( allocated_errmsg && errmsg != NULL ) { + slapi_ch_free((void **)& errmsg ); + } + slapi_sdn_free(&mysdn); return ((rc == LDAP_SUCCESS ) ? SLAPI_BIND_SUCCESS : SLAPI_BIND_FAIL ); }
0
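The leak fix above remembers whether chainingdb_bind() allocated the Slapi_DN itself (mysdn) and frees it on every return path. A loose Python analogy of that ownership rule, releasing a locally opened resource in a finally block; the file handling is only a stand-in for the C memory management:

import tempfile

def read_first_line(fh=None, path=None):
    local_fh = None
    if fh is None:
        # We opened it ourselves, so we must release it on every exit path,
        # just as the patch frees the locally allocated mysdn before each return.
        fh = local_fh = open(path)
    try:
        return fh.readline()
    finally:
        if local_fh is not None:
            local_fh.close()

with tempfile.NamedTemporaryFile('w+', delete=False) as tmp:
    tmp.write('hello\n')
print(read_first_line(path=tmp.name))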
67f960f7e5f12dc64e9e5e63af8eaa33cac617f4
389ds/389-ds-base
Ticket 49087 - lib389 resolve jenkins issues Bug Description: Our jenkins tests were failing. Resolve the issues. Fix Description: The db2ldif and ldif2db scripts were flakey in some prefix settings, so we avoid them as they are going to be removed eventually anyway. Fix localhost, as localhost.localdomain literally does not exist on platforms and it causes so many errors and pain. https://fedorahosted.org/389/ticket/49087 Author: wibrown Review by: nhosoi (Thanks!)
commit 67f960f7e5f12dc64e9e5e63af8eaa33cac617f4 Author: William Brown <[email protected]> Date: Thu Jan 12 12:46:23 2017 +1000 Ticket 49087 - lib389 resolve jenkins issues Bug Description: Our jenkins tests were failing. Resolve the issues. Fix Description: The db2ldif and ldif2db scripts were flakey in some prefix settings, so we avoid them as they are going to be removed eventually anyway. Fix localhost, as localhost.localdomain literally does not exist on platforms and it causes so many errors and pain. https://fedorahosted.org/389/ticket/49087 Author: wibrown Review by: nhosoi (Thanks!) diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 0ce04634b..7b8d8eba1 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -1149,7 +1149,6 @@ class DirSrv(SimpleLDAPObject, object): count -= 1 if not pid_exists(pid): raise Exception("Failed to start DS") - self.open() def stop(self, timeout=120): @@ -2532,7 +2531,7 @@ class DirSrv(SimpleLDAPObject, object): @return - True if import succeeded """ DirSrvTools.lib389User(user=DEFAULT_USER) - prog = os.path.join(self.ds_paths.sbin_dir, LDIF2DB) + prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd') if not bename and not suffixes: log.error("ldif2db: backend name or suffix missing") @@ -2543,7 +2542,7 @@ class DirSrv(SimpleLDAPObject, object): log.error("ldif2db: Can't find file: %s" % ldif) return False - cmd = '%s -Z %s' % (prog, self.serverid) + cmd = '%s ldif2db -D %s' % (prog, self.get_config_dir()) if bename: cmd = cmd + ' -n ' + bename if suffixes: @@ -2581,13 +2580,15 @@ class DirSrv(SimpleLDAPObject, object): @return - True if export succeeded """ DirSrvTools.lib389User(user=DEFAULT_USER) - prog = os.path.join(self.ds_paths.sbin_dir, DB2LDIF) + prog = os.path.join(self.ds_paths.sbin_dir, 'ns-slapd') if not bename and not suffixes: log.error("db2ldif: backend name or suffix missing") return False - cmd = '%s -Z %s' % (prog, self.serverid) + # The shell wrapper is not always reliable, so bypass it. We want to + # kill it off anyway! + cmd = '%s db2ldif -D %s' % (prog, self.get_config_dir()) if bename: cmd = cmd + ' -n ' + bename if suffixes: @@ -2598,10 +2599,10 @@ class DirSrv(SimpleLDAPObject, object): cmd = cmd + ' -x ' + excludeSuffix if encrypt: cmd = cmd + ' -E' - if outputfile: - cmd = cmd + ' -a ' + outputfile if repl_data: cmd = cmd + ' -r' + if outputfile: + cmd = cmd + ' -a ' + outputfile self.stop(timeout=10) log.info('Running script: %s' % cmd) @@ -2611,7 +2612,8 @@ class DirSrv(SimpleLDAPObject, object): except: log.error("db2ldif: error executing %s" % cmd) result = False - self.start(timeout=10) + # Why are we implicitly starting this?! + # self.start(timeout=10) return result diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py index e247634f5..628959b0c 100644 --- a/src/lib389/lib389/_constants.py +++ b/src/lib389/lib389/_constants.py @@ -85,7 +85,9 @@ DIRSRV_STATE_OFFLINE = 3 DIRSRV_STATE_RUNNING = 4 DIRSRV_STATE_ONLINE = 5 -LOCALHOST = "localhost.localdomain" +# So uh .... localhost.localdomain doesn't always exist. Stop. Using. It. +# LOCALHOST = "localhost.localdomain" +LOCALHOST = "localhost" LOCALHOST_SHORT = "localhost" DEFAULT_PORT = 389 DEFAULT_SECURE_PORT = 636
0
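The fix above bypasses the flaky db2ldif shell wrapper and drives ns-slapd directly with the instance's config directory. A cut-down Python sketch of building that command line, mirroring the patched lib389 logic; the paths and backend name are placeholders:

import subprocess

def db2ldif_cmd(sbin_dir, config_dir, bename=None, outputfile=None, repl_data=False):
    # Mirrors the patched lib389 logic: call "ns-slapd db2ldif" directly
    # instead of the db2ldif shell wrapper.
    cmd = ['%s/ns-slapd' % sbin_dir, 'db2ldif', '-D', config_dir]
    if bename:
        cmd += ['-n', bename]
    if repl_data:
        cmd.append('-r')
    if outputfile:
        cmd += ['-a', outputfile]
    return cmd

cmd = db2ldif_cmd('/usr/sbin', '/etc/dirsrv/slapd-localhost',      # placeholder paths
                  bename='userRoot', outputfile='/tmp/userRoot.ldif')
print(' '.join(cmd))
# subprocess.check_call(cmd)   # would run it; requires a stopped local instance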
2ff2c9da762d67628cf25a1ceda3b1dbb9218776
389ds/389-ds-base
Bug 610119 - fix coverity Defect Type: Null pointer dereferences issues 12167 - 12199 https://bugzilla.redhat.com/show_bug.cgi?id=610119 Resolves: bug 610119 Bug description: Fix coverity Defect Type: Null pointer dereferences issues 12167 - 12199 Fix description: Catch possible NULL pointer in stats_table_create_row().
commit 2ff2c9da762d67628cf25a1ceda3b1dbb9218776 Author: Endi S. Dewata <[email protected]> Date: Fri Jul 2 00:27:43 2010 -0500 Bug 610119 - fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199 https://bugzilla.redhat.com/show_bug.cgi?id=610119 Resolves: bug 610119 Bug description: Fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199 Fix description: Catch possible NULL pointer in stats_table_create_row(). diff --git a/ldap/servers/snmp/ldap-agent.c b/ldap/servers/snmp/ldap-agent.c index c51bfdc02..473a0e5ee 100644 --- a/ldap/servers/snmp/ldap-agent.c +++ b/ldap/servers/snmp/ldap-agent.c @@ -227,20 +227,25 @@ stats_table_create_row(unsigned long portnum) stats_table_context *ctx = SNMP_MALLOC_TYPEDEF(stats_table_context); oid *index_oid = (oid *)malloc(sizeof(oid) * MAX_OID_LEN); + if (!ctx || !index_oid) { + /* Error during malloc */ + snmp_log(LOG_ERR, "malloc failed in stats_table_create_row\n"); + goto error; + } + /* Create index using port number */ index_oid[0] = portnum; index.oids = index_oid; index.len = 1; /* Copy index into row structure */ - if (ctx && index_oid) { - memcpy(&ctx->index, &index, sizeof(index)); - return ctx; - } else { - /* Error during malloc */ - snmp_log(LOG_ERR, "malloc failed in stats_table_create_row\n"); - return NULL; - } + memcpy(&ctx->index, &index, sizeof(index)); + return ctx; + +error: + if (index_oid) free(index_oid); + if (ctx) SNMP_FREE(ctx); + return NULL; } /************************************************************
0
a498525a76aaafc7d2979f577d4e7a93a3158d0b
389ds/389-ds-base
Ticket 47862 - repl-monitor fails to convert "*" to default values Bug Description: When specifying the "connection" parameters, you are allowed to use asterisks to signify default values. This was not properly checked in the script, and the default values were not applied. Fix Description: Properly check for "*" for the bind dn, bind password, and certdir. https://fedorahosted.org/389/ticket/47862 Reviewed by: nhosoi(Thanks!)
commit a498525a76aaafc7d2979f577d4e7a93a3158d0b Author: Mark Reynolds <[email protected]> Date: Thu Aug 7 12:18:57 2014 -0400 Ticket 47862 - repl-monitor fails to convert "*" to default values Bug Description: When specifying the "connection" parameters, you are allowed to use asterisks to signify default values. This was not properly checked in the script, and the the default values were not applied. Fix Description: Properly check for "*" for the bind dn, bind password, and certdir. https://fedorahosted.org/389/ticket/47862 Reviewed by: nhosoi(Thanks!) diff --git a/ldap/admin/src/scripts/repl-monitor.pl.in b/ldap/admin/src/scripts/repl-monitor.pl.in index 0ded096bd..3b1cf3065 100755 --- a/ldap/admin/src/scripts/repl-monitor.pl.in +++ b/ldap/admin/src/scripts/repl-monitor.pl.in @@ -1086,7 +1086,10 @@ sub add_server $hostnode = $1 if $host =~ /^(.+?)\./; # new host:port - if (!$binddn || $binddn eq "" || !$bindpwd || $bindpwd eq "" || !$bindcert || $bindcert eq "") { + if (!$binddn || $binddn eq "" || $binddn eq "*" || + !$bindpwd || $bindpwd eq "" || $bindpwd eq "*" || + !$bindcert || $bindcert eq "" || $bindcert eq "*" ) + { # # Look up connection parameter in the order of # host:port @@ -1120,7 +1123,7 @@ sub add_server $binddn = $d; } } - if($prompt eq "yes" && ($w eq "" || (!$bindpwd || $bindpwd eq ""))){ + if($prompt eq "yes" && ($w eq "" || (!$bindpwd || $bindpwd eq "" || $bindpwd eq "*"))){ $bindpwd = passwdPrompt($h, $p); } elsif ($passwd ne ""){ $bindpwd = $passwd;
0
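The Perl fix above treats "*" (as well as an empty or missing value) in the connection parameters as "use the default". A tiny Python sketch of that normalisation rule, with hypothetical defaults:

def resolve(value, default):
    """Return default when the supplied value is missing, empty, or '*'."""
    if value is None or value == '' or value == '*':
        return default
    return value

binddn = resolve('*', 'cn=Directory Manager')    # -> default bind DN
bindpwd = resolve('', 'password')                # -> default password
bindcert = resolve('/etc/dirsrv/certs', None)    # explicit value is kept
print(binddn, bindpwd, bindcert)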
056cc3551f59b9724b10e8ff21ec43e49d947280
389ds/389-ds-base
Add support for pre/post db transaction plugins There are two new plugin types: betxnpreoperation - these plugins are called just after the database calls txn_begin - they are passed in SLAPI_TXN the DB_TXN* just created - they can use this as a parent transaction in a nested transaction if the plugin wishes to cause the parent to abort, the plugin should return a non-zero return code, and set the ldap error code to a meaningful error value betxnpostoperation - called just before the database calls txn_commit The changelog code uses the betxnpostoperation to create a nested transaction for the changelog write Reviewed by: nhosoi (Thanks!)
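The description above introduces the betxnpreoperation and betxnpostoperation plugin points that run inside the backend transaction; the diff below wires the changelog write into a nested transaction at those points. A hedged python-ldap sketch that merely lists which plugins are registered for those types, assuming the usual cn=plugins,cn=config layout and the nsslapd-pluginType attribute; URL and credentials are placeholders:

import ldap

conn = ldap.initialize('ldap://localhost:389')            # placeholder URL
conn.simple_bind_s('cn=Directory Manager', 'password')    # placeholder credentials

for plugin_type in ('betxnpreoperation', 'betxnpostoperation'):
    results = conn.search_s('cn=plugins,cn=config', ldap.SCOPE_SUBTREE,
                            '(nsslapd-pluginType=%s)' % plugin_type, ['cn'])
    for dn, attrs in results:
        print(plugin_type, dn)

conn.unbind_s()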
commit 056cc3551f59b9724b10e8ff21ec43e49d947280 Author: Rich Megginson <[email protected]> Date: Mon Sep 12 09:48:17 2011 -0600 Add support for pre/post db transaction plugins There are two new plugin types: betxnpreoperation - these plugins are called just after the database calls txn_begin - they are passed in SLAPI_TXN the DB_TXN* just created - they can use this as a parent transaction in a nested transaction if the plugin wishes to cause the parent to abort, the plugin should return a non-zero return code, and set the ldap error code to a meaningful error value betxnpostoperation - called just before the database calls txn_commit The changelog code uses the betxnpostoperation to create a nested transaction for the changelog write Reviewed by: nhosoi (Thanks!) diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c index 4c88ee578..c3604443a 100644 --- a/ldap/servers/plugins/replication/cl5_api.c +++ b/ldap/servers/plugins/replication/cl5_api.c @@ -93,7 +93,9 @@ #define HASH_BACKETS_COUNT 16 /* number of buckets in a hash table */ #if 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR >= 4100 -#define DEFAULT_DB_OP_FLAGS DB_AUTO_COMMIT +#define USE_DB_TXN 1 /* use transactions */ +#define DEFAULT_DB_ENV_OP_FLAGS DB_AUTO_COMMIT +#define DEFAULT_DB_OP_FLAGS 0 #define DB_OPEN(oflags, db, txnid, file, database, type, flags, mode, rval) \ { \ if (((oflags) & DB_INIT_TXN) && ((oflags) & DB_INIT_LOG)) \ @@ -303,6 +305,8 @@ static PRBool _cl5ReplicaInList (Object *replica, Object **replicas); static int _cl5Entry2DBData (const CL5Entry *entry, char **data, PRUint32 *len); static int _cl5WriteOperation(const char *replName, const char *replGen, const slapi_operation_parameters *op, PRBool local); +static int _cl5WriteOperationTxn(const char *replName, const char *replGen, + const slapi_operation_parameters *op, PRBool local, void *txn); static int _cl5GetFirstEntry (Object *obj, CL5Entry *entry, void **iterator, DB_TXN *txnid); static int _cl5GetNextEntry (CL5Entry *entry, void *iterator); static int _cl5CurrentDeleteEntry (void *iterator); @@ -1376,7 +1380,7 @@ void cl5DestroyIterator (void *iterator) slapi_ch_free ((void**)&it); } -/* Name: cl5WriteOperation +/* Name: cl5WriteOperationTxn Description: writes operation to changelog Parameters: replName - name of the replica to which operation applies replGen - replica generation for the operation @@ -1385,14 +1389,15 @@ void cl5DestroyIterator (void *iterator) is in progress (if the data is reloaded). !!! 
op - operation to write local - this is a non-replicated operation + txn - the transaction containing this operation Return: CL5_SUCCESS if function is successfull; CL5_BAD_DATA if invalid op is passed; CL5_BAD_STATE if db has not been initialized; CL5_MEMORY_ERROR if memory allocation failed; CL5_DB_ERROR if any other db error occured; */ -int cl5WriteOperation(const char *replName, const char *replGen, - const slapi_operation_parameters *op, PRBool local) +int cl5WriteOperationTxn(const char *replName, const char *replGen, + const slapi_operation_parameters *op, PRBool local, void *txn) { int rc; @@ -1421,7 +1426,7 @@ int cl5WriteOperation(const char *replName, const char *replGen, if (rc != CL5_SUCCESS) return rc; - rc = _cl5WriteOperation(replName, replGen, op, local); + rc = _cl5WriteOperationTxn(replName, replGen, op, local, txn); /* update the upper bound ruv vector */ if (rc == CL5_SUCCESS) @@ -1440,6 +1445,27 @@ int cl5WriteOperation(const char *replName, const char *replGen, return rc; } +/* Name: cl5WriteOperation + Description: writes operation to changelog + Parameters: replName - name of the replica to which operation applies + replGen - replica generation for the operation + !!!Note that we pass name and generation rather than + replica object since generation can change while operation + is in progress (if the data is reloaded). !!! + op - operation to write + local - this is a non-replicated operation + Return: CL5_SUCCESS if function is successfull; + CL5_BAD_DATA if invalid op is passed; + CL5_BAD_STATE if db has not been initialized; + CL5_MEMORY_ERROR if memory allocation failed; + CL5_DB_ERROR if any other db error occured; + */ +int cl5WriteOperation(const char *replName, const char *replGen, + const slapi_operation_parameters *op, PRBool local) +{ + return cl5WriteOperationTxn(replName, replGen, op, local, NULL); +} + /* Name: cl5CreateReplayIterator Description: creates an iterator that allows to retireve changes that should to be sent to the consumer identified by ruv. 
The iteration is peformed by @@ -2050,7 +2076,7 @@ static int _cl5DBOpen () PR_snprintf(fullpathname, MAXPATHLEN, "%s/%s", s_cl5Desc.dbDir, entry->name); rc = s_cl5Desc.dbEnv->dbremove(s_cl5Desc.dbEnv, 0, fullpathname, 0, - DEFAULT_DB_OP_FLAGS); + DEFAULT_DB_ENV_OP_FLAGS); if (rc != 0) { slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, @@ -3248,7 +3274,7 @@ static int _cl5Delete (const char *clDir, int rmDir) } else { /* DB files */ rc = s_cl5Desc.dbEnv->dbremove(s_cl5Desc.dbEnv, 0, filename, 0, - DEFAULT_DB_OP_FLAGS); + DEFAULT_DB_ENV_OP_FLAGS); if (rc) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, "_cl5Delete: failed to remove \"%s\"; " @@ -4450,8 +4476,8 @@ _cl5LDIF2Operation (char *ldifEntry, slapi_operation_parameters *op, char **repl return rval; } -static int _cl5WriteOperation(const char *replName, const char *replGen, - const slapi_operation_parameters *op, PRBool local) +static int _cl5WriteOperationTxn(const char *replName, const char *replGen, + const slapi_operation_parameters *op, PRBool local, void *txn) { int rc; int cnt; @@ -4463,6 +4489,7 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, CL5DBFile *file = NULL; Object *file_obj = NULL; DB_TXN *txnid = NULL; + DB_TXN *parent_txnid = (DB_TXN *)txn; rc = _cl5GetDBFileByReplicaName (replName, replGen, &file_obj); if (rc == CL5_NOTFOUND) @@ -4472,14 +4499,14 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, if (rc != CL5_SUCCESS) { slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, - "_cl5WriteOperation: failed to find or open DB object for replica %s\n", replName); + "_cl5WriteOperationTxn: failed to find or open DB object for replica %s\n", replName); return rc; } } else if (rc != CL5_SUCCESS) { slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, - "_cl5WriteOperation: failed to get db file for target dn (%s)", + "_cl5WriteOperationTxn: failed to get db file for target dn (%s)", op->target_address.dn); return CL5_OBJSET_ERROR; } @@ -4499,7 +4526,7 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, { char s[CSN_STRSIZE]; slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, - "_cl5WriteOperation: failed to convert entry with csn (%s) " + "_cl5WriteOperationTxn: failed to convert entry with csn (%s) " "to db format\n", csn_as_string(op->csn,PR_FALSE,s)); goto done; } @@ -4514,7 +4541,7 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, if (rc != 0) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, - "_cl5WriteOperation: failed to write entry; db error - %d %s\n", + "_cl5WriteOperationTxn: failed to write entry; db error - %d %s\n", rc, db_strerror(rc)); if (CL5_OS_ERR_IS_DISKFULL(rc)) { @@ -4533,13 +4560,13 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, { if (cnt != 0) { -#if 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR < 4100 +#if USE_DB_TXN /* abort previous transaction */ - rc = txn_abort (txnid); + rc = TXN_ABORT (txnid); if (rc != 0) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, - "_cl5WriteOperation: failed to abort transaction; db error - %d %s\n", + "_cl5WriteOperationTxn: failed to abort transaction; db error - %d %s\n", rc, db_strerror(rc)); rc = CL5_DB_ERROR; goto done; @@ -4549,13 +4576,13 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, interval = PR_MillisecondsToInterval(slapi_rand() % 100); DS_Sleep(interval); } -#if 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR < 4100 +#if USE_DB_TXN /* begin transaction */ - rc = 
txn_begin(s_cl5Desc.dbEnv, NULL /*pid*/, &txnid, 0); + rc = TXN_BEGIN(s_cl5Desc.dbEnv, parent_txnid, &txnid, 0); if (rc != 0) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, - "_cl5WriteOperation: failed to start transaction; db error - %d %s\n", + "_cl5WriteOperationTxn: failed to start transaction; db error - %d %s\n", rc, db_strerror(rc)); rc = CL5_DB_ERROR; goto done; @@ -4574,7 +4601,7 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, if (CL5_OS_ERR_IS_DISKFULL(rc)) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, - "_cl5WriteOperation: changelog (%s) DISK FULL; db error - %d %s\n", + "_cl5WriteOperationTxn: changelog (%s) DISK FULL; db error - %d %s\n", s_cl5Desc.dbDir, rc, db_strerror(rc)); cl5_set_diskfull(); rc = CL5_DB_ERROR; @@ -4584,11 +4611,11 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, { if (rc == 0) { - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, "_cl5WriteOperation: retry (%d) the transaction (csn=%s) succeeded\n", cnt, (char*)key.data); + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, "_cl5WriteOperationTxn: retry (%d) the transaction (csn=%s) succeeded\n", cnt, (char*)key.data); } else if ((cnt + 1) >= MAX_TRIALS) { - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, "_cl5WriteOperation: retry (%d) the transaction (csn=%s) failed (rc=%d (%s))\n", cnt, (char*)key.data, rc, db_strerror(rc)); + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, "_cl5WriteOperationTxn: retry (%d) the transaction (csn=%s) failed (rc=%d (%s))\n", cnt, (char*)key.data, rc, db_strerror(rc)); } } cnt ++; @@ -4596,23 +4623,23 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, if (rc == 0) /* we successfully added entry */ { -#if 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR < 4100 - rc = txn_commit (txnid, 0); +#if USE_DB_TXN + rc = TXN_COMMIT (txnid, 0); #endif } else { char s[CSN_STRSIZE]; slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, - "_cl5WriteOperation: failed to write entry with csn (%s); " + "_cl5WriteOperationTxn: failed to write entry with csn (%s); " "db error - %d %s\n", csn_as_string(op->csn,PR_FALSE,s), rc, db_strerror(rc)); -#if 1000*DB_VERSION_MAJOR + 100*DB_VERSION_MINOR < 4100 - rc = txn_abort (txnid); +#if USE_DB_TXN + rc = TXN_ABORT (txnid); if (rc != 0) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl, - "_cl5WriteOperation: failed to abort transaction; db error - %d %s\n", + "_cl5WriteOperationTxn: failed to abort transaction; db error - %d %s\n", rc, db_strerror(rc)); } #endif @@ -4627,7 +4654,7 @@ static int _cl5WriteOperation(const char *replName, const char *replGen, _cl5UpdateRUV (file_obj, op->csn, PR_TRUE, PR_TRUE); slapi_log_error(SLAPI_LOG_PLUGIN, repl_plugin_name_cl, - "cl5WriteOperation: successfully written entry with csn (%s)\n", csnStr); + "cl5WriteOperationTxn: successfully written entry with csn (%s)\n", csnStr); rc = CL5_SUCCESS; done: if (data->data) @@ -4640,6 +4667,12 @@ done: return rc; } +static int _cl5WriteOperation(const char *replName, const char *replGen, + const slapi_operation_parameters *op, PRBool local) +{ + return _cl5WriteOperationTxn(replName, replGen, op, local, NULL); +} + static int _cl5GetFirstEntry (Object *obj, CL5Entry *entry, void **iterator, DB_TXN *txnid) { int rc; @@ -5861,9 +5894,9 @@ static void _cl5DBCloseFile (void **data) * run into problems when we try to checkpoint transactions later. 
*/ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5DBCloseFile: " "removing the changelog %s (flag %d)\n", - file->name, DEFAULT_DB_OP_FLAGS); + file->name, DEFAULT_DB_ENV_OP_FLAGS); rc = s_cl5Desc.dbEnv->dbremove(s_cl5Desc.dbEnv, 0, file->name, 0, - DEFAULT_DB_OP_FLAGS); + DEFAULT_DB_ENV_OP_FLAGS); if (rc != 0) { slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5DBCloseFile: " diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h index 3a59111ff..6f2552f05 100644 --- a/ldap/servers/plugins/replication/cl5_api.h +++ b/ldap/servers/plugins/replication/cl5_api.h @@ -321,6 +321,25 @@ int cl5GetNextOperation (slapi_operation_parameters *op, void *iterator); */ void cl5DestroyIterator (void *iterator); +/* Name: cl5WriteOperationTxn + Description: writes operation to changelog as part of a containing transaction + Parameters: repl_name - name of the replica to which operation applies + repl_gen - replica generation for the operation + !!!Note that we pass name and generation rather than + replica object since generation can change while operation + is in progress (if the data is reloaded). !!! + op - operation to write + local - this is a non-replicated operation + txn - the containing transaction + Return: CL5_SUCCESS if function is successfull; + CL5_BAD_DATA if invalid op is passed; + CL5_BAD_STATE if db has not been initialized; + CL5_MEMORY_ERROR if memory allocation failed; + CL5_DB_ERROR if any other db error occured; + */ +int cl5WriteOperationTxn(const char *repl_name, const char *repl_gen, + const slapi_operation_parameters *op, PRBool local, void *txn); + /* Name: cl5WriteOperation Description: writes operation to changelog Parameters: repl_name - name of the replica to which operation applies diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h index 1a3672fa0..79abb7a4e 100644 --- a/ldap/servers/plugins/replication/repl5.h +++ b/ldap/servers/plugins/replication/repl5.h @@ -199,6 +199,10 @@ int multimaster_postop_add (Slapi_PBlock *pb); int multimaster_postop_delete (Slapi_PBlock *pb); int multimaster_postop_modify (Slapi_PBlock *pb); int multimaster_postop_modrdn (Slapi_PBlock *pb); +int multimaster_betxnpostop_modrdn (Slapi_PBlock *pb); +int multimaster_betxnpostop_delete (Slapi_PBlock *pb); +int multimaster_betxnpostop_add (Slapi_PBlock *pb); +int multimaster_betxnpostop_modify (Slapi_PBlock *pb); /* In repl5_init.c */ char* get_thread_private_agmtname (); diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c index 2b6ebdfba..883c3e61f 100644 --- a/ldap/servers/plugins/replication/repl5_init.c +++ b/ldap/servers/plugins/replication/repl5_init.c @@ -133,6 +133,7 @@ static Slapi_PluginDesc multimasterinternalpreopdesc = {"replication-multimaster static Slapi_PluginDesc multimasterinternalpostopdesc = {"replication-multimaster-internalpostop", VENDOR, DS_PACKAGE_VERSION, "Multimaster replication internal post-operation plugin"}; static Slapi_PluginDesc multimasterbepreopdesc = {"replication-multimaster-bepreop", VENDOR, DS_PACKAGE_VERSION, "Multimaster replication bepre-operation plugin"}; static Slapi_PluginDesc multimasterbepostopdesc = {"replication-multimaster-bepostop", VENDOR, DS_PACKAGE_VERSION, "Multimaster replication bepost-operation plugin"}; +static Slapi_PluginDesc multimasterbetxnpostopdesc = {"replication-multimaster-betxnpostop", VENDOR, DS_PACKAGE_VERSION, "Multimaster replication be transaction 
post-operation plugin"}; static Slapi_PluginDesc multimasterextopdesc = { "replication-multimaster-extop", VENDOR, DS_PACKAGE_VERSION, "Multimaster replication extended-operation plugin" }; static int multimaster_stopped_flag; /* A flag which is set when all the plugin threads are to stop */ @@ -328,6 +329,25 @@ multimaster_bepostop_init( Slapi_PBlock *pb ) return rc; } +int +multimaster_betxnpostop_init( Slapi_PBlock *pb ) +{ + int rc= 0; /* OK */ + + if( slapi_pblock_set( pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01 ) != 0 || + slapi_pblock_set( pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&multimasterbetxnpostopdesc ) != 0 || + slapi_pblock_set( pb, SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN, (void *) multimaster_betxnpostop_modrdn ) != 0 || + slapi_pblock_set( pb, SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN, (void *) multimaster_betxnpostop_delete ) != 0 || + slapi_pblock_set( pb, SLAPI_PLUGIN_BE_TXN_POST_ADD_FN, (void *) multimaster_betxnpostop_modrdn ) != 0 || + slapi_pblock_set( pb, SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN, (void *) multimaster_betxnpostop_delete ) != 0 ) + { + slapi_log_error( SLAPI_LOG_PLUGIN, repl_plugin_name, "multimaster_betxnpostop_init failed\n" ); + rc= -1; + } + + return rc; +} + int multimaster_start_extop_init( Slapi_PBlock *pb ) { @@ -591,6 +611,7 @@ int replication_multimaster_plugin_init(Slapi_PBlock *pb) rc= slapi_register_plugin("postoperation", 1 /* Enabled */, "multimaster_postop_init", multimaster_postop_init, "Multimaster replication postoperation plugin", NULL, identity); rc= slapi_register_plugin("bepreoperation", 1 /* Enabled */, "multimaster_bepreop_init", multimaster_bepreop_init, "Multimaster replication bepreoperation plugin", NULL, identity); rc= slapi_register_plugin("bepostoperation", 1 /* Enabled */, "multimaster_bepostop_init", multimaster_bepostop_init, "Multimaster replication bepostoperation plugin", NULL, identity); + rc= slapi_register_plugin("betxnpostoperation", 1 /* Enabled */, "multimaster_betxnpostop_init", multimaster_betxnpostop_init, "Multimaster replication betxnpostoperation plugin", NULL, identity); rc= slapi_register_plugin("internalpreoperation", 1 /* Enabled */, "multimaster_internalpreop_init", multimaster_internalpreop_init, "Multimaster replication internal preoperation plugin", NULL, identity); rc= slapi_register_plugin("internalpostoperation", 1 /* Enabled */, "multimaster_internalpostop_init", multimaster_internalpostop_init, "Multimaster replication internal postoperation plugin", NULL, identity); rc= slapi_register_plugin("extendedop", 1 /* Enabled */, "multimaster_start_extop_init", multimaster_start_extop_init, "Multimaster replication start extended operation plugin", NULL, identity); diff --git a/ldap/servers/plugins/replication/repl5_plugins.c b/ldap/servers/plugins/replication/repl5_plugins.c index 8b4701137..a036b0ab3 100644 --- a/ldap/servers/plugins/replication/repl5_plugins.c +++ b/ldap/servers/plugins/replication/repl5_plugins.c @@ -851,6 +851,29 @@ multimaster_postop_modrdn (Slapi_PBlock *pb) return process_postop(pb); } +int +multimaster_betxnpostop_delete (Slapi_PBlock *pb) +{ + return write_changelog_and_ruv(pb); +} + +int +multimaster_betxnpostop_modrdn (Slapi_PBlock *pb) +{ + return write_changelog_and_ruv(pb); +} + +int +multimaster_betxnpostop_add (Slapi_PBlock *pb) +{ + return write_changelog_and_ruv(pb); +} + +int +multimaster_betxnpostop_modify (Slapi_PBlock *pb) +{ + return write_changelog_and_ruv(pb); +} /* Helper functions */ @@ -943,42 +966,63 @@ update_ruv_component(Replica *replica, CSN *opcsn, 
Slapi_PBlock *pb) static int write_changelog_and_ruv (Slapi_PBlock *pb) { + Slapi_Operation *op = NULL; int rc; slapi_operation_parameters *op_params = NULL; - Object *repl_obj; + Object *repl_obj; int return_value = 0; - Replica *r; + Replica *r; + Slapi_Backend *be; + int is_replicated_operation = 0; - /* we only log changes for operations applied to a replica */ + /* we just let fixup operations through */ + slapi_pblock_get( pb, SLAPI_OPERATION, &op ); + if ((operation_is_flag_set(op, OP_FLAG_REPL_FIXUP)) || + (operation_is_flag_set(op, OP_FLAG_TOMBSTONE_ENTRY))) + { + return 0; + } + + /* ignore operations intended for chaining backends - they will be + replicated back to us or should be ignored anyway + replicated operations should be processed normally, as they should + be going to a local backend */ + is_replicated_operation= operation_is_flag_set(op,OP_FLAG_REPLICATED); + slapi_pblock_get(pb, SLAPI_BACKEND, &be); + if (!is_replicated_operation && + slapi_be_is_flag_set(be,SLAPI_BE_FLAG_REMOTE_DATA)) + { + return 0; + } + + /* we only log changes for operations applied to a replica */ repl_obj = replica_get_replica_for_op (pb); - if (repl_obj == NULL) - return 0; + if (repl_obj == NULL) + return 0; - r = (Replica*)object_get_data (repl_obj); - PR_ASSERT (r); + r = (Replica*)object_get_data (repl_obj); + PR_ASSERT (r); if (replica_is_flag_set (r, REPLICA_LOG_CHANGES) && (cl5GetState () == CL5_STATE_OPEN)) { - supplier_operation_extension *opext = NULL; - const char *repl_name; - char *repl_gen; - Slapi_Operation *op; + supplier_operation_extension *opext = NULL; + const char *repl_name; + char *repl_gen; - slapi_pblock_get(pb, SLAPI_OPERATION, &op); opext = (supplier_operation_extension*) repl_sup_get_ext (REPL_SUP_EXT_OP, op); PR_ASSERT (opext); - /* get replica generation and replica name to pass to the write function */ - repl_name = replica_get_name (r); - repl_gen = opext->repl_gen; - PR_ASSERT (repl_name && repl_gen); + /* get replica generation and replica name to pass to the write function */ + repl_name = replica_get_name (r); + repl_gen = opext->repl_gen; + PR_ASSERT (repl_name && repl_gen); /* for replicated operations, we log the original, non-urp data which is saved in the operation extension */ if (operation_is_flag_set(op,OP_FLAG_REPLICATED)) { - PR_ASSERT (opext->operation_parameters); + PR_ASSERT (opext->operation_parameters); op_params = opext->operation_parameters; } else /* since client operations don't go through urp, we log the operation data in pblock */ @@ -1013,21 +1057,23 @@ write_changelog_and_ruv (Slapi_PBlock *pb) op_params->target_address.uniqueid = slapi_ch_strdup (uniqueid); } - /* we might have stripped all the mods - in that case we do not - log the operation */ - if (op_params->operation_type != SLAPI_OPERATION_MODIFY || - op_params->p.p_modify.modify_mods != NULL) - { + /* we might have stripped all the mods - in that case we do not + log the operation */ + if (op_params->operation_type != SLAPI_OPERATION_MODIFY || + op_params->p.p_modify.modify_mods != NULL) + { + void *txn = NULL; if (cl5_is_diskfull() && !cl5_diskspace_is_available()) { slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, - "write_changelog_and_ruv: Skipped due to DISKFULL\n"); + "write_changelog_and_ruv: Skipped due to DISKFULL\n"); return 0; } - rc = cl5WriteOperation(repl_name, repl_gen, op_params, - !operation_is_flag_set(op, OP_FLAG_REPLICATED)); - if (rc != CL5_SUCCESS) - { + slapi_pblock_get(pb, SLAPI_TXN, &txn); + rc = cl5WriteOperationTxn(repl_name, repl_gen, op_params, 
+ !operation_is_flag_set(op, OP_FLAG_REPLICATED), txn); + if (rc != CL5_SUCCESS) + { char csn_str[CSN_STRSIZE]; /* ONREPL - log error */ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, @@ -1036,10 +1082,10 @@ write_changelog_and_ruv (Slapi_PBlock *pb) op_params->target_address.dn, op_params->target_address.uniqueid, op_params->operation_type, - csn_as_string(op_params->csn, PR_FALSE, csn_str)); - return_value = 1; - } - } + csn_as_string(op_params->csn, PR_FALSE, csn_str)); + return_value = 1; + } + } if (!operation_is_flag_set(op,OP_FLAG_REPLICATED)) { @@ -1056,7 +1102,6 @@ write_changelog_and_ruv (Slapi_PBlock *pb) just read from the changelog in either the supplier or consumer ruv */ if (0 == return_value) { - Slapi_Operation *op; CSN *opcsn; slapi_pblock_get( pb, SLAPI_OPERATION, &op ); @@ -1107,24 +1152,9 @@ process_postop (Slapi_PBlock *pb) get_repl_session_id (pb, sessionid, &opcsn); slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc); - /* - * Don't abandon writing changelog since we'd do everything - * possible to keep the changelog in sync with the backend - * db which was committed before this function was called. - * - * if (rc == LDAP_SUCCESS && !slapi_op_abandoned(pb)) - */ if (rc == LDAP_SUCCESS) { - rc = write_changelog_and_ruv(pb); - if (rc == 0) - { - agmtlist_notify_all(pb); - } - else - { - slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "%s process postop: error writing changelog and ruv\n", sessionid); - } + agmtlist_notify_all(pb); } else if (opcsn) { diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c index 20578ba05..274e3d72a 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_add.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c @@ -671,6 +671,18 @@ ldbm_back_add( Slapi_PBlock *pb ) ldap_result_code= LDAP_OPERATIONS_ERROR; goto error_return; } + + /* stash the transaction */ + slapi_pblock_set(pb, SLAPI_TXN, (void *)txn.back_txn_txn); + + /* call the transaction pre add plugins just after creating the transaction */ + if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN))) { + LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN plugin " + "returned error code %d\n", retval ); + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); + goto error_return; + } + retval = id2entry_add( be, addingentry, &txn ); if (DB_LOCK_DEADLOCK == retval) { @@ -883,6 +895,14 @@ ldbm_back_add( Slapi_PBlock *pb ) } } + /* call the transaction post add plugins just before the commit */ + if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_POST_ADD_FN))) { + LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_POST_ADD_FN plugin " + "returned error code %d\n", retval ); + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); + goto error_return; + } + retval = dblayer_txn_commit(li,&txn); if (0 != retval) { diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index f2edf1e9a..088d1b53e 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -467,6 +467,18 @@ ldbm_back_delete( Slapi_PBlock *pb ) ldap_result_code= LDAP_OPERATIONS_ERROR; goto error_return; } + + /* stash the transaction */ + slapi_pblock_set(pb, SLAPI_TXN, (void *)txn.back_txn_txn); + + /* call the transaction pre delete plugins just after creating the transaction */ + if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN))) { + LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN plugin " + "returned error code 
%d\n", retval ); + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); + goto error_return; + } + if(create_tombstone_entry) { /* @@ -868,6 +880,14 @@ ldbm_back_delete( Slapi_PBlock *pb ) goto error_return; } + /* call the transaction post delete plugins just before the commit */ + if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN))) { + LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN plugin " + "returned error code %d\n", retval ); + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); + goto error_return; + } + retval = dblayer_txn_commit(li,&txn); if (0 != retval) { diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c index f3b351f0a..9dbf2b992 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c @@ -422,6 +422,17 @@ ldbm_back_modify( Slapi_PBlock *pb ) goto error_return; } + /* stash the transaction */ + slapi_pblock_set(pb, SLAPI_TXN, (void *)txn.back_txn_txn); + + /* call the transaction pre modify plugins just after creating the transaction */ + if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN))) { + LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN plugin " + "returned error code %d\n", retval ); + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); + goto error_return; + } + /* * Update the ID to Entry index. * Note that id2entry_add replaces the entry, so the Entry ID stays the same. @@ -537,6 +548,14 @@ ldbm_back_modify( Slapi_PBlock *pb ) */ e = NULL; + /* call the transaction post modify plugins just before the commit */ + if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN))) { + LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN plugin " + "returned error code %d\n", retval ); + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); + goto error_return; + } + retval = dblayer_txn_commit(li,&txn); if (0 != retval) { if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1; diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c index 54b3125c8..be5787a03 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c @@ -695,6 +695,17 @@ ldbm_back_modrdn( Slapi_PBlock *pb ) goto error_return; } + /* stash the transaction */ + slapi_pblock_set(pb, SLAPI_TXN, (void *)txn.back_txn_txn); + + /* call the transaction pre modrdn plugins just after creating the transaction */ + if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN))) { + LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN plugin " + "returned error code %d\n", retval ); + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); + goto error_return; + } + /* * Update the indexes for the entry. 
*/ @@ -898,6 +909,14 @@ ldbm_back_modrdn( Slapi_PBlock *pb ) modify_switch_entries( &newparent_modify_context,be); } + /* call the transaction post modrdn plugins just before the commit */ + if ((retval = plugin_call_plugins(pb, SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN))) { + LDAPDebug1Arg( LDAP_DEBUG_ANY, "SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN plugin " + "returned error code %d\n", retval ); + slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code); + goto error_return; + } + retval = dblayer_txn_commit(li,&txn); if (0 != retval) { diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c index 215b7dffd..d068132ef 100644 --- a/ldap/servers/slapd/pblock.c +++ b/ldap/servers/slapd/pblock.c @@ -1062,6 +1062,58 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value ) (*(IFP *)value) = pblock->pb_plugin->plg_internal_post_delete; break; + /* backend pre txn operation plugin */ + case SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_betxnpremodify; + break; + case SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_betxnpremodrdn; + break; + case SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_betxnpreadd; + break; + case SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_betxnpredelete; + break; + + /* backend post txn operation plugin */ + case SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_betxnpostmodify; + break; + case SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_betxnpostmodrdn; + break; + case SLAPI_PLUGIN_BE_TXN_POST_ADD_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_betxnpostadd; + break; + case SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) { + return( -1 ); + } + (*(IFP *)value) = pblock->pb_plugin->plg_betxnpostdelete; + break; + /* target address & controls for all operations should be normalized */ case SLAPI_TARGET_ADDRESS: if(pblock->pb_op!=NULL) @@ -2497,6 +2549,58 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value ) pblock->pb_plugin->plg_internal_post_delete = (IFP) value; break; + /* backend preoperation plugin - called just after creating transaction */ + case SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) { + return( -1 ); + } + pblock->pb_plugin->plg_betxnpremodify = (IFP) value; + break; + case SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) { + return( -1 ); + } + pblock->pb_plugin->plg_betxnpremodrdn = (IFP) value; + break; + case SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) { + return( -1 ); + } + pblock->pb_plugin->plg_betxnpreadd = (IFP) value; + break; + case SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN: + if 
(pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPREOPERATION) { + return( -1 ); + } + pblock->pb_plugin->plg_betxnpredelete = (IFP) value; + break; + + /* backend postoperation plugin - called just before committing transaction */ + case SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) { + return( -1 ); + } + pblock->pb_plugin->plg_betxnpostmodify = (IFP) value; + break; + case SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) { + return( -1 ); + } + pblock->pb_plugin->plg_betxnpostmodrdn = (IFP) value; + break; + case SLAPI_PLUGIN_BE_TXN_POST_ADD_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) { + return( -1 ); + } + pblock->pb_plugin->plg_betxnpostadd = (IFP) value; + break; + case SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN: + if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_BETXNPOSTOPERATION) { + return( -1 ); + } + pblock->pb_plugin->plg_betxnpostdelete = (IFP) value; + break; + /* syntax plugin functions */ case SLAPI_PLUGIN_SYNTAX_FILTER_AVA: if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_SYNTAX ) { diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c index a8a52cd2e..c145eff2e 100644 --- a/ldap/servers/slapd/plugin.c +++ b/ldap/servers/slapd/plugin.c @@ -373,6 +373,20 @@ plugin_call_plugins( Slapi_PBlock *pb, int whichfunction ) case SLAPI_PLUGIN_INTERNAL_POST_DELETE_FN: plugin_list_number= PLUGIN_LIST_INTERNAL_POSTOPERATION; break; + case SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN: + case SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN: + case SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN: + case SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN: + plugin_list_number= PLUGIN_LIST_BETXNPREOPERATION; + do_op = 1; /* always allow backend callbacks (even during startup) */ + break; + case SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN: + case SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN: + case SLAPI_PLUGIN_BE_TXN_POST_ADD_FN: + case SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN: + plugin_list_number= PLUGIN_LIST_BETXNPOSTOPERATION; + do_op = 1; /* always allow backend callbacks (even during startup) */ + break; } if(plugin_list_number!=-1 && do_op) { @@ -1440,6 +1454,7 @@ plugin_call_func (struct slapdplugin *list, int operation, Slapi_PBlock *pb, int { if (SLAPI_PLUGIN_PREOPERATION == list->plg_type || SLAPI_PLUGIN_INTERNAL_PREOPERATION == list->plg_type || + SLAPI_PLUGIN_BETXNPREOPERATION == list->plg_type || SLAPI_PLUGIN_START_FN == operation ) { /* @@ -1653,6 +1668,12 @@ plugin_get_type_and_list( } else if ( strcasecmp( plugintype, "bepostoperation" ) == 0 ) { *type = SLAPI_PLUGIN_BEPOSTOPERATION; plugin_list_index= PLUGIN_LIST_BEPOSTOPERATION; + } else if ( strcasecmp( plugintype, "betxnpreoperation" ) == 0 ) { + *type = SLAPI_PLUGIN_BETXNPREOPERATION; + plugin_list_index= PLUGIN_LIST_BETXNPREOPERATION; + } else if ( strcasecmp( plugintype, "betxnpostoperation" ) == 0 ) { + *type = SLAPI_PLUGIN_BETXNPOSTOPERATION; + plugin_list_index= PLUGIN_LIST_BETXNPOSTOPERATION; } else if ( strcasecmp( plugintype, "internalpreoperation" ) == 0 ) { *type = SLAPI_PLUGIN_INTERNAL_PREOPERATION; plugin_list_index= PLUGIN_LIST_INTERNAL_PREOPERATION; diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index ce95b1bd1..4dce91533 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -702,7 +702,9 @@ struct matchingRuleList { #define PLUGIN_LIST_REVER_PWD_STORAGE_SCHEME 16 #define PLUGIN_LIST_LDBM_ENTRY_FETCH_STORE 17 #define PLUGIN_LIST_INDEX 18 -#define PLUGIN_LIST_GLOBAL_MAX 19 +#define 
PLUGIN_LIST_BETXNPREOPERATION 19 +#define PLUGIN_LIST_BETXNPOSTOPERATION 20 +#define PLUGIN_LIST_GLOBAL_MAX 21 /* plugin configuration attributes */ #define ATTR_PLUGIN_PATH "nsslapd-pluginPath" @@ -1108,6 +1110,31 @@ struct slapdplugin { } plg_un_entry_fetch_store; #define plg_entryfetchfunc plg_un.plg_un_entry_fetch_store.plg_un_entry_fetch_func #define plg_entrystorefunc plg_un.plg_un_entry_fetch_store.plg_un_entry_store_func + + /* backend txn pre-operation plugin structure */ + struct plg_un_betxnpre_operation { + IFP plg_un_betxnpre_modify; /* modify */ + IFP plg_un_betxnpre_modrdn; /* modrdn */ + IFP plg_un_betxnpre_add; /* add */ + IFP plg_un_betxnpre_delete; /* delete */ + } plg_un_betxnpre; +#define plg_betxnpremodify plg_un.plg_un_betxnpre.plg_un_betxnpre_modify +#define plg_betxnpremodrdn plg_un.plg_un_betxnpre.plg_un_betxnpre_modrdn +#define plg_betxnpreadd plg_un.plg_un_betxnpre.plg_un_betxnpre_add +#define plg_betxnpredelete plg_un.plg_un_betxnpre.plg_un_betxnpre_delete + + /* backend txn post-operation plugin structure */ + struct plg_un_betxnpost_operation { + IFP plg_un_betxnpost_modify; /* modify */ + IFP plg_un_betxnpost_modrdn; /* modrdn */ + IFP plg_un_betxnpost_add; /* add */ + IFP plg_un_betxnpost_delete; /* delete */ + } plg_un_betxnpost; +#define plg_betxnpostmodify plg_un.plg_un_betxnpost.plg_un_betxnpost_modify +#define plg_betxnpostmodrdn plg_un.plg_un_betxnpost.plg_un_betxnpost_modrdn +#define plg_betxnpostadd plg_un.plg_un_betxnpost.plg_un_betxnpost_add +#define plg_betxnpostdelete plg_un.plg_un_betxnpost.plg_un_betxnpost_delete + } plg_un; }; diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 1c7cc7da1..4764e502b 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -5778,6 +5778,8 @@ time_t slapi_current_time( void ); #define SLAPI_PLUGIN_REVER_PWD_STORAGE_SCHEME 16 #define SLAPI_PLUGIN_LDBM_ENTRY_FETCH_STORE 17 #define SLAPI_PLUGIN_INDEX 18 +#define SLAPI_PLUGIN_BETXNPREOPERATION 19 +#define SLAPI_PLUGIN_BETXNPOSTOPERATION 20 /* * special return values for extended operation plugins (zero or positive @@ -5926,6 +5928,12 @@ typedef struct slapi_plugindesc { #define SLAPI_PLUGIN_BE_PRE_CLOSE_FN 454 #define SLAPI_PLUGIN_BE_PRE_BACKUP_FN 455 +/* preoperation plugin to the backend - just after transaction creation */ +#define SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN 460 +#define SLAPI_PLUGIN_BE_TXN_PRE_MODIFY_FN 461 +#define SLAPI_PLUGIN_BE_TXN_PRE_MODRDN_FN 462 +#define SLAPI_PLUGIN_BE_TXN_PRE_DELETE_FN 463 + /* postoperation plugin functions */ #define SLAPI_PLUGIN_POST_BIND_FN 501 #define SLAPI_PLUGIN_POST_UNBIND_FN 502 @@ -5955,6 +5963,12 @@ typedef struct slapi_plugindesc { #define SLAPI_PLUGIN_BE_POST_OPEN_FN 554 #define SLAPI_PLUGIN_BE_POST_BACKUP_FN 555 +/* postoperation plugin to the backend - just before transaction commit */ +#define SLAPI_PLUGIN_BE_TXN_POST_ADD_FN 560 +#define SLAPI_PLUGIN_BE_TXN_POST_MODIFY_FN 561 +#define SLAPI_PLUGIN_BE_TXN_POST_MODRDN_FN 562 +#define SLAPI_PLUGIN_BE_TXN_POST_DELETE_FN 563 + /* matching rule plugin functions */ #define SLAPI_PLUGIN_MR_FILTER_CREATE_FN 600 #define SLAPI_PLUGIN_MR_INDEXER_CREATE_FN 601
0
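As a reading aid for the commit above: a minimal sketch of a plugin using the new betxnpreoperation hook point. Every example_* name is invented for illustration; only the SLAPI_* constants, Slapi_PluginDesc, the slapi_pblock_get/slapi_pblock_set calls and the SLAPI_TXN parameter come from the diff itself, so treat this as a sketch under those assumptions rather than code from the commit.

/*
 * Hypothetical sketch of a minimal betxnpreoperation plugin. All example_*
 * names are invented; the SLAPI_* symbols, Slapi_PluginDesc and the pblock
 * calls are the ones the diff above adds or uses.
 */
#include "slapi-plugin.h"

static Slapi_PluginDesc example_desc = {"example-betxnpreop", "example vendor",
                                        "0.1", "example betxn pre-operation plugin"};

/* Runs after the backend has begun its transaction and stashed it in
 * SLAPI_TXN; returning non-zero asks the backend to abort that parent
 * transaction. */
static int
example_betxnpre_add(Slapi_PBlock *pb)
{
    void *txn = NULL;

    slapi_pblock_get(pb, SLAPI_TXN, &txn);
    /* A real plugin could hand this DB_TXN* to its own database writes so
     * they become a nested transaction of the backend operation. */
    slapi_log_error(SLAPI_LOG_PLUGIN, "example-betxnpreop",
                    "betxnpre add callback, txn=%p\n", txn);
    return 0; /* let the parent transaction proceed */
}

int
example_betxnpre_init(Slapi_PBlock *pb)
{
    if (slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION, SLAPI_PLUGIN_VERSION_01) != 0 ||
        slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION, (void *)&example_desc) != 0 ||
        slapi_pblock_set(pb, SLAPI_PLUGIN_BE_TXN_PRE_ADD_FN,
                         (void *)example_betxnpre_add) != 0) {
        return -1;
    }
    return 0;
}

Registration would follow the same path the diff adds for the multimaster plugin, i.e. slapi_register_plugin("betxnpreoperation", 1, "example_betxnpre_init", example_betxnpre_init, ...), and a non-zero return from the callback causes the backend to abort the parent transaction, as the commit message describes.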
03df576c87dc2b0bc28939f4b1166e2e079cab24
389ds/389-ds-base
Ticket 48007 - CI test to test changelog trimming interval Description: CI test to verify that changes to the trimming interval takes effect immediately https://pagure.io/389-ds-base/issue/48007 Reviewed by: firstyear(Thanks!)
commit 03df576c87dc2b0bc28939f4b1166e2e079cab24 Author: Mark Reynolds <[email protected]> Date: Tue Oct 31 10:59:07 2017 -0400 Ticket 48007 - CI test to test changelog trimming interval Description: CI test to verify that changes to the trimming interval takes effect immediately https://pagure.io/389-ds-base/issue/48007 Reviewed by: firstyear(Thanks!) diff --git a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py new file mode 100644 index 000000000..0c39ea99c --- /dev/null +++ b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py @@ -0,0 +1,173 @@ +import logging +import pytest +import os +import ldap +import time +from lib389._constants import * +from lib389.properties import * +from lib389.topologies import create_topology + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def do_mods(master, num): + """Perform a num of mods on the default suffix + """ + for i in xrange(num): + try: + master.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, + "description", + "new")]) + except ldap.LDAPError as e: + log.fatal("Failed to make modify: " + str(e)) + assert False + + [email protected](scope="module") +def setup_max_entries(topo, request): + """Configure logging and changelog max entries + """ + master = topo.ms["master1"] + + master.config.loglevel((LOG_REPLICA,), 'error') + try: + master.modify_s(DN_CHANGELOG, [(ldap.MOD_REPLACE, CL_MAX_ENTRIES, "2"), + (ldap.MOD_REPLACE, CL_TRIM_INTERVAL, "300")]) + except ldap.LDAPError as e: + log.fatal("Failed to set change log config: " + str(e)) + assert False + + [email protected](scope="module") +def setup_max_age(topo, request): + """Configure logging and changelog max age + """ + master = topo.ms["master1"] + master.config.loglevel((LOG_REPLICA,), 'error') + try: + master.modify_s(DN_CHANGELOG, [(ldap.MOD_REPLACE, CL_MAXAGE, "5"), + (ldap.MOD_REPLACE, CL_TRIM_INTERVAL, "300")]) + except ldap.LDAPError as e: + log.fatal("Failed to set change log config: " + str(e)) + assert False + + [email protected](scope="module") +def topo(request): + """Create a topology with 1 masters""" + + topology = create_topology({ + ReplicaRole.MASTER: 1, + }) + # You can write replica test here. Just uncomment the block and choose instances + # replicas = Replicas(topology.ms["master1"]) + # replicas.test(DEFAULT_SUFFIX, topology.cs["consumer1"]) + + def fin(): + """If we are debugging just stop the instances, otherwise remove them""" + + if DEBUGGING: + map(lambda inst: inst.stop(), topology.all_insts.values()) + else: + map(lambda inst: inst.delete(), topology.all_insts.values()) + + request.addfinalizer(fin) + + return topology + + +def test_max_age(topo, setup_max_age): + """Test changing the trimming interval works with max age + + :id: b5de04a5-4d92-49ea-a725-1d278a1c647c + :setup: single master + :steps: + 1. Perform modification to populate changelog + 2. Adjust the changelog trimming interval + 3. Check is trimming occurrs within the new interval + + :expectedresults: + 1. Modifications are successful + 2. The changelog trimming interval is correctly lowered + 3. 
Trimming occurs + + """ + log.info("Testing changelog triming interval with max age...") + + master = topo.ms["master1"] + + # Do mods to build if cl entries + do_mods(master, 10) + time.sleep(6) # 5 seconds + 1 for good measure + + if master.searchErrorsLog("Trimmed") is True: + log.fatal('Trimming event unexpectedly occurred') + assert False + + try: + master.modify_s(DN_CHANGELOG, [(ldap.MOD_REPLACE, CL_TRIM_INTERVAL, "5")]) + except ldap.LDAPError as e: + log.fatal("Failed to set chance log trim interval: " + str(e)) + assert False + + time.sleep(6) # Trimming should have occured + + if master.searchErrorsLog("Trimmed") is False: + log.fatal('Trimming event did not occur') + assert False + + +def test_max_entries(topo, setup_max_entries): + """Test changing the trimming interval works with max entries + + :id: b5de04a5-4d92-49ea-a725-1d278a1c647d + :setup: single master + :steps: + 1. Perform modification to populate changelog + 2. Adjust the changelog trimming interval + 3. Check is trimming occurrs within the new interval + + :expectedresults: + 1. Modifications are successful + 2. The changelog trimming interval is correctly lowered + 3. Trimming occurs + + """ + + log.info("Testing changelog triming interval with max entries...") + master = topo.ms["master1"] + + # reset errors log + master.deleteErrorLogs() + + # Do mods to build if cl entries + do_mods(master, 10) + + if master.searchErrorsLog("Trimmed") is True: + log.fatal('Trimming event unexpectedly occurred') + assert False + + try: + master.modify_s(DN_CHANGELOG, [(ldap.MOD_REPLACE, CL_TRIM_INTERVAL, "5")]) + except ldap.LDAPError as e: + log.fatal("Failed to set chance log trim interval: " + str(e)) + assert False + + time.sleep(6) # Trimming should have occured + + if master.searchErrorsLog("Trimmed") is False: + log.fatal('Trimming event did not occur') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) +
0
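The behaviour this test exercises is that lowering the changelog trimming interval is honoured on the next trimming cycle rather than only after a restart. Purely as an illustrative sketch of that pattern, and not the actual 389-ds changelog implementation (whose code is not shown here), a periodic worker that picks up run-time interval changes can re-read the configured value on every pass:

/*
 * Illustrative pattern only -- not the 389-ds changelog code. A periodic
 * trimming worker that re-reads its configured interval on every pass, so
 * a lowered interval is honoured within about a second instead of only
 * after a restart.
 */
#include <stdatomic.h>
#include <unistd.h>

static atomic_int trim_interval_secs = 300; /* written by the config code */

void
example_set_trim_interval(int secs)
{
    atomic_store(&trim_interval_secs, secs);
}

void
example_trim_loop(int (*keep_running)(void), void (*trim_once)(void))
{
    int waited = 0;

    while (keep_running()) {
        sleep(1);
        waited++;
        /* Re-read the interval each second rather than caching it at
         * thread start; this is what makes a run-time change effective. */
        if (waited >= atomic_load(&trim_interval_secs)) {
            trim_once();
            waited = 0;
        }
    }
}

The test above drives exactly this observable behaviour: it lowers the interval over LDAP and then expects a "Trimmed" message to appear in the errors log within a few seconds.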
9061b4204d5e96742699d718e27cb92f47dd8550
389ds/389-ds-base
Issue 5237 - audit-ci: Cannot convert undefined or null to object Description: Update audit-ci to the latest version that works with NPM >=7. Fixes: https://github.com/389ds/389-ds-base/issues/5237 Reviewed by: @mreynolds389 (Thanks!)
commit 9061b4204d5e96742699d718e27cb92f47dd8550 Author: Viktor Ashirov <[email protected]> Date: Fri Mar 25 15:23:12 2022 +0100 Issue 5237 - audit-ci: Cannot convert undefined or null to object Description: Update audit-ci to the latest version that works with NPM >=7. Fixes: https://github.com/389ds/389-ds-base/issues/5237 Reviewed by: @mreynolds389 (Thanks!) diff --git a/src/cockpit/389-console/package.json b/src/cockpit/389-console/package.json index 78d66096e..f4ab79ffc 100644 --- a/src/cockpit/389-console/package.json +++ b/src/cockpit/389-console/package.json @@ -21,7 +21,7 @@ "@babel/eslint-parser": "^7.13.14", "@babel/preset-env": "^7.5.4", "@babel/preset-react": "^7.0.0", - "audit-ci": "^3.1.1", + "audit-ci": "^6.1.1", "babel-loader": "^8.0.6", "cockpit": "^0.1.1", "compression-webpack-plugin": "^9.0.0",
0
deef3c534ed6d5e83c03564a2a609df04ce0aa8d
389ds/389-ds-base
bump version to 1.3.5.12
commit deef3c534ed6d5e83c03564a2a609df04ce0aa8d Author: Noriko Hosoi <[email protected]> Date: Thu Aug 4 15:50:30 2016 -0700 bump version to 1.3.5.12 diff --git a/VERSION.sh b/VERSION.sh index fb72bed58..ebcf385af 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -10,7 +10,7 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=1 VERSION_MINOR=3 -VERSION_MAINT=5.11 +VERSION_MAINT=5.12 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=`date -u +%Y%m%d%H%M%S`
0
9d02126770da84e2a48e9b3c0c36536131c4d71a
389ds/389-ds-base
Bug 568196 - Install DS8.2 on Solaris fails https://bugzilla.redhat.com/show_bug.cgi?id=568196 Resolves: bug 568196 Bug Description: Install DS8.2 on Solaris fails Reviewed by: nhosoi (Thanks!) Branch: HEAD Fix Description: Add pcre_libdir to script wrappers for programs that use pcre Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no (cherry picked from commit 3d27198f45b4b25df667d3f86dce66a44f4bc65d)
commit 9d02126770da84e2a48e9b3c0c36536131c4d71a Author: Rich Megginson <[email protected]> Date: Thu Feb 25 11:10:53 2010 -0700 Bug 568196 - Install DS8.2 on Solaris fails https://bugzilla.redhat.com/show_bug.cgi?id=568196 Resolves: bug 568196 Bug Description: Install DS8.2 on Solaris fails Reviewed by: nhosoi (Thanks!) Branch: HEAD Fix Description: Add pcre_libdir to script wrappers for programs that use pcre Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no (cherry picked from commit 3d27198f45b4b25df667d3f86dce66a44f4bc65d) diff --git a/Makefile.am b/Makefile.am index 7b903ae5c..c3ec2f6f7 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1291,6 +1291,7 @@ fixupcmd = sed \ -e 's,@sasl_libdir\@,$(libdir),g' \ -e 's,@sasl_path\@,$(libdir)/sasl2,g' \ -e 's,@netsnmp_libdir\@,$(libdir),g' \ + -e 's,@pcre_libdir\@,$(libdir),g' \ -e 's,@propertydir\@,$(propertydir),g' \ -e 's,@datadir\@,$(datadir),g' \ -e 's,@schemadir\@,$(schemadir),g' \ @@ -1344,6 +1345,7 @@ fixupcmd = sed \ -e 's,@sasl_libdir\@,$(sasl_libdir),g' \ -e 's,@sasl_path\@,@sasl_path@,g' \ -e 's,@netsnmp_libdir\@,$(netsnmp_libdir),g' \ + -e 's,@pcre_libdir\@,$(pcre_libdir),g' \ -e 's,@propertydir\@,$(propertydir),g' \ -e 's,@datadir\@,$(datadir),g' \ -e 's,@schemadir\@,$(schemadir),g' \ diff --git a/Makefile.in b/Makefile.in index 0a8b5abbd..bc4b1a615 100644 --- a/Makefile.in +++ b/Makefile.in @@ -2270,6 +2270,7 @@ rsearch_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBS @BUNDLE_FALSE@ -e 's,@sasl_libdir\@,$(sasl_libdir),g' \ @BUNDLE_FALSE@ -e 's,@sasl_path\@,@sasl_path@,g' \ @BUNDLE_FALSE@ -e 's,@netsnmp_libdir\@,$(netsnmp_libdir),g' \ +@BUNDLE_FALSE@ -e 's,@pcre_libdir\@,$(pcre_libdir),g' \ @BUNDLE_FALSE@ -e 's,@propertydir\@,$(propertydir),g' \ @BUNDLE_FALSE@ -e 's,@datadir\@,$(datadir),g' \ @BUNDLE_FALSE@ -e 's,@schemadir\@,$(schemadir),g' \ @@ -2333,6 +2334,7 @@ rsearch_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBS @BUNDLE_TRUE@ -e 's,@sasl_libdir\@,$(libdir),g' \ @BUNDLE_TRUE@ -e 's,@sasl_path\@,$(libdir)/sasl2,g' \ @BUNDLE_TRUE@ -e 's,@netsnmp_libdir\@,$(libdir),g' \ +@BUNDLE_TRUE@ -e 's,@pcre_libdir\@,$(libdir),g' \ @BUNDLE_TRUE@ -e 's,@propertydir\@,$(propertydir),g' \ @BUNDLE_TRUE@ -e 's,@datadir\@,$(datadir),g' \ @BUNDLE_TRUE@ -e 's,@schemadir\@,$(schemadir),g' \ diff --git a/ldap/admin/src/scripts/start-dirsrv.in b/ldap/admin/src/scripts/start-dirsrv.in index fb9bfdb08..46c48d712 100755 --- a/ldap/admin/src/scripts/start-dirsrv.in +++ b/ldap/admin/src/scripts/start-dirsrv.in @@ -22,9 +22,9 @@ start_instance() { fi prefix="$DS_ROOT" - LD_LIBRARY_PATH=$prefix$SERVER_DIR:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@ + LD_LIBRARY_PATH=$prefix$SERVER_DIR:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:@pcre_libdir@ export LD_LIBRARY_PATH - SHLIB_PATH=$prefix$SERVER_DIR:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@ + SHLIB_PATH=$prefix$SERVER_DIR:$prefix@nss_libdir@:$prefix@libdir@:@nss_libdir@:@pcre_libdir@ export SHLIB_PATH DS_CONFIG_DIR=$CONFIG_DIR diff --git a/wrappers/migratecred.in b/wrappers/migratecred.in index c1aeabf27..dd44800fb 100755 --- a/wrappers/migratecred.in +++ b/wrappers/migratecred.in @@ -4,7 +4,7 @@ ## (1) Specify variables used by this script. 
## ############################################################################### -LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@ +LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@:@pcre_libdir@ BIN_DIR=@bindir@ COMMAND=migratecred-bin diff --git a/wrappers/mmldif.in b/wrappers/mmldif.in index 3a89ce4ca..a11932e34 100755 --- a/wrappers/mmldif.in +++ b/wrappers/mmldif.in @@ -4,7 +4,7 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@ +LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@:@pcre_libdir@ BIN_DIR=@bindir@ COMMAND=mmldif-bin diff --git a/wrappers/pwdhash.in b/wrappers/pwdhash.in index ab84914b9..b3ef3fa9e 100755 --- a/wrappers/pwdhash.in +++ b/wrappers/pwdhash.in @@ -4,7 +4,7 @@ ## (1) Specify variables used by this script. ## ############################################################################### -LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@ +LIB_DIR=@serverdir@:@nss_libdir@:@nspr_libdir@:@ldapsdk_libdir@:@sasl_libdir@:@pcre_libdir@ BIN_DIR=@bindir@ COMMAND=pwdhash-bin
0
93a29584ddae52497b898b451c2c810244627acb
389ds/389-ds-base
Ticket 49305 - Need to wrap atomic calls Bug Description: Some RHEL 7.5 platforms (ppc 32bit) still do not support all the gcc built-in atomics. This breaks the downstream builds. Fix Description: Use wrapper functions for the atomic's using #define's to detect if builtin atomics are supported, otherwise use the generic nspr atomic functions. https://pagure.io/389-ds-base/issue/49305 Reviewed by: tbordaz, lkrispen, and wibrown(Thanks!!!)
commit 93a29584ddae52497b898b451c2c810244627acb Author: Mark Reynolds <[email protected]> Date: Thu Sep 28 10:38:20 2017 -0400 Ticket 49305 - Need to wrap atomic calls Bug Description: Some RHEL 7.5 platforms (ppc 32bit) still do not support all the gcc built-in atomics. This breaks the downstream builds. Fix Description: Use wrapper functions for the atomic's using #define's to detect if builtin atomics are supported, otherwise use the generic nspr atomic functions. https://pagure.io/389-ds-base/issue/49305 Reviewed by: tbordaz, lkrispen, and wibrown(Thanks!!!) diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c index a0a60c43c..1a9efef39 100644 --- a/ldap/servers/slapd/attrsyntax.c +++ b/ldap/servers/slapd/attrsyntax.c @@ -274,7 +274,7 @@ attr_syntax_get_by_oid_locking_optional(const char *oid, PRBool use_lock, PRUint } asi = (struct asyntaxinfo *)PL_HashTableLookup_const(ht, oid); if (asi) { - slapi_atomic_incr(&(asi->asi_refcnt), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_incr_64(&(asi->asi_refcnt), __ATOMIC_RELEASE); } if (use_lock) { AS_UNLOCK_READ(oid2asi_lock); @@ -371,7 +371,7 @@ attr_syntax_get_by_name_locking_optional(const char *name, PRBool use_lock, PRUi } asi = (struct asyntaxinfo *)PL_HashTableLookup_const(ht, name); if (NULL != asi) { - slapi_atomic_incr(&(asi->asi_refcnt), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_incr_64(&(asi->asi_refcnt), __ATOMIC_RELEASE); } if (use_lock) { AS_UNLOCK_READ(name2asi_lock); @@ -406,7 +406,7 @@ attr_syntax_return_locking_optional(struct asyntaxinfo *asi, PRBool use_lock) } if (NULL != asi) { PRBool delete_it = PR_FALSE; - if (0 == slapi_atomic_decr(&(asi->asi_refcnt), __ATOMIC_ACQ_REL, ATOMIC_LONG)) { + if (0 == slapi_atomic_decr_64(&(asi->asi_refcnt), __ATOMIC_ACQ_REL)) { delete_it = asi->asi_marked_for_delete; } @@ -540,7 +540,7 @@ attr_syntax_delete_no_lock(struct asyntaxinfo *asi, PL_HashTableRemove(ht, asi->asi_aliases[i]); } } - if (slapi_atomic_load(&(asi->asi_refcnt), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) { + if (slapi_atomic_load_64(&(asi->asi_refcnt), __ATOMIC_ACQUIRE) > 0) { asi->asi_marked_for_delete = PR_TRUE; } else { /* This is ok, but the correct thing is to call delete first, diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index c4c4959c7..9e557a24a 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -2880,7 +2880,7 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag /* it's like a semaphore -- when count > 0, any file handle that's in * the attrinfo will remain valid from here on. */ - slapi_atomic_incr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_incr_64(&(a->ai_dblayer_count), __ATOMIC_RELEASE); if (a->ai_dblayer && ((dblayer_handle *)(a->ai_dblayer))->dblayer_dbp) { /* This means that the pointer is valid, so we should return it. */ @@ -2938,7 +2938,7 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag /* some sort of error -- we didn't open a handle at all. * decrement the refcount back to where it was. 
*/ - slapi_atomic_decr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_decr_64(&(a->ai_dblayer_count), __ATOMIC_RELEASE); } return return_value; @@ -2950,7 +2950,7 @@ dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int open_flag int dblayer_release_index_file(backend *be __attribute__((unused)), struct attrinfo *a, DB *pDB __attribute__((unused))) { - slapi_atomic_decr(&(a->ai_dblayer_count), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_decr_64(&(a->ai_dblayer_count), __ATOMIC_RELEASE); return 0; } @@ -3057,7 +3057,7 @@ dblayer_erase_index_file_ex(backend *be, struct attrinfo *a, PRBool use_lock, in dblayer_release_index_file(be, a, db); - while (slapi_atomic_load(&(a->ai_dblayer_count), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) { + while (slapi_atomic_load_64(&(a->ai_dblayer_count), __ATOMIC_ACQUIRE) > 0) { /* someone is using this index file */ /* ASSUMPTION: you have already set the INDEX_OFFLINE flag, because * you intend to mess with this index. therefore no new requests diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index 289a149fa..fbbc8faa0 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -2249,14 +2249,14 @@ static int32_t g_virtual_watermark = 0; /* good enough to init */ int slapi_entry_vattrcache_watermark_isvalid(const Slapi_Entry *e) { - return e->e_virtual_watermark == slapi_atomic_load(&g_virtual_watermark, __ATOMIC_ACQUIRE, ATOMIC_INT); + return e->e_virtual_watermark == slapi_atomic_load_32(&g_virtual_watermark, __ATOMIC_ACQUIRE); } void slapi_entry_vattrcache_watermark_set(Slapi_Entry *e) { - e->e_virtual_watermark = slapi_atomic_load(&g_virtual_watermark, __ATOMIC_ACQUIRE, ATOMIC_INT); + e->e_virtual_watermark = slapi_atomic_load_32(&g_virtual_watermark, __ATOMIC_ACQUIRE); } void @@ -2269,8 +2269,8 @@ void slapi_entrycache_vattrcache_watermark_invalidate() { /* Make sure the value is never 0 */ - if (slapi_atomic_incr(&g_virtual_watermark, __ATOMIC_RELEASE, ATOMIC_INT) == 0) { - slapi_atomic_incr(&g_virtual_watermark, __ATOMIC_RELEASE, ATOMIC_INT); + if (slapi_atomic_incr_32(&g_virtual_watermark, __ATOMIC_RELEASE) == 0) { + slapi_atomic_incr_32(&g_virtual_watermark, __ATOMIC_RELEASE); } } diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index 4c54cf703..1ba30002f 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -1335,19 +1335,19 @@ static uint64_t active_threads = 0; void g_incr_active_threadcnt(void) { - slapi_atomic_incr(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_incr_64(&active_threads, __ATOMIC_RELEASE); } void g_decr_active_threadcnt(void) { - slapi_atomic_decr(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_decr_64(&active_threads, __ATOMIC_RELEASE); } uint64_t g_get_active_threadcnt(void) { - return slapi_atomic_load(&active_threads, __ATOMIC_RELEASE, ATOMIC_LONG); + return slapi_atomic_load_64(&active_threads, __ATOMIC_RELEASE); } /* @@ -1936,7 +1936,7 @@ config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf, size = NDN_DEFAULT_SIZE; } if (apply) { - slapi_atomic_store(&(slapdFrontendConfig->ndn_cache_max_size), &size, __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_store_64(&(slapdFrontendConfig->ndn_cache_max_size), size, __ATOMIC_RELEASE); } return retVal; @@ -3476,7 +3476,7 @@ int32_t config_get_dynamic_plugins(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->dynamic_plugins), 
__ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->dynamic_plugins), __ATOMIC_ACQUIRE); } @@ -3499,7 +3499,7 @@ int32_t config_get_cn_uses_dn_syntax_in_dns() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->cn_uses_dn_syntax_in_dns), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->cn_uses_dn_syntax_in_dns), __ATOMIC_ACQUIRE); } int32_t @@ -3544,7 +3544,7 @@ config_set_onoff(const char *attrname, char *value, int32_t *configvalue, char * newval = LDAP_OFF; } - slapi_atomic_store(configvalue, &newval, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(configvalue, newval, __ATOMIC_RELEASE); return retVal; } @@ -3916,7 +3916,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a retVal = LDAP_OPERATIONS_ERROR; } if (apply) { - slapi_atomic_store(&(slapdFrontendConfig->threadnumber), &threadnum, __ATOMIC_RELAXED, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->threadnumber), threadnum, __ATOMIC_RELAXED); } return retVal; } @@ -3925,7 +3925,7 @@ int config_set_maxthreadsperconn(const char *attrname, char *value, char *errorbuf, int apply) { int retVal = LDAP_SUCCESS; - long maxthreadnum = 0; + int32_t maxthreadnum = 0; char *endp = NULL; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -3935,7 +3935,7 @@ config_set_maxthreadsperconn(const char *attrname, char *value, char *errorbuf, } errno = 0; - maxthreadnum = strtol(value, &endp, 10); + maxthreadnum = (int32_t)strtol(value, &endp, 10); if (*endp != '\0' || errno == ERANGE || maxthreadnum < 1 || maxthreadnum > 65535) { slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, @@ -3945,7 +3945,7 @@ config_set_maxthreadsperconn(const char *attrname, char *value, char *errorbuf, } if (apply) { - slapi_atomic_store(&(slapdFrontendConfig->maxthreadsperconn), &maxthreadnum, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->maxthreadsperconn), maxthreadnum, __ATOMIC_RELEASE); } return retVal; } @@ -4083,7 +4083,7 @@ int config_set_ioblocktimeout(const char *attrname, char *value, char *errorbuf, int apply) { int retVal = LDAP_SUCCESS; - long nValue = 0; + int32_t nValue = 0; char *endp = NULL; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -4093,7 +4093,7 @@ config_set_ioblocktimeout(const char *attrname, char *value, char *errorbuf, int } errno = 0; - nValue = strtol(value, &endp, 10); + nValue = (int32_t)strtol(value, &endp, 10); if (*endp != '\0' || errno == ERANGE || nValue < 0) { slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid value \"%s\", I/O block timeout must range from 0 to %lld", @@ -4103,7 +4103,7 @@ config_set_ioblocktimeout(const char *attrname, char *value, char *errorbuf, int } if (apply) { - slapi_atomic_store(&(slapdFrontendConfig->ioblocktimeout), &nValue, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->ioblocktimeout), nValue, __ATOMIC_RELEASE); } return retVal; } @@ -4607,7 +4607,7 @@ int32_t config_get_sasl_mapping_fallback() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->sasl_mapping_fallback), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->sasl_mapping_fallback), __ATOMIC_ACQUIRE); } @@ -4615,14 +4615,14 @@ int32_t config_get_disk_monitoring() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - 
return slapi_atomic_load(&(slapdFrontendConfig->disk_monitoring), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->disk_monitoring), __ATOMIC_ACQUIRE); } int32_t config_get_disk_logging_critical() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->disk_logging_critical), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->disk_logging_critical), __ATOMIC_ACQUIRE); } int @@ -4669,14 +4669,14 @@ int32_t config_get_ldapi_switch() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->ldapi_switch), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->ldapi_switch), __ATOMIC_ACQUIRE); } int32_t config_get_ldapi_bind_switch() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->ldapi_bind_switch), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->ldapi_bind_switch), __ATOMIC_ACQUIRE); } char * @@ -4695,7 +4695,7 @@ int config_get_ldapi_map_entries() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->ldapi_map_entries), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->ldapi_map_entries), __ATOMIC_ACQUIRE); } char * @@ -4765,7 +4765,7 @@ int32_t config_get_slapi_counters() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->slapi_counters), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->slapi_counters), __ATOMIC_ACQUIRE); } @@ -4948,7 +4948,7 @@ int32_t config_get_pw_change(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_change), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_policy.pw_change), __ATOMIC_ACQUIRE); } @@ -4956,7 +4956,7 @@ int32_t config_get_pw_history(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_history), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_policy.pw_history), __ATOMIC_ACQUIRE); } @@ -4964,21 +4964,21 @@ int32_t config_get_pw_must_change(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_must_change), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_policy.pw_must_change), __ATOMIC_ACQUIRE); } int32_t config_get_allow_hashed_pw(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->allow_hashed_pw), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->allow_hashed_pw), __ATOMIC_ACQUIRE); } int32_t config_get_pw_syntax(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_syntax), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_policy.pw_syntax), __ATOMIC_ACQUIRE); } @@ -5167,21 +5167,21 @@ int32_t config_get_pw_is_global_policy(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return 
slapi_atomic_load(&(slapdFrontendConfig->pw_is_global_policy), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_is_global_policy), __ATOMIC_ACQUIRE); } int32_t config_get_pw_is_legacy_policy(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_is_legacy), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_policy.pw_is_legacy), __ATOMIC_ACQUIRE); } int32_t config_get_pw_exp(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_exp), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_policy.pw_exp), __ATOMIC_ACQUIRE); } @@ -5189,14 +5189,14 @@ int32_t config_get_pw_unlock(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_unlock), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_policy.pw_unlock), __ATOMIC_ACQUIRE); } int32_t config_get_pw_lockout() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->pw_policy.pw_lockout), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->pw_policy.pw_lockout), __ATOMIC_ACQUIRE); } int @@ -5216,112 +5216,112 @@ int32_t config_get_lastmod() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->lastmod), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->lastmod), __ATOMIC_ACQUIRE); } int32_t config_get_enquote_sup_oc() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->enquote_sup_oc), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->enquote_sup_oc), __ATOMIC_ACQUIRE); } int32_t config_get_nagle(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->nagle), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->nagle), __ATOMIC_ACQUIRE); } int32_t config_get_accesscontrol(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->accesscontrol), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->accesscontrol), __ATOMIC_ACQUIRE); } int32_t config_get_return_exact_case(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->return_exact_case), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->return_exact_case), __ATOMIC_ACQUIRE); } int32_t config_get_result_tweak(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->result_tweak), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->result_tweak), __ATOMIC_ACQUIRE); } int32_t config_get_moddn_aci(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->moddn_aci), __ATOMIC_ACQUIRE); } int32_t config_get_security(void) { slapdFrontendConfig_t *slapdFrontendConfig = 
getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->security), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->security), __ATOMIC_ACQUIRE); } int32_t slapi_config_get_readonly(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->readonly), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->readonly), __ATOMIC_ACQUIRE); } int32_t config_get_schemacheck(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->schemacheck), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->schemacheck), __ATOMIC_ACQUIRE); } int32_t config_get_schemamod(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->schemamod), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->schemamod), __ATOMIC_ACQUIRE); } int32_t config_get_syntaxcheck(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->syntaxcheck), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->syntaxcheck), __ATOMIC_ACQUIRE); } int32_t config_get_syntaxlogging(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->syntaxlogging), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->syntaxlogging), __ATOMIC_ACQUIRE); } int32_t config_get_dn_validate_strict(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->dn_validate_strict), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->dn_validate_strict), __ATOMIC_ACQUIRE); } int32_t config_get_ds4_compatible_schema(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->ds4_compatible_schema), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->ds4_compatible_schema), __ATOMIC_ACQUIRE); } int32_t config_get_schema_ignore_trailing_spaces(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->schema_ignore_trailing_spaces), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->schema_ignore_trailing_spaces), __ATOMIC_ACQUIRE); } char * @@ -5405,7 +5405,7 @@ config_get_threadnumber(void) slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); int32_t retVal; - retVal = slapi_atomic_load(&(slapdFrontendConfig->threadnumber), __ATOMIC_RELAXED, ATOMIC_INT); + retVal = slapi_atomic_load_32(&(slapdFrontendConfig->threadnumber), __ATOMIC_RELAXED); if (retVal <= 0) { retVal = util_get_hardware_threads(); @@ -5423,7 +5423,7 @@ int32_t config_get_maxthreadsperconn() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->maxthreadsperconn), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->maxthreadsperconn), __ATOMIC_ACQUIRE); } int @@ -5455,7 +5455,7 @@ int32_t config_get_ioblocktimeout() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->ioblocktimeout), __ATOMIC_ACQUIRE, ATOMIC_INT); + return 
slapi_atomic_load_32(&(slapdFrontendConfig->ioblocktimeout), __ATOMIC_ACQUIRE); } int @@ -5772,21 +5772,21 @@ int32_t config_get_unauth_binds_switch(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->allow_unauth_binds), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->allow_unauth_binds), __ATOMIC_ACQUIRE); } int32_t config_get_require_secure_binds(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->require_secure_binds), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->require_secure_binds), __ATOMIC_ACQUIRE); } int32_t config_get_anon_access_switch(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->allow_anon_access), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->allow_anon_access), __ATOMIC_ACQUIRE); } int @@ -6028,7 +6028,7 @@ int32_t config_get_minssf_exclude_rootdse() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->minssf_exclude_rootdse), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->minssf_exclude_rootdse), __ATOMIC_ACQUIRE); } @@ -6057,7 +6057,7 @@ config_set_max_filter_nest_level(const char *attrname, char *value, char *errorb return retVal; } - slapi_atomic_store(&(slapdFrontendConfig->max_filter_nest_level), &level, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->max_filter_nest_level), level, __ATOMIC_RELEASE); return retVal; } @@ -6065,28 +6065,28 @@ int32_t config_get_max_filter_nest_level() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->max_filter_nest_level), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->max_filter_nest_level), __ATOMIC_ACQUIRE); } uint64_t config_get_ndn_cache_size() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->ndn_cache_max_size), __ATOMIC_ACQUIRE, ATOMIC_LONG); + return slapi_atomic_load_64(&(slapdFrontendConfig->ndn_cache_max_size), __ATOMIC_ACQUIRE); } int32_t config_get_ndn_cache_enabled() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->ndn_cache_enabled), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->ndn_cache_enabled), __ATOMIC_ACQUIRE); } int32_t config_get_return_orig_type_switch() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->return_orig_type), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->return_orig_type), __ATOMIC_ACQUIRE); } char * @@ -6788,7 +6788,7 @@ int32_t config_get_force_sasl_external(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->force_sasl_external), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->force_sasl_external), __ATOMIC_ACQUIRE); } int32_t @@ -6810,7 +6810,7 @@ int32_t config_get_entryusn_global(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->entryusn_global), __ATOMIC_ACQUIRE, ATOMIC_INT); 
+ return slapi_atomic_load_32(&(slapdFrontendConfig->entryusn_global), __ATOMIC_ACQUIRE); } int32_t @@ -7048,21 +7048,21 @@ int32_t config_get_enable_turbo_mode(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->enable_turbo_mode), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->enable_turbo_mode), __ATOMIC_ACQUIRE); } int32_t config_get_connection_nocanon(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->connection_nocanon), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->connection_nocanon), __ATOMIC_ACQUIRE); } int32_t config_get_plugin_logging(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->plugin_logging), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->plugin_logging), __ATOMIC_ACQUIRE); } int32_t @@ -7075,21 +7075,21 @@ int32_t config_get_unhashed_pw_switch() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->unhashed_pw_switch), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->unhashed_pw_switch), __ATOMIC_ACQUIRE); } int32_t config_get_ignore_time_skew(void) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->ignore_time_skew), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->ignore_time_skew), __ATOMIC_ACQUIRE); } int32_t config_get_global_backend_lock() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - return slapi_atomic_load(&(slapdFrontendConfig->global_backend_lock), __ATOMIC_ACQUIRE, ATOMIC_INT); + return slapi_atomic_load_32(&(slapdFrontendConfig->global_backend_lock), __ATOMIC_ACQUIRE); } int32_t @@ -7185,7 +7185,7 @@ config_set_connection_buffer(const char *attrname, char *value, char *errorbuf, } val = atoi(value); - slapi_atomic_store(&(slapdFrontendConfig->connection_buffer), &val, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->connection_buffer), val, __ATOMIC_RELEASE); return retVal; } @@ -7209,7 +7209,7 @@ config_set_listen_backlog_size(const char *attrname, char *value, char *errorbuf } if (apply) { - slapi_atomic_store(&(slapdFrontendConfig->listen_backlog_size), &size, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->listen_backlog_size), size, __ATOMIC_RELEASE); } return LDAP_SUCCESS; } @@ -7622,7 +7622,7 @@ config_set_accesslog_enabled(int value) char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE]; errorbuf[0] = '\0'; - slapi_atomic_store(&(slapdFrontendConfig->accesslog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->accesslog_logging_enabled), value, __ATOMIC_RELEASE); if (value) { log_set_logging(CONFIG_ACCESSLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_ACCESS_LOG, errorbuf, CONFIG_APPLY); } else { @@ -7640,7 +7640,7 @@ config_set_auditlog_enabled(int value) char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE]; errorbuf[0] = '\0'; - slapi_atomic_store(&(slapdFrontendConfig->auditlog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->auditlog_logging_enabled), value, __ATOMIC_RELEASE); if (value) { log_set_logging(CONFIG_AUDITLOG_LOGGING_ENABLED_ATTRIBUTE, "on", 
SLAPD_AUDIT_LOG, errorbuf, CONFIG_APPLY); } else { @@ -7658,7 +7658,7 @@ config_set_auditfaillog_enabled(int value) char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE]; errorbuf[0] = '\0'; - slapi_atomic_store(&(slapdFrontendConfig->auditfaillog_logging_enabled), &value, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->auditfaillog_logging_enabled), value, __ATOMIC_RELEASE); if (value) { log_set_logging(CONFIG_AUDITFAILLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_AUDITFAIL_LOG, errorbuf, CONFIG_APPLY); } else { @@ -7736,7 +7736,7 @@ config_set_malloc_mxfast(const char *attrname, char *value, char *errorbuf, int { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); int max = 80 * (sizeof(size_t) / 4); - int mxfast; + int32_t mxfast; char *endp = NULL; if (config_value_is_null(attrname, value, errorbuf, 0)) { @@ -7749,7 +7749,7 @@ config_set_malloc_mxfast(const char *attrname, char *value, char *errorbuf, int value, CONFIG_MALLOC_MXFAST, max); return LDAP_OPERATIONS_ERROR; } - slapi_atomic_store(&(slapdFrontendConfig->malloc_mxfast), &mxfast, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->malloc_mxfast), mxfast, __ATOMIC_RELEASE); if ((mxfast >= 0) && (mxfast <= max)) { mallopt(M_MXFAST, mxfast); @@ -7775,7 +7775,7 @@ int config_set_malloc_trim_threshold(const char *attrname, char *value, char *errorbuf, int apply __attribute__((unused))) { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - int trim_threshold; + int32_t trim_threshold; char *endp = NULL; if (config_value_is_null(attrname, value, errorbuf, 0)) { @@ -7789,7 +7789,7 @@ config_set_malloc_trim_threshold(const char *attrname, char *value, char *errorb return LDAP_OPERATIONS_ERROR; } - slapi_atomic_store(&(slapdFrontendConfig->malloc_trim_threshold), &trim_threshold, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->malloc_trim_threshold), trim_threshold, __ATOMIC_RELEASE); if (trim_threshold >= -1) { mallopt(M_TRIM_THRESHOLD, trim_threshold); @@ -7836,7 +7836,7 @@ config_set_malloc_mmap_threshold(const char *attrname, char *value, char *errorb return LDAP_OPERATIONS_ERROR; } - slapi_atomic_store(&(slapdFrontendConfig->malloc_mmap_threshold), &mmap_threshold, __ATOMIC_RELEASE, ATOMIC_INT); + slapi_atomic_store_32(&(slapdFrontendConfig->malloc_mmap_threshold), mmap_threshold, __ATOMIC_RELEASE); if ((mmap_threshold >= 0) && (mmap_threshold <= max)) { mallopt(M_MMAP_THRESHOLD, mmap_threshold); diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 4d44c87cb..e16d89cc5 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -4942,13 +4942,12 @@ static LogBufferInfo * log_create_buffer(size_t sz) { LogBufferInfo *lbi; - uint64_t init_val = 0; lbi = (LogBufferInfo *)slapi_ch_malloc(sizeof(LogBufferInfo)); lbi->top = (char *)slapi_ch_malloc(sz); lbi->current = lbi->top; lbi->maxsize = sz; - slapi_atomic_store(&(lbi->refcount), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_store_64(&(lbi->refcount), 0, __ATOMIC_RELEASE); return lbi; } @@ -5010,7 +5009,7 @@ log_append_buffer2(time_t tnl, LogBufferInfo *lbi, char *msg1, size_t size1, cha insert_point = lbi->current; lbi->current += size; /* Increment the copy refcount */ - slapi_atomic_incr(&(lbi->refcount), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_incr_64(&(lbi->refcount), __ATOMIC_RELEASE); PR_Unlock(lbi->lock); /* Now we can copy without holding the lock */ @@ -5018,7 +5017,7 @@ log_append_buffer2(time_t tnl, LogBufferInfo *lbi, char 
*msg1, size_t size1, cha memcpy(insert_point + size1, msg2, size2); /* Decrement the copy refcount */ - slapi_atomic_decr(&(lbi->refcount), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_decr_64(&(lbi->refcount), __ATOMIC_RELEASE); /* If we are asked to sync to disk immediately, do so */ if (!slapdFrontendConfig->accesslogbuffering) { @@ -5038,7 +5037,7 @@ log_flush_buffer(LogBufferInfo *lbi, int type, int sync_now) if (type == SLAPD_ACCESS_LOG) { /* It is only safe to flush once any other threads which are copying are finished */ - while (slapi_atomic_load(&(lbi->refcount), __ATOMIC_ACQUIRE, ATOMIC_LONG) > 0) { + while (slapi_atomic_load_64(&(lbi->refcount), __ATOMIC_ACQUIRE) > 0) { /* It's ok to sleep for a while because we only flush every second or so */ DS_Sleep(PR_MillisecondsToInterval(1)); } diff --git a/ldap/servers/slapd/mapping_tree.c b/ldap/servers/slapd/mapping_tree.c index 6621cebd5..8cc531834 100644 --- a/ldap/servers/slapd/mapping_tree.c +++ b/ldap/servers/slapd/mapping_tree.c @@ -1647,7 +1647,7 @@ mapping_tree_init() /* we call this function from a single thread, so it should be ok */ - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown has been detected */ return 0; } @@ -1759,8 +1759,6 @@ mtn_free_node(mapping_tree_node **node) void mapping_tree_free() { - int init_val = 1; - /* unregister dse callbacks */ slapi_config_remove_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, MAPPING_TREE_BASE_DN, LDAP_SCOPE_BASE, "(objectclass=*)", mapping_tree_entry_modify_callback); slapi_config_remove_callback(SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, MAPPING_TREE_BASE_DN, LDAP_SCOPE_BASE, "(objectclass=*)", mapping_tree_entry_add_callback); @@ -1773,7 +1771,7 @@ mapping_tree_free() slapi_unregister_backend_state_change_all(); /* recursively free tree nodes */ mtn_free_node(&mapping_tree_root); - slapi_atomic_store(&mapping_tree_freed, &init_val, __ATOMIC_RELAXED, ATOMIC_INT); + slapi_atomic_store_32(&mapping_tree_freed, 1, __ATOMIC_RELAXED); } /* This function returns the first node to parse when a search is done @@ -2024,7 +2022,7 @@ slapi_dn_write_needs_referral(Slapi_DN *target_sdn, Slapi_Entry **referral) mapping_tree_node *target_node = NULL; int ret = 0; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ goto done; } @@ -2095,7 +2093,7 @@ slapi_mapping_tree_select(Slapi_PBlock *pb, Slapi_Backend **be, Slapi_Entry **re int fixup = 0; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ return LDAP_OPERATIONS_ERROR; } @@ -2200,7 +2198,7 @@ slapi_mapping_tree_select_all(Slapi_PBlock *pb, Slapi_Backend **be_list, Slapi_E int flag_partial_result = 0; int op_type; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { return LDAP_OPERATIONS_ERROR; } @@ -2360,7 +2358,7 @@ slapi_mapping_tree_select_and_check(Slapi_PBlock *pb, char *newdn, Slapi_Backend int ret; int need_unlock = 0; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { return LDAP_OPERATIONS_ERROR; } @@ -2526,7 +2524,7 @@ mtn_get_be(mapping_tree_node *target_node, Slapi_PBlock *pb, Slapi_Backend **be, int 
flag_stop = 0; struct slapi_componentid *cid = NULL; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shut down detected */ return LDAP_OPERATIONS_ERROR; } @@ -2714,7 +2712,7 @@ best_matching_child(mapping_tree_node *parent, mapping_tree_node *highest_match_node = NULL; mapping_tree_node *current; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ return NULL; } @@ -2741,7 +2739,7 @@ mtn_get_mapping_tree_node_by_entry(mapping_tree_node *node, const Slapi_DN *dn) { mapping_tree_node *found_node = NULL; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ return NULL; } @@ -2784,7 +2782,7 @@ slapi_get_mapping_tree_node_by_dn(const Slapi_DN *dn) mapping_tree_node *current_best_match = mapping_tree_root; mapping_tree_node *next_best_match = mapping_tree_root; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ return NULL; } @@ -2818,7 +2816,7 @@ get_mapping_tree_node_by_name(mapping_tree_node *node, char *be_name) int i; mapping_tree_node *found_node = NULL; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ return NULL; } @@ -2865,7 +2863,7 @@ slapi_get_mapping_tree_node_configdn(const Slapi_DN *root) { char *dn = NULL; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ return NULL; } @@ -2892,7 +2890,7 @@ slapi_get_mapping_tree_node_configsdn(const Slapi_DN *root) char *dn = NULL; Slapi_DN *sdn = NULL; - if (slapi_atomic_load(&mapping_tree_freed, __ATOMIC_RELAXED, ATOMIC_INT)) { + if (slapi_atomic_load_32(&mapping_tree_freed, __ATOMIC_RELAXED)) { /* shutdown detected */ return NULL; } diff --git a/ldap/servers/slapd/object.c b/ldap/servers/slapd/object.c index 6a1a9a56b..8e55a16f8 100644 --- a/ldap/servers/slapd/object.c +++ b/ldap/servers/slapd/object.c @@ -43,12 +43,11 @@ Object * object_new(void *user_data, FNFree destructor) { Object *o; - uint64_t init_val = 1; o = (object *)slapi_ch_malloc(sizeof(object)); o->destructor = destructor; o->data = user_data; - slapi_atomic_store(&(o->refcnt), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_store_64(&(o->refcnt), 1, __ATOMIC_RELEASE); return o; } @@ -62,7 +61,7 @@ void object_acquire(Object *o) { PR_ASSERT(NULL != o); - slapi_atomic_incr(&(o->refcnt), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_incr_64(&(o->refcnt), __ATOMIC_RELEASE); } @@ -77,7 +76,7 @@ object_release(Object *o) PRInt32 refcnt_after_release; PR_ASSERT(NULL != o); - refcnt_after_release = slapi_atomic_decr(&(o->refcnt), __ATOMIC_ACQ_REL, ATOMIC_LONG); + refcnt_after_release = slapi_atomic_decr_64(&(o->refcnt), __ATOMIC_ACQ_REL); if (refcnt_after_release == 0) { /* Object can be destroyed */ if (o->destructor) diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c index 70c530b69..e0dd2bf89 100644 --- a/ldap/servers/slapd/psearch.c +++ b/ldap/servers/slapd/psearch.c @@ -134,7 +134,7 @@ ps_stop_psearch_system() if (PS_IS_INITIALIZED()) { PSL_LOCK_WRITE(); for 
(ps = psearch_list->pl_head; NULL != ps; ps = ps->ps_next) { - slapi_atomic_incr(&(ps->ps_complete), __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_incr_64(&(ps->ps_complete), __ATOMIC_RELEASE); } PSL_UNLOCK_WRITE(); ps_wakeup_all(); @@ -285,7 +285,7 @@ ps_send_results(void *arg) PR_Lock(psearch_list->pl_cvarlock); - while ((conn_acq_flag == 0) && slapi_atomic_load(&(ps->ps_complete), __ATOMIC_ACQUIRE, ATOMIC_LONG) == 0) { + while ((conn_acq_flag == 0) && slapi_atomic_load_64(&(ps->ps_complete), __ATOMIC_ACQUIRE) == 0) { /* Check for an abandoned operation */ if (pb_op == NULL || slapi_op_abandoned(ps->ps_pblock)) { slapi_log_err(SLAPI_LOG_CONNS, "ps_send_results", @@ -427,7 +427,6 @@ static PSearch * psearch_alloc(void) { PSearch *ps; - uint64_t init_val = 0; ps = (PSearch *)slapi_ch_calloc(1, sizeof(PSearch)); @@ -438,7 +437,7 @@ psearch_alloc(void) slapi_ch_free((void **)&ps); return (NULL); } - slapi_atomic_store(&(ps->ps_complete), &init_val, __ATOMIC_RELEASE, ATOMIC_LONG); + slapi_atomic_store_64(&(ps->ps_complete), 0, __ATOMIC_RELEASE); ps->ps_eq_head = ps->ps_eq_tail = (PSEQNode *)NULL; ps->ps_lasttime = (time_t)0L; ps->ps_next = NULL; diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index c434add67..4566202d3 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -8202,56 +8202,87 @@ void slapi_operation_time_initiated(Slapi_Operation *o, struct timespec *initiat */ #endif -/* See: https://gcc.gnu.org/ml/gcc/2016-11/txt6ZlA_JS27i.txt */ -#define ATOMIC_GENERIC 0 -#define ATOMIC_INT 4 -#define ATOMIC_LONG 8 -#define ATOMIC_INT128 16 /* Future */ +/** + * Store a 32bit integral value atomicly + * + * \param ptr - integral pointer + * \param val - pointer to integral value (use integral type int32_t with ATOMIC_INT, or uint64_t + * with ATOMIC_LONG & ATOMIC_GENERIC) + * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, + * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST + */ +void slapi_atomic_store_32(int32_t *ptr, int32_t val, int memorder); /** - * Store an integral value atomicly + * Store a 64bit integral value atomicly * * \param ptr - integral pointer * \param val - pointer to integral value (use integral type int32_t with ATOMIC_INT, or uint64_t * with ATOMIC_LONG & ATOMIC_GENERIC) * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST - * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG */ -void slapi_atomic_store(void *ptr, void *val, int memorder, int type); +void slapi_atomic_store_64(uint64_t *ptr, uint64_t val, int memorder); /** - * Get an integral value atomicly + * Get a 32bit integral value atomicly * * \param ptr - integral pointer * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST - * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG * \return - */ -uint64_t slapi_atomic_load(void *ptr, int memorder, int type); +int32_t slapi_atomic_load_32(int32_t *ptr, int memorder); /** - * Increment integral atomicly + * Get a 64bit integral value atomicly + * + * \param ptr - integral pointer + * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, + * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST + * \return ptr value + */ +uint64_t slapi_atomic_load_64(uint64_t *ptr, int memorder); + +/** + * Increment a 32bit integral atomicly * * \param ptr - pointer to integral to 
increment * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST - * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG * \return - new value of ptr */ -uint64_t slapi_atomic_incr(void *ptr, int memorder, int type); +int32_t slapi_atomic_incr_32(int32_t *ptr, int memorder); + +/** + * Increment a 64bitintegral atomicly + * + * \param ptr - pointer to integral to increment + * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, + * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST + * \return - new value of ptr + */ +uint64_t slapi_atomic_incr_64(uint64_t *ptr, int memorder); + +/** + * Decrement a 32bit integral atomicly + * + * \param ptr - pointer to integral to decrement + * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, + * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST + * \return - new value of ptr + */ +int32_t slapi_atomic_decr_32(int32_t *ptr, int memorder); /** - * Decrement integral atomicly + * Decrement a 64bitintegral atomicly * * \param ptr - pointer to integral to decrement * \param memorder - __ATOMIC_RELAXED, __ATOMIC_CONSUME, __ATOMIC_ACQUIRE, * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, __ATOMIC_SEQ_CST - * \param type - "ptr" type: ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG * \return - new value of ptr */ -uint64_t slapi_atomic_decr(void *ptr, int memorder, int type); +uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder); #ifdef __cplusplus diff --git a/ldap/servers/slapd/slapi_counter.c b/ldap/servers/slapd/slapi_counter.c index 9e705b39b..c5cae2703 100644 --- a/ldap/servers/slapd/slapi_counter.c +++ b/ldap/servers/slapd/slapi_counter.c @@ -295,53 +295,41 @@ slapi_counter_get_value(Slapi_Counter *counter) * __ATOMIC_RELEASE, __ATOMIC_ACQ_REL, or __ATOMIC_SEQ_CST * * See: https://gcc.gnu.org/onlinedocs/gcc-4.9.2/gcc/_005f_005fatomic-Builtins.html - * - * type_size - ATOMIC_GENERIC, ATOMIC_INT, or ATOMIC_LONG, see slapi-plugin.h for more info - * - * Future: - * If we need to support ATOMIC_INT128 (not available on 32bit systems): - * __atomic_store_16((uint64_t *)&ptr, val, memorder); - * __atomic_load_16((uint64_t *)&ptr, memorder); - * __atomic_add_fetch_16((uint64_t *)&ptr, 1, memorder); - * __atomic_sub_fetch_16((uint64_t *)&ptr, 1, memorder); */ /* - * "val" must be either int32_t or uint64_t + * atomic store functions (32bit and 64bit) */ void -slapi_atomic_store(void *ptr, void *val, int memorder, int type_size) +slapi_atomic_store_32(int32_t *ptr, int32_t val, int memorder) { #ifdef ATOMIC_64BIT_OPERATIONS - if (type_size == ATOMIC_INT) { - __atomic_store_4((int32_t *)ptr, *(int32_t *)val, memorder); - } else if (type_size == ATOMIC_LONG) { - __atomic_store_8((uint64_t *)ptr, *(uint64_t *)val, memorder); - } else { - /* ATOMIC_GENERIC or unknown size */ - __atomic_store((uint64_t *)&ptr, (uint64_t *)val, memorder); - } + __atomic_store_4(ptr, val, memorder); #else PRInt32 *pr_ptr = (PRInt32 *)ptr; - PR_AtomicSet(pr_ptr, *(PRInt32 *)val); + PR_AtomicSet(pr_ptr, (PRInt32)val); #endif } -uint64_t -slapi_atomic_load(void *ptr, int memorder, int type_size) +void +slapi_atomic_store_64(uint64_t *ptr, uint64_t val, int memorder) { #ifdef ATOMIC_64BIT_OPERATIONS - uint64_t ret; + __atomic_store_8(ptr, val, memorder); +#else + PRInt32 *pr_ptr = (PRInt32 *)ptr; + PR_AtomicSet(pr_ptr, (PRInt32)val); +#endif +} - if (type_size == ATOMIC_INT) { - return __atomic_load_4((int32_t *)ptr, memorder); - } else if (type_size == 
ATOMIC_LONG) { - return __atomic_load_8((uint64_t *)ptr, memorder); - } else { - /* ATOMIC_GENERIC or unknown size */ - __atomic_load((uint64_t *)ptr, &ret, memorder); - return ret; - } +/* + * atomic load functions (32bit and 64bit) + */ +int32_t +slapi_atomic_load_32(int32_t *ptr, int memorder) +{ +#ifdef ATOMIC_64BIT_OPERATIONS + return __atomic_load_4(ptr, memorder); #else PRInt32 *pr_ptr = (PRInt32 *)ptr; return PR_AtomicAdd(pr_ptr, 0); @@ -349,17 +337,24 @@ slapi_atomic_load(void *ptr, int memorder, int type_size) } uint64_t -slapi_atomic_incr(void *ptr, int memorder, int type_size) +slapi_atomic_load_64(uint64_t *ptr, int memorder) { #ifdef ATOMIC_64BIT_OPERATIONS - if (type_size == ATOMIC_INT) { - return __atomic_add_fetch_4((int32_t *)ptr, 1, memorder); - } else if (type_size == ATOMIC_LONG) { - return __atomic_add_fetch_8((uint64_t *)ptr, 1, memorder); - } else { - /* ATOMIC_GENERIC or unknown size */ - return __atomic_add_fetch((uint64_t *)ptr, 1, memorder); - } + return __atomic_load_8(ptr, memorder); +#else + PRInt32 *pr_ptr = (PRInt32 *)ptr; + return PR_AtomicAdd(pr_ptr, 0); +#endif +} + +/* + * atomic increment functions (32bit and 64bit) + */ +int32_t +slapi_atomic_incr_32(int32_t *ptr, int memorder) +{ +#ifdef ATOMIC_64BIT_OPERATIONS + return __atomic_add_fetch_4(ptr, 1, memorder); #else PRInt32 *pr_ptr = (PRInt32 *)ptr; return PR_AtomicIncrement(pr_ptr); @@ -367,17 +362,35 @@ slapi_atomic_incr(void *ptr, int memorder, int type_size) } uint64_t -slapi_atomic_decr(void *ptr, int memorder, int type_size) +slapi_atomic_incr_64(uint64_t *ptr, int memorder) { #ifdef ATOMIC_64BIT_OPERATIONS - if (type_size == ATOMIC_INT) { - return __atomic_sub_fetch_4((int32_t *)ptr, 1, memorder); - } else if (type_size == ATOMIC_LONG) { - return __atomic_sub_fetch_8((uint64_t *)ptr, 1, memorder); - } else { - /* ATOMIC_GENERIC or unknown size */ - return __atomic_sub_fetch((uint64_t *)ptr, 1, memorder); - } + return __atomic_add_fetch_8(ptr, 1, memorder); +#else + PRInt32 *pr_ptr = (PRInt32 *)ptr; + return PR_AtomicIncrement(pr_ptr); +#endif +} + +/* + * atomic decrement functions (32bit and 64bit) + */ +int32_t +slapi_atomic_decr_32(int32_t *ptr, int memorder) +{ +#ifdef ATOMIC_64BIT_OPERATIONS + return __atomic_sub_fetch_4(ptr, 1, memorder); +#else + PRInt32 *pr_ptr = (PRInt32 *)ptr; + return PR_AtomicDecrement(pr_ptr); +#endif +} + +uint64_t +slapi_atomic_decr_64(uint64_t *ptr, int memorder) +{ +#ifdef ATOMIC_64BIT_OPERATIONS + return __atomic_sub_fetch_8(ptr, 1, memorder); #else PRInt32 *pr_ptr = (PRInt32 *)ptr; return PR_AtomicDecrement(pr_ptr);
0
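As a rough, hypothetical sketch of how a caller would use the sized 64-bit atomic helpers that the patch above declares in slapi-plugin.h (slapi_atomic_store_64, slapi_atomic_incr_64, slapi_atomic_decr_64): it mirrors the refcount pattern from object.c in that diff, assumes a 389-ds plugin build environment, and the my_counted type and function names are invented for illustration only.

/* Hypothetical example of the sized atomic API from the patch above.
 * The my_counted struct is not part of 389-ds; only the slapi_atomic_*_64
 * signatures come from the slapi-plugin.h change shown in the diff.
 * The header path may differ depending on how the headers are installed. */
#include <stdint.h>
#include "slapi-plugin.h"

typedef struct my_counted
{
    uint64_t refcnt;
    void *payload;
} my_counted;

static void
my_counted_init(my_counted *c)
{
    /* the sized store takes the value directly, no pointer-to-value indirection */
    slapi_atomic_store_64(&(c->refcnt), 1, __ATOMIC_RELEASE);
}

static void
my_counted_acquire(my_counted *c)
{
    slapi_atomic_incr_64(&(c->refcnt), __ATOMIC_RELEASE);
}

static int
my_counted_release(my_counted *c)
{
    /* incr/decr return the new value, so 0 means the last reference is gone */
    return (slapi_atomic_decr_64(&(c->refcnt), __ATOMIC_ACQ_REL) == 0);
}

The memory orders follow the choices visible in the patch (release on writes, acquire-release on the final decrement); a stricter __ATOMIC_SEQ_CST would also be correct at some cost.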
3c90702240c189df2ac0384c73dd203b0e5d8c72
389ds/389-ds-base
Issue 5429 - healthcheck - add checks for MemberOf group attrs being indexed Description: Add a lint check to make sure the group attributes used in the memberOf plugin are indexed relates: https://github.com/389ds/389-ds-base/issues/5429 Reviewed by: spichugi & tbordaz (Thanks!!)
commit 3c90702240c189df2ac0384c73dd203b0e5d8c72 Author: Mark Reynolds <[email protected]> Date: Tue Nov 1 15:50:07 2022 -0400 Issue 5429 - healthcheck - add checks for MemberOf group attrs being indexed Description: Add a lint chek to make sure the group attributes used in the memberOf plugin are indexed relates: https://github.com/389ds/389-ds-base/issues/5429 Reviewed by: spichugi & tbordaz (Thanks!!) diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py index d6e9bb273..6a011a70e 100644 --- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py +++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2020 Red Hat, Inc. +# Copyright (C) 2022 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). @@ -16,7 +16,7 @@ from lib389.cos import CosTemplates, CosPointerDefinitions from lib389.dbgen import dbgen_users from lib389.idm.account import Accounts from lib389.index import Index -from lib389.plugins import ReferentialIntegrityPlugin +from lib389.plugins import ReferentialIntegrityPlugin, MemberOfPlugin from lib389.utils import * from lib389._constants import * from lib389.cli_base import FakeArgs @@ -37,7 +37,7 @@ def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searc args.verbose = instance.verbose args.list_errors = False args.list_checks = False - args.check = ['config', 'refint', 'backends', 'monitor-disk-space', 'logs'] + args.check = ['config', 'refint', 'backends', 'monitor-disk-space', 'logs', 'memberof'] args.dry_run = False if json: @@ -224,6 +224,53 @@ def test_healthcheck_RI_plugin_missing_indexes(topology_st): run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) +def test_healthcheck_MO_plugin_missing_indexes(topology_st): + """Check if HealthCheck returns DSMOLE0002 code + + :id: 236b0ec2-13da-48fb-b65a-db7406d56d5d + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Configure the instance with MO Plugin with two memberOfGroupAttrs + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. Add index for new group attr + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSMOLE0002 code and related details + 4. Healthcheck reports DSMOLE0002 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. 
Healthcheck reports no issue found + """ + + RET_CODE = 'DSMOLE0001' + MO_GROUP_ATTR = 'creatorsname' + + standalone = topology_st.standalone + + log.info('Enable MO plugin') + plugin = MemberOfPlugin(standalone) + plugin.disable() + plugin.enable() + plugin.add('memberofgroupattr', MO_GROUP_ATTR) + time.sleep(.5) + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + log.info('Add the missing "eq" index') + be = Backends(standalone).get('userRoot') + be.add_index(MO_GROUP_ATTR, "eq", None) + time.sleep(.5) + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + + @pytest.mark.ds50873 @pytest.mark.bz1685160 @pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") diff --git a/src/lib389/lib389/cli_ctl/health.py b/src/lib389/lib389/cli_ctl/health.py index 20a56991f..8888cf170 100644 --- a/src/lib389/lib389/cli_ctl/health.py +++ b/src/lib389/lib389/cli_ctl/health.py @@ -33,6 +33,7 @@ CHECK_OBJECTS = [ Encryption, FSChecks, plugins.ReferentialIntegrityPlugin, + plugins.MemberOfPlugin, MonitorDiskSpace, Replica, DSEldif, diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py index 108946c0b..ce23d5c12 100644 --- a/src/lib389/lib389/lint.py +++ b/src/lib389/lib389/lint.py @@ -1,5 +1,5 @@ # --- BEGIN COPYRIGHT BLOCK --- -# Copyright (C) 2021 Red Hat, Inc. +# Copyright (C) 2022 Red Hat, Inc. # All rights reserved. # # License: GPL (version 3 or any later version). @@ -186,6 +186,25 @@ example using dsconf: """ } +# MemberOf plugin checks +DSMOLE0001 = { + 'dsle': 'DSMOLE0001', + 'severity': 'HIGH', + 'description': 'MemberOf operations can become very slow', + 'items': ['cn=memberof plugin,cn=plugins,cn=config', ], + 'detail': """The MemberOf plugin does internal searches when updating a group, or running the fixup task. +These internal searches will be unindexed leading to poor performance and high CPU. + +We advise that you index the memberOf group attributes for equality searches. +""", + 'fix': """Check the attributes set in "memberofgroupattr" to make sure they have +an index defined that has equality "eq" index type. You will need to reindex the +database after adding the missing index type. Here is an example using dsconf: + + # dsconf slapd-YOUR_INSTANCE backend index add --attr=ATTR --index-type=eq --reindex BACKEND +""" +} + # Disk Space check. 
Note - PARTITION is replaced by the calling function DSDSLE0001 = { 'dsle': 'DSDSLE0001', diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py index 335f0290f..5e1155ef7 100644 --- a/src/lib389/lib389/plugins.py +++ b/src/lib389/lib389/plugins.py @@ -12,7 +12,7 @@ import copy import os.path from lib389 import tasks from lib389._mapped_object import DSLdapObjects, DSLdapObject -from lib389.lint import DSRILE0001, DSRILE0002 +from lib389.lint import DSRILE0001, DSRILE0002, DSMOLE0001 from lib389.utils import ensure_str, ensure_list_bytes from lib389.schema import Schema from lib389._constants import ( @@ -775,6 +775,58 @@ class MemberOfPlugin(Plugin): self._create_objectclasses.extend(['extensibleObject']) self._must_attributes.extend(['memberOfGroupAttr', 'memberOfAttr']) + @classmethod + def lint_uid(cls): + return 'memberof' + + def _lint_member_attr_indexes(self): + if self.status(): + from lib389.backend import Backends + backends = Backends(self._instance).list() + attrs = self.get_attr_vals_utf8_l("memberofgroupattr") + container = self.get_attr_val_utf8_l("nsslapd-plugincontainerscope") + for backend in backends: + suffix = backend.get_attr_val_utf8_l('nsslapd-suffix') + if suffix == "cn=changelog": + # Always skip retro changelog + continue + if container is not None: + # Check if this backend is in the scope + if not container.endswith(suffix): + # skip this backend that is not in the scope + continue + indexes = backend.get_indexes() + for attr in attrs: + report = copy.deepcopy(DSMOLE0001) + try: + index = indexes.get(attr) + types = index.get_attr_vals_utf8_l("nsIndexType") + valid = False + if "eq" in types: + valid = True + + if not valid: + report['detail'] = report['detail'].replace('ATTR', attr) + report['detail'] = report['detail'].replace('BACKEND', suffix) + report['fix'] = report['fix'].replace('ATTR', attr) + report['fix'] = report['fix'].replace('BACKEND', suffix) + report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid) + report['items'].append(suffix) + report['items'].append(attr) + report['check'] = f'memberof:attr_indexes' + yield report + except: + # No index at all, bad + report['detail'] = report['detail'].replace('ATTR', attr) + report['detail'] = report['detail'].replace('BACKEND', suffix) + report['fix'] = report['fix'].replace('ATTR', attr) + report['fix'] = report['fix'].replace('BACKEND', suffix) + report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid) + report['items'].append(suffix) + report['items'].append(attr) + report['check'] = f'memberof:attr_indexes' + yield report + def get_attr(self): """Get memberofattr attribute"""
0
d38ae06d3e43ee429e5128bca263187a0949a96b
389ds/389-ds-base
Bug 625950 - hash nsslapd-rootpw changes in audit log This patch makes the Directory Manager password value be hashed in the audit log for changes to the nsslapd-rootpw attribute. Prior to this patch, the value was in the clear. This is consistent with the way the userPassword attribute is handled.
commit d38ae06d3e43ee429e5128bca263187a0949a96b Author: Nathan Kinder <[email protected]> Date: Mon Nov 22 11:46:45 2010 -0800 Bug 625950 - hash nsslapd-rootpw changes in audit log This patch makes the Directory Manager password value be hashed in the audit log for changes to the nsslapd-rootpw attribute. Prior to this patch, the value was in the clear. This is consistent with the way the userPassword attribute is handled. diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index d15310a14..c569ae4e7 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -33,7 +33,7 @@ * * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. * Copyright (C) 2009 Red Hat, Inc. - * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. + * Copyright (C) 2009, 2010 Hewlett-Packard Development Company, L.P. * All rights reserved. * * Contributors: @@ -77,6 +77,7 @@ static int modify_internal_pb (Slapi_PBlock *pb); static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw); static void remove_mod (Slapi_Mods *smods, const char *type, Slapi_Mods *smod_unhashed); static int op_shared_allow_pw_change (Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_Mods *smods); +static int hash_rootpw (LDAPMod **mods); #ifdef LDAP_DEBUG static const char* @@ -821,6 +822,20 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) { int rc; + /* + * Hash any rootpw attribute values. We hash them after pre-op + * plugins are called in case any pre-op plugin needs the clear value. + * They do need to be hashed here so they wont get audit logged in the + * clear. Note that config_set_rootpw will also do hashing if needed, + * but it will detect that the password is already hashed. + */ + slapi_pblock_get (pb, SLAPI_MODIFY_MODS, &mods); + if (hash_rootpw (mods) != 0) { + send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, + "Failed to hash root user's password", 0, NULL); + goto free_and_return; + } + slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database); set_db_default_result_handlers(pb); @@ -828,7 +843,6 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) /* to db access */ if (pw_change) { - slapi_pblock_get (pb, SLAPI_MODIFY_MODS, &mods); slapi_mods_init_passin (&smods, mods); remove_mod (&smods, unhashed_pw_attr, &unhashed_pw_smod); slapi_pblock_set (pb, SLAPI_MODIFY_MODS, @@ -1169,3 +1183,45 @@ done: slapi_ch_free_string(&proxystr); return rc; } + +/* + * Hashes any nsslapd-rootpw attribute values using the password storage + * scheme specified in cn=config:nsslapd-rootpwstoragescheme. + * Note: This is only done for modify, because rootdn's password lives + * in cn=config, which is never added. + */ +static int +hash_rootpw (LDAPMod **mods) +{ + int i, j; + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + + if (strcasecmp(slapdFrontendConfig->rootpwstoragescheme->pws_name, "clear") == 0) { + /* No work to do if the rootpw storage scheme is clear */ + return 0; + } + + for (i=0; mods[i] != NULL; i++) { + LDAPMod *mod = mods[i]; + if (strcasecmp (mod->mod_type, CONFIG_ROOTPW_ATTRIBUTE) != 0) + continue; + + for (j = 0; mod->mod_bvalues[j] != NULL; j++) { + char *val = mod->mod_bvalues[j]->bv_val; + char *hashedval = NULL; + if (pw_val2scheme (val, NULL, 0)) { + /* Value is pre-hashed, no work to do for this value */ + continue; + } else if (! 
slapd_nss_is_initialized() ) { + /* We need to hash a value but NSS is not initialized; bail */ + return -1; + } + hashedval=(slapdFrontendConfig->rootpwstoragescheme->pws_enc)(val); + slapi_ch_free_string (&val); + mod->mod_bvalues[j]->bv_val = hashedval; + mod->mod_bvalues[j]->bv_len = strlen (hashedval); + } + } + return 0; +} +
0
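For illustration only, the "hash only if not already hashed" pattern used by hash_rootpw() in the patch above reduces to the following sketch. It assumes the slapd internal headers (e.g. slap.h) that declare pw_val2scheme(), getFrontendConfig() and the password-scheme struct; hash_if_clear() is an invented helper name and is not part of the patch.

/* Sketch of the pre-hash check from hash_rootpw() above.
 * hash_if_clear() is hypothetical; pw_val2scheme(), getFrontendConfig() and
 * rootpwstoragescheme->pws_enc are the calls used by the patch itself. */
#include "slap.h"

static char *
hash_if_clear(const char *val)
{
    slapdFrontendConfig_t *cfg = getFrontendConfig();

    if (pw_val2scheme((char *)val, NULL, 0)) {
        /* value already carries a {SCHEME} prefix - keep it unchanged */
        return slapi_ch_strdup(val);
    }
    /* otherwise encode it with the configured nsslapd-rootpwstoragescheme */
    return (cfg->rootpwstoragescheme->pws_enc)((char *)val);
}

As in the patch, a server whose root password storage scheme is "clear" would skip this step entirely.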
a7dfa383d103944dfd27fff98152138c1b0fd0f7
389ds/389-ds-base
Ticket 49995 - Fix issues with internal op logging Bug Description: ----------------- At server startup the server's internal operations performed by bootstrapping occurred before the thread data was initialized. This caused random values in the logging counters [1]. It was also observed that nested operations (and nested-nested operations, etc) were not properly logged [2]. Fix Description: ----------------- [1] Move the thread initialization higher up in main() [2] Changed the way we log nested internal operations. Instead, we keep the internal op number the same for nested ops but also now display the nested level. https://pagure.io/389-ds-base/issue/49995 Reviewed by: lkrispenz, amsharma, firstyear (Thanks!)
commit a7dfa383d103944dfd27fff98152138c1b0fd0f7 Author: Mark Reynolds <[email protected]> Date: Fri Oct 26 15:20:08 2018 -0400 Ticket 49995 - Fix issues with internal op logging Bug Description: ----------------- At server startup the server's internal operatons performed by bootstrapping occurred before the thread data was initialized. This caused random values in the logging counters [1]. It was also observed that nested operations(and nested-nested operations, etc) were not properly logged [2]. Fix Description: ----------------- [1] Move the thread initialization higher up in main() [2] Changed the way we log nested internal operations. Instead, we keep the internal op number the same for nested ops but also now display the nested level. https://pagure.io/389-ds-base/issue/49995 Reviewed by: lkrispenz, amsharma, firstyear (Thanks!) diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c index 65e9b56b6..942f22516 100644 --- a/ldap/servers/slapd/add.c +++ b/ldap/servers/slapd/add.c @@ -467,13 +467,15 @@ op_shared_add(Slapi_PBlock *pb) uint64_t connid; int32_t op_id; int32_t op_internal_id; - get_internal_conn_op(&connid, &op_id, &op_internal_id); + int32_t op_nested_count; + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); slapi_log_access(LDAP_DEBUG_ARGS, - connid==0 ? "conn=Internal(%" PRId64 ") op=%d(%d) ADD dn=\"%s\"\n" : - "conn=%" PRId64 " (Internal) op=%d(%d) ADD dn=\"%s\"\n", + connid==0 ? "conn=Internal(%" PRId64 ") op=%d(%d)(%d) ADD dn=\"%s\"\n" : + "conn=%" PRId64 " (Internal) op=%d(%d)(%d) ADD dn=\"%s\"\n", connid, op_id, op_internal_id, + op_nested_count, slapi_entry_get_dn_const(e)); } } diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c index e740eb216..c0e61adf1 100644 --- a/ldap/servers/slapd/delete.c +++ b/ldap/servers/slapd/delete.c @@ -272,13 +272,15 @@ op_shared_delete(Slapi_PBlock *pb) uint64_t connid; int32_t op_id; int32_t op_internal_id; - get_internal_conn_op(&connid, &op_id, &op_internal_id); + int32_t op_nested_count; + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); slapi_log_access(LDAP_DEBUG_ARGS, - connid==0 ? "conn=Internal(%" PRId64 ") op=%d(%d) DEL dn=\"%s\"%s\n" : - "conn=%" PRId64 " (Internal) op=%d(%d) DEL dn=\"%s\"%s\n", + connid==0 ? "conn=Internal(%" PRId64 ") op=%d(%d)(%d) DEL dn=\"%s\"%s\n" : + "conn=%" PRId64 " (Internal) op=%d(%d)(%d) DEL dn=\"%s\"%s\n", connid, op_id, op_internal_id, + op_nested_count, slapi_sdn_get_dn(sdn), proxystr ? proxystr : ""); } diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index d175dcdc2..8423641ff 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -754,6 +754,15 @@ main(int argc, char **argv) g_log_init(); vattr_init(); + /* + * init the thread data indexes. Nothing should be creating their + * own thread data, and should be using this function instead + * as we may swap to context based storage in the future rather + * than direct thread-local accesses (especially important with + * consideration of rust etc) + */ + slapi_td_init(); + if (mcfg.slapd_exemode == SLAPD_EXEMODE_REFERRAL) { slapdFrontendConfig = getFrontendConfig(); /* make up the config stuff */ @@ -941,17 +950,6 @@ main(int argc, char **argv) goto cleanup; } - /* - * init the thread data indexes. 
Nothing should be creating their - * own thread data, and should be using this function instead - * as we may swap to context based storage in the future rather - * than direct thread-local accesses (especially important with - * consideration of rust etc) - * - * DOES THIS NEED TO BE BEFORE OR AFTER NS? - */ - slapi_td_init(); - /* * Create our thread pool here for tasks to utilise. */ diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 01cf72269..de46746c5 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -676,13 +676,15 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw) uint64_t connid; int32_t op_id; int32_t op_internal_id; - get_internal_conn_op(&connid, &op_id, &op_internal_id); + int32_t op_nested_count; + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); slapi_log_access(LDAP_DEBUG_ARGS, - connid==0 ? "conn=Internal(%" PRId64 ") op=%d(%d) MOD dn=\"%s\"%s\n" : - "conn=%" PRId64 " (Internal) op=%d(%d) MOD dn=\"%s\"%s\n", + connid==0 ? "conn=Internal(%" PRId64 ") op=%d(%d)(%d) MOD dn=\"%s\"%s\n" : + "conn=%" PRId64 " (Internal) op=%d(%d)(%d) MOD dn=\"%s\"%s\n", connid, op_id, op_internal_id, + op_nested_count, slapi_sdn_get_dn(sdn), proxystr ? proxystr : ""); } diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c index e22997ed9..3efe584a7 100644 --- a/ldap/servers/slapd/modrdn.c +++ b/ldap/servers/slapd/modrdn.c @@ -479,13 +479,15 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args) uint64_t connid; int32_t op_id; int32_t op_internal_id; - get_internal_conn_op(&connid, &op_id, &op_internal_id); + int32_t op_nested_count; + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); slapi_log_access(LDAP_DEBUG_ARGS, - connid==0 ? "conn=Internal(%" PRId64 ") op=%d(%d) MODRDN dn=\"%s\" newrdn=\"%s\" newsuperior=\"%s\"%s\n" : - "conn=%" PRId64 " (Internal) op=%d(%d) MODRDN dn=\"%s\" newrdn=\"%s\" newsuperior=\"%s\"%s\n", + connid==0 ? "conn=Internal(%" PRId64 ") op=%d(%d)(%d) MODRDN dn=\"%s\" newrdn=\"%s\" newsuperior=\"%s\"%s\n" : + "conn=%" PRId64 " (Internal) op=%d(%d)(%d) MODRDN dn=\"%s\" newrdn=\"%s\" newsuperior=\"%s\"%s\n", connid, op_id, op_internal_id, + op_nested_count, dn, newrdn ? newrdn : "(null)", newsuperior ? 
newsuperior : "(null)", diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c index 9ec758d1b..18bf3a54b 100644 --- a/ldap/servers/slapd/opshared.c +++ b/ldap/servers/slapd/opshared.c @@ -295,15 +295,16 @@ op_shared_search(Slapi_PBlock *pb, int send_result) uint64_t connid; int32_t op_id; int32_t op_internal_id; + int32_t op_nested_count; #define SLAPD_SEARCH_FMTSTR_BASE "conn=%" PRIu64 " op=%d SRCH base=\"%s\" scope=%d " -#define SLAPD_SEARCH_FMTSTR_BASE_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d) SRCH base=\"%s\" scope=%d " -#define SLAPD_SEARCH_FMTSTR_BASE_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d) SRCH base=\"%s\" scope=%d " +#define SLAPD_SEARCH_FMTSTR_BASE_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d)(%d) SRCH base=\"%s\" scope=%d " +#define SLAPD_SEARCH_FMTSTR_BASE_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d)(%d) SRCH base=\"%s\" scope=%d " #define SLAPD_SEARCH_FMTSTR_REMAINDER " attrs=%s%s%s\n" PR_ASSERT(fstr); if (internal_op) { - get_internal_conn_op(&connid, &op_id, &op_internal_id); + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); } if (strlen(fstr) > 1024) { /* @@ -357,6 +358,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) connid, op_id, op_internal_id, + op_nested_count, normbase, scope, fstr, attrliststr, flag_psearch ? " options=persistent" : "", diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c index 0570f31e3..a03ca43e2 100644 --- a/ldap/servers/slapd/result.c +++ b/ldap/servers/slapd/result.c @@ -1914,8 +1914,9 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries uint64_t connid; int32_t op_id; int32_t op_internal_id; + int32_t op_nested_count; - get_internal_conn_op(&connid, &op_id, &op_internal_id); + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_INDEX, &pr_idx); slapi_pblock_get(pb, SLAPI_PAGED_RESULTS_COOKIE, &pr_cookie); @@ -1946,8 +1947,8 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries } } -#define LOG_CONN_OP_FMT_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d) RESULT err=%d" -#define LOG_CONN_OP_FMT_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d) RESULT err=%d" +#define LOG_CONN_OP_FMT_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d)(%d) RESULT err=%d" +#define LOG_CONN_OP_FMT_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d)(%d) RESULT err=%d" if (op->o_tag == LDAP_REQ_BIND && err == LDAP_SASL_BIND_IN_PROGRESS) { /* * Not actually an error. @@ -1972,6 +1973,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries connid, op_id, op_internal_id, + op_nested_count, err, tag, nentries, etime, notes_str, csn_str); @@ -2002,6 +2004,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries connid, op_id, op_internal_id, + op_nested_count, err, tag, nentries, etime, notes_str, csn_str, dn ? 
dn : ""); @@ -2027,6 +2030,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries connid, op_id, op_internal_id, + op_nested_count, err, tag, nentries, etime, notes_str, csn_str, pr_idx, pr_cookie); @@ -2061,6 +2065,7 @@ log_result(Slapi_PBlock *pb, Operation *op, int err, ber_tag_t tag, int nentries connid, op_id, op_internal_id, + op_nested_count, err, tag, nentries, etime, notes_str, csn_str); @@ -2117,13 +2122,15 @@ log_entry(Operation *op, Slapi_Entry *e) uint64_t connid; int32_t op_id; int32_t op_internal_id; - get_internal_conn_op(&connid, &op_id, &op_internal_id); + int32_t op_nested_count; + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); slapi_log_access(LDAP_DEBUG_ARGS, - connid == 0 ? "conn=Internal(%" PRIu64 ") op=%d(%d) ENTRY dn=\"%s\"\n" : - "conn=%" PRIu64 " (Internal) op=%d(%d) ENTRY dn=\"%s\"\n", + connid == 0 ? "conn=Internal(%" PRIu64 ") op=%d(%d)(%d) ENTRY dn=\"%s\"\n" : + "conn=%" PRIu64 " (Internal) op=%d(%d)(%d) ENTRY dn=\"%s\"\n", connid, op_id, op_internal_id, + op_nested_count, slapi_entry_get_dn_const(e)); } } @@ -2145,11 +2152,12 @@ log_referral(Operation *op) uint64_t connid; int32_t op_id; int32_t op_internal_id; - get_internal_conn_op(&connid, &op_id, &op_internal_id); + int32_t op_nested_count; + get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count); slapi_log_access(LDAP_DEBUG_ARGS, - connid == 0 ? "conn=Internal(%" PRIu64 ") op=%d(%d) REFERRAL\n" : - "conn=%" PRIu64 " (Internal) op=%d(%d) REFERRAL\n", - connid, op_id, op_internal_id); + connid == 0 ? "conn=Internal(%" PRIu64 ") op=%d(%d)(%d) REFERRAL\n" : + "conn=%" PRIu64 " (Internal) op=%d(%d)(%d) REFERRAL\n", + connid, op_id, op_internal_id, op_nested_count); } } } diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h index 21f883dc3..416100268 100644 --- a/ldap/servers/slapd/slapi-private.h +++ b/ldap/servers/slapd/slapi-private.h @@ -1176,7 +1176,7 @@ char *slapd_get_tmp_dir(void); typedef enum _slapi_op_nest_state { OP_STATE_NOTNESTED = 0, OP_STATE_NESTED = 1, - OP_STATE_UNNESTED = 2, + OP_STATE_PREV_NESTED = 2, } slapi_log_nest_state; @@ -1208,7 +1208,7 @@ const char *escape_string(const char *str, char buf[BUFSIZ]); const char *escape_string_with_punctuation(const char *str, char buf[BUFSIZ]); const char *escape_string_for_filename(const char *str, char buf[BUFSIZ]); void strcpy_unescape_value(char *d, const char *s); -void get_internal_conn_op (uint64_t *connid, int32_t *op_id, int32_t *op_internal_id); +void get_internal_conn_op (uint64_t *connid, int32_t *op_id, int32_t *op_internal_id, int32_t *op_nested_count); char *slapi_berval_get_string_copy(const struct berval *bval); /* lenstr stuff */ diff --git a/ldap/servers/slapd/thread_data.c b/ldap/servers/slapd/thread_data.c index ec145b510..7babe36c0 100644 --- a/ldap/servers/slapd/thread_data.c +++ b/ldap/servers/slapd/thread_data.c @@ -121,11 +121,9 @@ slapi_td_get_log_op_state() { /* - * Increment the internal operation count. Since internal operations - * can be nested via plugins calling plugins we need to keep track of - * this. If we become nested, and finally become unnested (back to the - * original internal op), then we have to bump the op id number twice - * for the next new (unnested) internal op. + * Increment the internal operation count. Unless we are nested, in that case + * do not update the internal op counter. If we just became "unnested" then + * update the state to keep the counters on track. 
*/ void slapi_td_internal_op_start(void) @@ -142,38 +140,26 @@ slapi_td_internal_op_start(void) } } - /* increment the internal op id counter */ - op_state->op_int_id += 1; - - /* - * Bump the nested count so we can maintain our counts after plugins call - * plugins, etc. - */ + /* Bump the nested count */ op_state->op_nest_count += 1; - /* Now check for special cases in the nested count */ - if (op_state->op_nest_count == 2){ - /* We are now nested, mark it as so */ - /* THERE IS A BETTER WAY! We should track parent op structs instead! */ + if (op_state->op_nest_count > 1){ + /* We are nested */ op_state->op_nest_state = OP_STATE_NESTED; - } else if (op_state->op_nest_count == 1) { - /* - * Back to the beginning, but if we were previously nested then the - * internal op id count is off - */ - if (op_state->op_nest_state == OP_STATE_UNNESTED){ - /* We were nested but anymore, need to bump the internal id count again */ - op_state->op_nest_state = OP_STATE_NOTNESTED; /* reset nested state */ - op_state->op_int_id += 1; + } else { + /* We are not nested */ + op_state->op_int_id += 1; + if (op_state->op_nest_state == OP_STATE_PREV_NESTED) { + /* But we were just previously nested, so update the state */ + op_state->op_nest_state = OP_STATE_NOTNESTED; } } } /* - * Decrement the nested count. If we were actually nested (2 levels deep or more) - * then we need to lower the op id. If we were nested and are now unnested we need - * to mark this in the TD so on the next new internal op we set the its op id to the - * correct/expected/next-sequential value. + * Decrement the nested count. If we were nested and we are NOW unnested + * then we need to reset the state so on the next new internal op we set the + * counters to the correct/expected/next-sequential value. */ void slapi_td_internal_op_finish(void) @@ -190,24 +176,13 @@ slapi_td_internal_op_finish(void) return; } } - - if ( op_state->op_nest_count > 1 ){ - /* Nested op just finished, decr op id */ - op_state->op_int_id -= 1; - - if ( (op_state->op_nest_count - 1) == 1 ){ - /* - * Okay we are back to the beginning, We were nested but not - * anymore. So when we start the next internal op on this - * conn we need to double increment the internal op id to - * maintain the correct op id sequence. Set the nested state - * to "unnested". - */ - op_state->op_nest_state = OP_STATE_UNNESTED; - } - } /* decrement nested count */ op_state->op_nest_count -= 1; + + /* If we were nested, but NOT anymore, then update the state */ + if ( op_state->op_nest_state == OP_STATE_NESTED && op_state->op_nest_count == 1){ + op_state->op_nest_state = OP_STATE_PREV_NESTED; + } } void diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index ccc68adb9..1a8df2f52 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -1591,16 +1591,19 @@ slapi_create_errormsg( } void -get_internal_conn_op (uint64_t *connid, int32_t *op_id, int32_t *op_internal_id) { +get_internal_conn_op (uint64_t *connid, int32_t *op_id, int32_t *op_internal_id, int32_t *op_nested_count) { struct slapi_td_log_op_state_t *op_state = slapi_td_get_log_op_state(); if (op_state != NULL) { *connid = op_state->conn_id; *op_id = op_state->op_id; *op_internal_id = op_state->op_int_id; + *op_nested_count = op_state->op_nest_count; + } else { *connid = 0; *op_id = 0; *op_internal_id = 0; + *op_nested_count = 0; } }
0
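The thread_data.c hunk above simplifies the bookkeeping for nested internal operations: only a top-level internal operation receives a new internal op id, a nested operation (a plugin calling a plugin) only raises the nesting depth, and OP_STATE_PREV_NESTED records that the thread has just returned from a nested call. The extra (%d) added to the access-log formats is that nesting depth. Below is a minimal, self-contained model of this state machine, assuming a plain struct in place of the per-thread state returned by slapi_td_get_log_op_state(); internal_op_start(), internal_op_finish() and the other names are illustrative stand-ins, not the server's API.

/*
 * Illustrative model of the nested internal-operation counters changed in
 * thread_data.c above.  The real server keeps this state in per-thread data
 * (slapi_td_get_log_op_state()); here it is a plain struct so the sequence of
 * op=%d(%d)(%d) values can be traced from main().  Names are hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum nest_state { NOTNESTED = 0, NESTED = 1, PREV_NESTED = 2 };

struct op_state {
    int32_t op_int_id;          /* op_internal_id: the second %d in op=%d(%d)(%d) */
    int32_t op_nest_count;      /* op_nested_count: the third %d in op=%d(%d)(%d) */
    enum nest_state nest_state;
};

/* Mirrors slapi_td_internal_op_start(): only a top-level internal op gets a
 * new internal op id; a nested op just bumps the depth. */
static void internal_op_start(struct op_state *s)
{
    s->op_nest_count += 1;
    if (s->op_nest_count > 1) {
        s->nest_state = NESTED;
    } else {
        s->op_int_id += 1;
        if (s->nest_state == PREV_NESTED) {
            s->nest_state = NOTNESTED;
        }
    }
}

/* Mirrors slapi_td_internal_op_finish(): coming back down to depth 1 after
 * being nested is remembered as PREV_NESTED. */
static void internal_op_finish(struct op_state *s)
{
    s->op_nest_count -= 1;
    if (s->nest_state == NESTED && s->op_nest_count == 1) {
        s->nest_state = PREV_NESTED;
    }
}

int main(void)
{
    struct op_state s = {0};

    internal_op_start(&s);   /* top-level internal op  -> id 1, depth 1 */
    internal_op_start(&s);   /* plugin calls a plugin  -> id 1, depth 2 */
    printf("nested:   op(%" PRId32 ")(%" PRId32 ")\n", s.op_int_id, s.op_nest_count);
    internal_op_finish(&s);  /* nested op done         -> depth 1 */
    internal_op_finish(&s);  /* top-level op done      -> depth 0 */
    internal_op_start(&s);   /* next internal op       -> id 2, depth 1 */
    printf("unnested: op(%" PRId32 ")(%" PRId32 ")\n", s.op_int_id, s.op_nest_count);
    return 0;
}

Tracing a nested sequence (start, nested start, nested finish, finish, start) with this model yields internal op ids 1, 1, 2 and depths 1, 2, 1: the same sequence the rewritten start/finish helpers aim for, without the double-increment trick the old OP_STATE_UNNESTED code relied on.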
3c510e0a26e321949b552b5e8c887634d9d7e63e
389ds/389-ds-base
Issue 4551 - Paged search impacts performance (#5838)

* Issue 4551 - Paged search impacts performance

Problem: A script that loops on paged-result searches greatly degrades the
performance of other clients (for example, the ldclt bind+search rate
decreased by 80% in the test case).

Cause: The paged result fields in the connection were protected by the
connection mutex, which is also used by the listener thread; in some cases
this causes contention that delays the handling of new operations.

Solution: Do not rely on the connection mutex to protect the paged result
context; use a dedicated array of locks instead.
commit 3c510e0a26e321949b552b5e8c887634d9d7e63e Author: progier389 <[email protected]> Date: Tue Jul 18 11:17:07 2023 +0200 Issue 4551 - Paged search impacts performance (#5838) * Issue 4551 - Paged search impacts performance Problem: Having a script looping doing a search with paged result impact greatly the performance of other clients (for example ldclt bind+search rate decreased by 80% in the test case) Cause: Page result field in connection were protected by the connection mutex that is also used by the listener thread, in some cases this cause contention that delays the handling of new operations Solution: Do not rely on the connection mutex to protect the page result context but on a dedicated array of locks. diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index c5ad5ee0f..01168275f 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -1211,6 +1211,7 @@ slapd_daemon(daemon_ports_t *ports) slapi_log_err(SLAPI_LOG_TRACE, "slapd_daemon", "slapd shutting down - waiting for backends to close down\n"); + pageresult_lock_cleanup(); eq_stop(); /* deprecated */ eq_stop_rel(); if (!in_referral_mode) { diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index f24650547..deae0af3b 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -1007,6 +1007,7 @@ main(int argc, char **argv) eq_init_rel(); /* must be done before plugins started */ ps_init_psearch_system(); /* must come before plugin_startall() */ + pageresult_lock_init(); /* initialize UniqueID generator - must be done once backends are started @@ -2265,6 +2266,7 @@ slapd_exemode_db2ldif(int argc, char **argv, struct main_config *mcfg) eq_init_rel(); /* must be done before plugins started */ ps_init_psearch_system(); /* must come before plugin_startall() */ + pageresult_lock_init(); plugin_startall(argc, argv, plugin_list); eq_start(); /* must be done after plugins started - DEPRECATED*/ eq_start_rel(); /* must be done after plugins started */ diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c index 789bd2e4f..a842d4249 100644 --- a/ldap/servers/slapd/opshared.c +++ b/ldap/servers/slapd/opshared.c @@ -271,6 +271,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) int pr_idx = -1; Slapi_DN *orig_sdn = NULL; int free_sdn = 0; + pthread_mutex_t *pagedresults_mutex = NULL; be_list[0] = NULL; referral_list[0] = NULL; @@ -576,6 +577,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) int32_t tlimit; slapi_pblock_get(pb, SLAPI_SEARCH_TIMELIMIT, &tlimit); pagedresults_set_timelimit(pb_conn, operation, (time_t)tlimit, pr_idx); + pagedresults_mutex = pageresult_lock_get_addr(pb_conn); } /* @@ -696,7 +698,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) * In async paged result case, the search result might be released * by other theads. We need to double check it in the locked region. */ - pthread_mutex_lock(&(pb_conn->c_mutex)); + pthread_mutex_lock(pagedresults_mutex); pr_search_result = pagedresults_get_search_result(pb_conn, operation, 1 /*locked*/, pr_idx); if (pr_search_result) { if (pagedresults_is_abandoned_or_notavailable(pb_conn, 1 /*locked*/, pr_idx)) { @@ -704,7 +706,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) /* Previous operation was abandoned and the simplepaged object is not in use. 
*/ send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL); rc = LDAP_SUCCESS; - pthread_mutex_unlock(&(pb_conn->c_mutex)); + pthread_mutex_unlock(pagedresults_mutex); goto free_and_return; } else { slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, pr_search_result); @@ -718,7 +720,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result) pr_stat = PAGEDRESULTS_SEARCH_END; rc = LDAP_SUCCESS; } - pthread_mutex_unlock(&(pb_conn->c_mutex)); + pthread_mutex_unlock(pagedresults_mutex); pagedresults_unlock(pb_conn, pr_idx); if ((PAGEDRESULTS_SEARCH_END == pr_stat) || (0 == pnentries)) { @@ -843,10 +845,10 @@ op_shared_search(Slapi_PBlock *pb, int send_result) /* PAGED RESULTS */ if (op_is_pagedresults(operation)) { /* cleanup the slot */ - pthread_mutex_lock(&(pb_conn->c_mutex)); + pthread_mutex_lock(pagedresults_mutex); pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx); rc = pagedresults_set_current_be(pb_conn, NULL, pr_idx, 1); - pthread_mutex_unlock(&(pb_conn->c_mutex)); + pthread_mutex_unlock(pagedresults_mutex); } if (1 == flag_no_such_object) { break; @@ -887,11 +889,11 @@ op_shared_search(Slapi_PBlock *pb, int send_result) slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr); if ((PAGEDRESULTS_SEARCH_END == pr_stat) || (0 == pnentries)) { /* no more entries, but at least another backend */ - pthread_mutex_lock(&(pb_conn->c_mutex)); + pthread_mutex_lock(pagedresults_mutex); pagedresults_set_search_result(pb_conn, operation, NULL, 1, pr_idx); be->be_search_results_release(&sr); rc = pagedresults_set_current_be(pb_conn, next_be, pr_idx, 1); - pthread_mutex_unlock(&(pb_conn->c_mutex)); + pthread_mutex_unlock(pagedresults_mutex); pr_stat = PAGEDRESULTS_SEARCH_END; /* make sure stat is SEARCH_END */ if (NULL == next_be) { /* no more entries && no more backends */ @@ -919,9 +921,9 @@ op_shared_search(Slapi_PBlock *pb, int send_result) next_be = NULL; /* to break the loop */ if (operation->o_status & SLAPI_OP_STATUS_ABANDONED) { /* It turned out this search was abandoned. */ - pthread_mutex_lock(&(pb_conn->c_mutex)); + pthread_mutex_lock(pagedresults_mutex); pagedresults_free_one_msgid_nolock(pb_conn, operation->o_msgid); - pthread_mutex_unlock(&(pb_conn->c_mutex)); + pthread_mutex_unlock(pagedresults_mutex); /* paged-results-request was abandoned; making an empty cookie. 
*/ pagedresults_set_response_control(pb, 0, estimate, -1, pr_idx); send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL); diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c index 54aa086e8..fc15f6bec 100644 --- a/ldap/servers/slapd/pagedresults.c +++ b/ldap/servers/slapd/pagedresults.c @@ -12,6 +12,34 @@ #include "slap.h" +#define LOCK_HASH_SIZE 997 /* Should be a prime number */ + +static pthread_mutex_t *lock_hash = NULL; + +void +pageresult_lock_init() +{ + lock_hash = (pthread_mutex_t *)slapi_ch_calloc(LOCK_HASH_SIZE, sizeof(pthread_mutex_t)); + for (size_t i=0; i<LOCK_HASH_SIZE; i++) { + pthread_mutex_init(&lock_hash[i], NULL); + } +} + +void +pageresult_lock_cleanup() +{ + for (size_t i=0; i<LOCK_HASH_SIZE; i++) { + pthread_mutex_destroy(&lock_hash[i]); + } + slapi_ch_free((void**)&lock_hash); +} + +pthread_mutex_t * +pageresult_lock_get_addr(Connection *conn) +{ + return &lock_hash[(((size_t)conn)/sizeof (Connection))%LOCK_HASH_SIZE]; +} + /* helper function to clean up one prp slot */ static void _pr_cleanup_one_slot(PagedResults *prp) @@ -98,7 +126,7 @@ pagedresults_parse_control_value(Slapi_PBlock *pb, return LDAP_UNWILLING_TO_PERFORM; } - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); /* the ber encoding is no longer needed */ ber_free(ber, 1); if (cookie.bv_len <= 0) { @@ -206,7 +234,7 @@ bail: } } } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_parse_control_value", "<= idx %d\n", *index); @@ -300,7 +328,7 @@ pagedresults_free_one(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (conn->c_pagedresults.prl_count <= 0) { slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one", "conn=%" PRIu64 " paged requests list count is %d\n", @@ -311,7 +339,7 @@ pagedresults_free_one(Connection *conn, Operation *op, int index) conn->c_pagedresults.prl_count--; rc = 0; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_free_one", "<= %d\n", rc); @@ -319,7 +347,7 @@ pagedresults_free_one(Connection *conn, Operation *op, int index) } /* - * Used for abandoning - conn->c_mutex is already locked in do_abandone. + * Used for abandoning - pageresult_lock_get_addr(conn) is already locked in do_abandone. 
*/ int pagedresults_free_one_msgid_nolock(Connection *conn, ber_int_t msgid) @@ -363,11 +391,11 @@ pagedresults_get_current_be(Connection *conn, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_current_be", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { be = conn->c_pagedresults.prl_list[index].pr_current_be; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_current_be", "<= %p\n", be); @@ -382,13 +410,13 @@ pagedresults_set_current_be(Connection *conn, Slapi_Backend *be, int index, int "pagedresults_set_current_be", "=> idx=%d\n", index); if (conn && (index > -1)) { if (!nolock) - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_current_be = be; } rc = 0; if (!nolock) - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_current_be", "<= %d\n", rc); @@ -407,13 +435,13 @@ pagedresults_get_search_result(Connection *conn, Operation *op, int locked, int locked ? "locked" : "not locked", index); if (conn && (index > -1)) { if (!locked) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); } if (index < conn->c_pagedresults.prl_maxlen) { sr = conn->c_pagedresults.prl_list[index].pr_search_result_set; } if (!locked) { - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } } slapi_log_err(SLAPI_LOG_TRACE, @@ -433,7 +461,7 @@ pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int lo index, sr); if (conn && (index > -1)) { if (!locked) - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { PagedResults *prp = conn->c_pagedresults.prl_list + index; if (!(prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED) || !sr) { @@ -443,7 +471,7 @@ pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int lo rc = 0; } if (!locked) - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_search_result", "=> %d\n", rc); @@ -460,11 +488,11 @@ pagedresults_get_search_result_count(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_search_result_count", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { count = conn->c_pagedresults.prl_list[index].pr_search_result_count; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_search_result_count", "<= %d\n", count); @@ -481,11 +509,11 @@ pagedresults_set_search_result_count(Connection *conn, Operation *op, int count, slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_search_result_count", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { 
conn->c_pagedresults.prl_list[index].pr_search_result_count = count; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, @@ -506,11 +534,11 @@ pagedresults_get_search_result_set_size_estimate(Connection *conn, "pagedresults_get_search_result_set_size_estimate", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { count = conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_search_result_set_size_estimate", "<= %d\n", @@ -532,11 +560,11 @@ pagedresults_set_search_result_set_size_estimate(Connection *conn, "pagedresults_set_search_result_set_size_estimate", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_search_result_set_size_estimate = count; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, @@ -555,11 +583,11 @@ pagedresults_get_with_sort(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_with_sort", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { flags = conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_WITH_SORT; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_with_sort", "<= %d\n", flags); @@ -576,14 +604,14 @@ pagedresults_set_with_sort(Connection *conn, Operation *op, int flags, int index slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_with_sort", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { if (flags & OP_FLAG_SERVER_SIDE_SORTING) { conn->c_pagedresults.prl_list[index].pr_flags |= CONN_FLAG_PAGEDRESULTS_WITH_SORT; } } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_with_sort", "<= %d\n", rc); @@ -600,11 +628,11 @@ pagedresults_get_unindexed(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_unindexed", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { flags = conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_UNINDEXED; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_unindexed", "<= %d\n", flags); @@ -621,12 +649,12 @@ pagedresults_set_unindexed(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_unindexed", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + 
pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_flags |= CONN_FLAG_PAGEDRESULTS_UNINDEXED; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, @@ -644,11 +672,11 @@ pagedresults_get_sort_result_code(Connection *conn, Operation *op, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_sort_result_code", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { code = conn->c_pagedresults.prl_list[index].pr_sort_result_code; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_get_sort_result_code", "<= %d\n", code); @@ -665,11 +693,11 @@ pagedresults_set_sort_result_code(Connection *conn, Operation *op, int code, int slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_sort_result_code", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_sort_result_code = code; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, @@ -687,11 +715,11 @@ pagedresults_set_timelimit(Connection *conn, Operation *op, time_t timelimit, in slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_timelimit", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { slapi_timespec_expire_at(timelimit, &(conn->c_pagedresults.prl_list[index].pr_timelimit_hr)); } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); rc = 0; } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_set_timelimit", "<= %d\n", rc); @@ -746,7 +774,7 @@ pagedresults_cleanup(Connection *conn, int needlock) } if (needlock) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); } for (i = 0; conn->c_pagedresults.prl_list && i < conn->c_pagedresults.prl_maxlen; @@ -765,7 +793,7 @@ pagedresults_cleanup(Connection *conn, int needlock) } conn->c_pagedresults.prl_count = 0; if (needlock) { - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } /* slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_cleanup", "<= %d\n", rc); */ return rc; @@ -789,7 +817,7 @@ pagedresults_cleanup_all(Connection *conn, int needlock) } if (needlock) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); } for (i = 0; conn->c_pagedresults.prl_list && i < conn->c_pagedresults.prl_maxlen; @@ -809,7 +837,7 @@ pagedresults_cleanup_all(Connection *conn, int needlock) conn->c_pagedresults.prl_maxlen = 0; conn->c_pagedresults.prl_count = 0; if (needlock) { - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } return rc; } @@ -827,7 +855,7 @@ pagedresults_check_or_set_processing(Connection *conn, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_check_or_set_processing", "=>\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + 
pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { ret = (conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_PROCESSING); @@ -835,7 +863,7 @@ pagedresults_check_or_set_processing(Connection *conn, int index) conn->c_pagedresults.prl_list[index].pr_flags |= CONN_FLAG_PAGEDRESULTS_PROCESSING; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_check_or_set_processing", "<= %d\n", ret); @@ -854,7 +882,7 @@ pagedresults_reset_processing(Connection *conn, int index) slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_reset_processing", "=> idx=%d\n", index); if (conn && (index > -1)) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { ret = (conn->c_pagedresults.prl_list[index].pr_flags & CONN_FLAG_PAGEDRESULTS_PROCESSING); @@ -862,7 +890,7 @@ pagedresults_reset_processing(Connection *conn, int index) conn->c_pagedresults.prl_list[index].pr_flags &= ~CONN_FLAG_PAGEDRESULTS_PROCESSING; } - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } slapi_log_err(SLAPI_LOG_TRACE, "pagedresults_reset_processing", "<= %d\n", ret); @@ -881,7 +909,7 @@ pagedresults_reset_processing(Connection *conn, int index) * Do not return timed out here. But let the next request take care the * timedout slot(s). * - * must be called within conn->c_mutex + * must be called within pageresult_lock_get_addr(conn) */ int pagedresults_is_timedout_nolock(Connection *conn) @@ -908,7 +936,7 @@ pagedresults_is_timedout_nolock(Connection *conn) /* * reset all timeout - * must be called within conn->c_mutex + * must be called within pageresult_lock_get_addr(conn) */ int pagedresults_reset_timedout_nolock(Connection *conn) @@ -973,9 +1001,9 @@ pagedresults_lock(Connection *conn, int index) if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) { return; } - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); prp = conn->c_pagedresults.prl_list + index; - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); if (prp->pr_mutex) { PR_Lock(prp->pr_mutex); } @@ -989,9 +1017,9 @@ pagedresults_unlock(Connection *conn, int index) if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) { return; } - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); prp = conn->c_pagedresults.prl_list + index; - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); if (prp->pr_mutex) { PR_Unlock(prp->pr_mutex); } @@ -1006,11 +1034,11 @@ pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int inde return 1; /* not abandoned, but do not want to proceed paged results op. 
*/ } if (!locked) { - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); } prp = conn->c_pagedresults.prl_list + index; if (!locked) { - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED; } @@ -1035,13 +1063,13 @@ pagedresults_set_search_result_pb(Slapi_PBlock *pb, void *sr, int locked) "pagedresults_set_search_result_pb", "=> idx=%d, sr=%p\n", index, sr); if (conn && (index > -1)) { if (!locked) - pthread_mutex_lock(&(conn->c_mutex)); + pthread_mutex_lock(pageresult_lock_get_addr(conn)); if (index < conn->c_pagedresults.prl_maxlen) { conn->c_pagedresults.prl_list[index].pr_search_result_set = sr; rc = 0; } if (!locked) { - pthread_mutex_unlock(&(conn->c_mutex)); + pthread_mutex_unlock(pageresult_lock_get_addr(conn)); } } slapi_log_err(SLAPI_LOG_TRACE, diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index b3c1afdf4..a8540ae63 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -1588,6 +1588,9 @@ int slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt, int s /* * pagedresults.c */ +void pageresult_lock_init(); +void pageresult_lock_cleanup(); +pthread_mutex_t *pageresult_lock_get_addr(Connection *conn); int pagedresults_parse_control_value(Slapi_PBlock *pb, struct berval *psbvp, ber_int_t *pagesize, int *index, Slapi_Backend *be); void pagedresults_set_response_control(Slapi_PBlock *pb, int iscritical, ber_int_t estimate, int curr_search_count, int index); Slapi_Backend *pagedresults_get_current_be(Connection *conn, int index);
0
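The core of the fix above is pageresult_lock_get_addr(): paged-result state is now guarded by one mutex taken from a fixed pool of LOCK_HASH_SIZE (997) locks, selected by hashing the connection's address, so the listener thread no longer contends on conn->c_mutex for every paged-result access. The following stand-alone sketch illustrates that lock-striping pattern with the same address-to-slot mapping; fake_conn_t, stripe_for() and the other names are hypothetical stand-ins rather than the server's actual types.

/*
 * Stand-alone sketch of the lock-striping scheme introduced in
 * pagedresults.c above.  A fixed pool of mutexes is created once at startup;
 * each connection-like object is mapped to one slot by dividing its address
 * by the object size and taking the remainder, the same arithmetic used by
 * pageresult_lock_get_addr().  All names here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define STRIPE_COUNT 997 /* prime, like LOCK_HASH_SIZE, to spread addresses */

typedef struct {
    int fd; /* minimal stand-in for the real Connection structure */
} fake_conn_t;

static pthread_mutex_t *stripes;

static void stripe_pool_init(void)
{
    stripes = calloc(STRIPE_COUNT, sizeof(*stripes));
    for (size_t i = 0; i < STRIPE_COUNT; i++) {
        pthread_mutex_init(&stripes[i], NULL);
    }
}

static void stripe_pool_cleanup(void)
{
    for (size_t i = 0; i < STRIPE_COUNT; i++) {
        pthread_mutex_destroy(&stripes[i]);
    }
    free(stripes);
}

/* Map an object to its stripe: divide the address by the object size so
 * neighbouring objects land in different slots, then take the modulo. */
static pthread_mutex_t *stripe_for(const fake_conn_t *c)
{
    return &stripes[((size_t)c / sizeof(fake_conn_t)) % STRIPE_COUNT];
}

int main(void)
{
    fake_conn_t conns[4];

    stripe_pool_init();
    for (int i = 0; i < 4; i++) {
        pthread_mutex_t *m = stripe_for(&conns[i]);
        pthread_mutex_lock(m);
        printf("conn %d -> stripe %td\n", i, m - stripes);
        pthread_mutex_unlock(m);
    }
    stripe_pool_cleanup();
    return 0;
}

Because two different connections can hash to the same stripe, a per-stripe mutex should only protect short, bounded critical sections; that is how the patch uses it, replacing each pthread_mutex_lock(&(conn->c_mutex)) around paged-result state with pthread_mutex_lock(pageresult_lock_get_addr(conn)).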