commit_id: string
repo: string
commit_message: string
diff: string
label: int64
13cf56fca1e260fa11ec43e402d0d51ab654ff0b
389ds/389-ds-base
add convenience agreement_dn method to get a single replication agreement dn Reviewed by: tbordaz (Thanks!)
commit 13cf56fca1e260fa11ec43e402d0d51ab654ff0b Author: Rich Megginson <[email protected]> Date: Wed Nov 20 19:19:41 2013 -0700 add convenience agreement_dn method to get a single replication agreement dn Reviewed by: tbordaz (Thanks!) diff --git a/src/lib389/lib389/brooker.py b/src/lib389/lib389/brooker.py index e067fba07..58d4ffe89 100644 --- a/src/lib389/lib389/brooker.py +++ b/src/lib389/lib389/brooker.py @@ -763,6 +763,23 @@ class Replica(object): return [ent.dn for ent in ents] return ents + def agreement_dn(self, basedn, oth): + '''get the replication agreement to the dirsrv object + specified by oth for the suffix basedn + this returns the replication agreement handle in a form suitable + for passing to agreement_changes, start_async, etc.''' + filt = '(nsds5replicahost=%s)(nsds5replicaport=%d)(nsds5replicaroot=%s)' % \ + (oth.host, oth.port, normalizeDN(basedn)) + dns = self.agreements(filt, None, True) + if not dns: + self.log.error('no agreement found for host %s port %d suffix %s', oth.host, oth.port, basedn) + return None + elif len(dns) > 1: + self.log.error('%d agreements found for host %s port %d suffix %s', len(dns), oth.host, oth.port, basedn) + return None + else: + return dns[0] +
0
68ab6a809a2a46fbdb6fb880b1e79f99b09457b1
389ds/389-ds-base
Issue 51076 - remove unnecessary slapi entry dups Description: So the problem is that slapi_search_internal_get_entry() duplicates the entry twice. It does that as a convenience where it will allocate a pblock, do the search, copy the entry, free search results from the pblock, and then free the pblock itself. I basically split this function into two functions. One function allocates the pblock, does the search and returns the entry. The other function frees the entries and pblock. 99% of the time when we call slapi_search_internal_get_entry() we are just reading it and freeing it. It's not being consumed. In these cases, using the two-function approach eliminates an extra slapi_entry_dup(). Over the time of an operation/connection we can save quite a bit of mallocing/freeing. This could also help with memory fragmentation. ASAN: passed relates: https://pagure.io/389-ds-base/issue/51076 Reviewed by: firstyear & tbordaz(Thanks!)
commit 68ab6a809a2a46fbdb6fb880b1e79f99b09457b1 Author: Mark Reynolds <[email protected]> Date: Tue May 12 13:48:30 2020 -0400 Issue 51076 - remove unnecessary slapi entry dups Description: So the problem is that slapi_search_internal_get_entry() duplicates the entry twice. It does that as a convenience where it will allocate a pblock, do the search, copy the entry, free search results from the pblock, and then free the pblock itself. I basically split this function into two functions. One function allocates the pblock, does the search and returns the entry. The other function frees the entries and pblock. 99% of time when we call slapi_search_internal_get_entry() we are just reading it and freeing it. It's not being consumed. In these cases we can use the two function approach eliminates an extra slapi_entry_dup(). Over the time of an operation/connection we can save quite a bit of mallocing/freeing. This could also help with memory fragmentation. ASAN: passed relates: https://pagure.io/389-ds-base/issue/51076 Reviewed by: firstyear & tbordaz(Thanks!) diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c index fe35ba5a0..01e4f319f 100644 --- a/ldap/servers/plugins/acctpolicy/acct_config.c +++ b/ldap/servers/plugins/acctpolicy/acct_config.c @@ -37,6 +37,7 @@ static int acct_policy_entry2config(Slapi_Entry *e, int acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void *plugin_id) { + Slapi_PBlock *entry_pb = NULL; acctPluginCfg *newcfg; Slapi_Entry *config_entry = NULL; Slapi_DN *config_sdn = NULL; @@ -44,8 +45,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void * /* Retrieve the config entry */ config_sdn = slapi_sdn_new_normdn_byref(PLUGIN_CONFIG_DN); - rc = slapi_search_internal_get_entry(config_sdn, NULL, &config_entry, - plugin_id); + rc = slapi_search_get_entry(&entry_pb, config_sdn, NULL, &config_entry, plugin_id); slapi_sdn_free(&config_sdn); if (rc != LDAP_SUCCESS || config_entry == NULL) { @@ -60,7 +60,7 @@ acct_policy_load_config_startup(Slapi_PBlock *pb __attribute__((unused)), void * rc = acct_policy_entry2config(config_entry, newcfg); config_unlock(); - slapi_entry_free(config_entry); + slapi_search_get_entry_done(&entry_pb); return (rc); } diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c index 2a876ad72..c3c32b074 100644 --- a/ldap/servers/plugins/acctpolicy/acct_plugin.c +++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c @@ -209,6 +209,7 @@ done: int acct_bind_preop(Slapi_PBlock *pb) { + Slapi_PBlock *entry_pb = NULL; const char *dn = NULL; Slapi_DN *sdn = NULL; Slapi_Entry *target_entry = NULL; @@ -236,8 +237,7 @@ acct_bind_preop(Slapi_PBlock *pb) goto done; } - ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry, - plugin_id); + ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id); /* There was a problem retrieving the entry */ if (ldrc != LDAP_SUCCESS) { @@ -275,7 +275,7 @@ done: slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL); } - slapi_entry_free(target_entry); + slapi_search_get_entry_done(&entry_pb); free_acctpolicy(&policy); @@ -293,6 +293,7 @@ done: int acct_bind_postop(Slapi_PBlock *pb) { + Slapi_PBlock *entry_pb = NULL; char *dn = NULL; int ldrc, tracklogin = 0; int rc = 0; /* Optimistic default */ @@ -327,8 +328,7 @@ acct_bind_postop(Slapi_PBlock *pb) covered by an account policy to decide whether we should track */ if (tracklogin == 
0) { sdn = slapi_sdn_new_normdn_byref(dn); - ldrc = slapi_search_internal_get_entry(sdn, NULL, &target_entry, - plugin_id); + ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &target_entry, plugin_id); if (ldrc != LDAP_SUCCESS) { slapi_log_err(SLAPI_LOG_ERR, POST_PLUGIN_NAME, @@ -355,7 +355,7 @@ done: slapi_send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, NULL, 0, NULL); } - slapi_entry_free(target_entry); + slapi_search_get_entry_done(&entry_pb); slapi_sdn_free(&sdn); @@ -370,11 +370,11 @@ done: static int acct_pre_op(Slapi_PBlock *pb, int modop) { + Slapi_PBlock *entry_pb = NULL; Slapi_DN *sdn = 0; Slapi_Entry *e = 0; Slapi_Mods *smods = 0; LDAPMod **mods; - int free_entry = 0; char *errstr = NULL; int ret = SLAPI_PLUGIN_SUCCESS; @@ -384,28 +384,25 @@ acct_pre_op(Slapi_PBlock *pb, int modop) if (acct_policy_dn_is_config(sdn)) { /* Validate config changes, but don't apply them. - * This allows us to reject invalid config changes - * here at the pre-op stage. Applying the config - * needs to be done at the post-op stage. */ + * This allows us to reject invalid config changes + * here at the pre-op stage. Applying the config + * needs to be done at the post-op stage. */ if (LDAP_CHANGETYPE_ADD == modop) { slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e); - /* If the entry doesn't exist, just bail and - * let the server handle it. */ + /* If the entry doesn't exist, just bail and let the server handle it. */ if (e == NULL) { goto bail; } } else if (LDAP_CHANGETYPE_MODIFY == modop) { /* Fetch the entry being modified so we can - * create the resulting entry for validation. */ + * create the resulting entry for validation. */ if (sdn) { - slapi_search_internal_get_entry(sdn, 0, &e, get_identity()); - free_entry = 1; + slapi_search_get_entry(&entry_pb, sdn, 0, &e, get_identity()); } - /* If the entry doesn't exist, just bail and - * let the server handle it. */ + /* If the entry doesn't exist, just bail and let the server handle it. */ if (e == NULL) { goto bail; } @@ -418,7 +415,7 @@ acct_pre_op(Slapi_PBlock *pb, int modop) /* Apply the mods to create the resulting entry. */ if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) { /* The mods don't apply cleanly, so we just let this op go - * to let the main server handle it. */ + * to let the main server handle it. 
*/ goto bailmod; } } else if (modop == LDAP_CHANGETYPE_DELETE) { @@ -439,8 +436,7 @@ bailmod: } bail: - if (free_entry && e) - slapi_entry_free(e); + slapi_search_get_entry_done(&entry_pb); if (ret) { slapi_log_err(SLAPI_LOG_PLUGIN, PRE_PLUGIN_NAME, diff --git a/ldap/servers/plugins/acctpolicy/acct_util.c b/ldap/servers/plugins/acctpolicy/acct_util.c index f25a3202d..f432092fe 100644 --- a/ldap/servers/plugins/acctpolicy/acct_util.c +++ b/ldap/servers/plugins/acctpolicy/acct_util.c @@ -85,6 +85,7 @@ get_attr_string_val(Slapi_Entry *target_entry, char *attr_name) int get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_entry, void *plugin_id, acctPolicy **policy) { + Slapi_PBlock *entry_pb = NULL; Slapi_DN *sdn = NULL; Slapi_Entry *policy_entry = NULL; Slapi_Attr *attr; @@ -123,8 +124,7 @@ get_acctpolicy(Slapi_PBlock *pb __attribute__((unused)), Slapi_Entry *target_ent } sdn = slapi_sdn_new_dn_byref(policy_dn); - ldrc = slapi_search_internal_get_entry(sdn, NULL, &policy_entry, - plugin_id); + ldrc = slapi_search_get_entry(&entry_pb, sdn, NULL, &policy_entry, plugin_id); slapi_sdn_free(&sdn); /* There should be a policy but it can't be retrieved; fatal error */ @@ -160,7 +160,7 @@ dopolicy: done: config_unlock(); slapi_ch_free_string(&policy_dn); - slapi_entry_free(policy_entry); + slapi_search_get_entry_done(&entry_pb); return (rc); } diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c index 7c875c852..39350ad53 100644 --- a/ldap/servers/plugins/automember/automember.c +++ b/ldap/servers/plugins/automember/automember.c @@ -1629,13 +1629,12 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char char *member_value = NULL; int rc = 0; Slapi_DN *group_sdn; - Slapi_Entry *group_entry = NULL; /* First thing check that the group still exists */ group_sdn = slapi_sdn_new_dn_byval(group_dn); - rc = slapi_search_internal_get_entry(group_sdn, NULL, &group_entry, automember_get_plugin_id()); + rc = slapi_search_internal_get_entry(group_sdn, NULL, NULL, automember_get_plugin_id()); slapi_sdn_free(&group_sdn); - if (rc != LDAP_SUCCESS || group_entry == NULL) { + if (rc != LDAP_SUCCESS) { if (rc == LDAP_NO_SUCH_OBJECT) { /* the automember group (default or target) does not exist, just skip this definition */ slapi_log_err(SLAPI_LOG_INFO, AUTOMEMBER_PLUGIN_SUBSYSTEM, @@ -1647,10 +1646,8 @@ automember_update_member_value(Slapi_Entry *member_e, const char *group_dn, char "automember_update_member_value - group (default or target) can not be retrieved (%s) err=%d\n", group_dn, rc); } - slapi_entry_free(group_entry); return rc; } - slapi_entry_free(group_entry); /* If grouping_value is dn, we need to fetch the dn instead. */ if (slapi_attr_type_cmp(grouping_value, "dn", SLAPI_TYPE_CMP_EXACT) == 0) { @@ -1752,11 +1749,11 @@ out: static int automember_pre_op(Slapi_PBlock *pb, int modop) { + Slapi_PBlock *entry_pb = NULL; Slapi_DN *sdn = 0; Slapi_Entry *e = 0; Slapi_Mods *smods = 0; LDAPMod **mods; - int free_entry = 0; char *errstr = NULL; int ret = SLAPI_PLUGIN_SUCCESS; @@ -1784,8 +1781,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop) /* Fetch the entry being modified so we can * create the resulting entry for validation. 
*/ if (sdn) { - slapi_search_internal_get_entry(sdn, 0, &e, automember_get_plugin_id()); - free_entry = 1; + slapi_search_get_entry(&entry_pb, sdn, 0, &e, automember_get_plugin_id()); } /* If the entry doesn't exist, just bail and @@ -1799,7 +1795,7 @@ automember_pre_op(Slapi_PBlock *pb, int modop) smods = slapi_mods_new(); slapi_mods_init_byref(smods, mods); - /* Apply the mods to create the resulting entry. */ + /* Apply the mods to create the resulting entry. */ if (mods && (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS)) { /* The mods don't apply cleanly, so we just let this op go * to let the main server handle it. */ @@ -1831,8 +1827,7 @@ bailmod: } bail: - if (free_entry && e) - slapi_entry_free(e); + slapi_search_get_entry_done(&entry_pb); if (ret) { slapi_log_err(SLAPI_LOG_PLUGIN, AUTOMEMBER_PLUGIN_SUBSYSTEM, diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c index 1ee271359..16c625bb0 100644 --- a/ldap/servers/plugins/dna/dna.c +++ b/ldap/servers/plugins/dna/dna.c @@ -1178,7 +1178,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) value = slapi_entry_attr_get_charptr(e, DNA_SHARED_CFG_DN); if (value) { - Slapi_Entry *shared_e = NULL; Slapi_DN *sdn = NULL; char *normdn = NULL; char *attrs[2]; @@ -1197,10 +1196,8 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) /* We don't need attributes */ attrs[0] = "cn"; attrs[1] = NULL; - slapi_search_internal_get_entry(sdn, attrs, &shared_e, getPluginID()); - /* Make sure that the shared config entry exists. */ - if (!shared_e) { + if(slapi_search_internal_get_entry(sdn, attrs, NULL, getPluginID()) != LDAP_SUCCESS) { /* We didn't locate the shared config container entry. Log * a message and skip this config entry. */ slapi_log_err(SLAPI_LOG_ERR, DNA_PLUGIN_SUBSYSTEM, @@ -1210,9 +1207,6 @@ dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry *e, int apply) ret = DNA_FAILURE; slapi_sdn_free(&sdn); goto bail; - } else { - slapi_entry_free(shared_e); - shared_e = NULL; } normdn = (char *)slapi_sdn_get_dn(sdn); @@ -1539,6 +1533,7 @@ dna_delete_shared_servers(PRCList **servers) static int dna_load_host_port(void) { + Slapi_PBlock *pb = NULL; int status = DNA_SUCCESS; Slapi_Entry *e = NULL; Slapi_DN *config_dn = NULL; @@ -1554,7 +1549,7 @@ dna_load_host_port(void) config_dn = slapi_sdn_new_ndn_byref("cn=config"); if (config_dn) { - slapi_search_internal_get_entry(config_dn, attrs, &e, getPluginID()); + slapi_search_get_entry(&pb, config_dn, attrs, &e, getPluginID()); slapi_sdn_free(&config_dn); } @@ -1562,8 +1557,8 @@ dna_load_host_port(void) hostname = slapi_entry_attr_get_charptr(e, "nsslapd-localhost"); portnum = slapi_entry_attr_get_charptr(e, "nsslapd-port"); secureportnum = slapi_entry_attr_get_charptr(e, "nsslapd-secureport"); - slapi_entry_free(e); } + slapi_search_get_entry_done(&pb); if (!hostname || !portnum) { status = DNA_FAILURE; @@ -2876,6 +2871,7 @@ bail: static int dna_is_replica_bind_dn(char *range_dn, char *bind_dn) { + Slapi_PBlock *entry_pb = NULL; char *replica_dn = NULL; Slapi_DN *replica_sdn = NULL; Slapi_DN *range_sdn = NULL; @@ -2912,8 +2908,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn) attrs[2] = 0; /* Find cn=replica entry via search */ - slapi_search_internal_get_entry(replica_sdn, attrs, &e, getPluginID()); - + slapi_search_get_entry(&entry_pb, replica_sdn, attrs, &e, getPluginID()); if (e) { /* Check if the passed in bind dn matches any of the replica bind dns. 
*/ Slapi_Value *bind_dn_sv = slapi_value_new_string(bind_dn); @@ -2927,6 +2922,7 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn) attrs[0] = "member"; attrs[1] = "uniquemember"; attrs[2] = 0; + slapi_search_get_entry_done(&entry_pb); for (i = 0; bind_group_dn != NULL && bind_group_dn[i] != NULL; i++) { if (ret) { /* already found a member, just free group */ @@ -2934,14 +2930,14 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn) continue; } bind_group_sdn = slapi_sdn_new_normdn_passin(bind_group_dn[i]); - slapi_search_internal_get_entry(bind_group_sdn, attrs, &bind_group_entry, getPluginID()); + slapi_search_get_entry(&entry_pb, bind_group_sdn, attrs, &bind_group_entry, getPluginID()); if (bind_group_entry) { ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "member", bind_dn_sv); if (ret == 0) { ret = slapi_entry_attr_has_syntax_value(bind_group_entry, "uniquemember", bind_dn_sv); } } - slapi_entry_free(bind_group_entry); + slapi_search_get_entry_done(&entry_pb); slapi_sdn_free(&bind_group_sdn); } slapi_ch_free((void **)&bind_group_dn); @@ -2956,7 +2952,6 @@ dna_is_replica_bind_dn(char *range_dn, char *bind_dn) } done: - slapi_entry_free(e); slapi_sdn_free(&range_sdn); slapi_sdn_free(&replica_sdn); diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index 40bd4b380..e9e1ec4c7 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -884,7 +884,7 @@ memberof_postop_modrdn(Slapi_PBlock *pb) pre_sdn = slapi_entry_get_sdn(pre_e); post_sdn = slapi_entry_get_sdn(post_e); } - + if (pre_sdn && post_sdn && slapi_sdn_compare(pre_sdn, post_sdn) == 0) { /* Regarding memberof plugin, this rename is a no-op * but it can be expensive to process it. So skip it @@ -1466,6 +1466,7 @@ memberof_modop_one_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi int memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_op, Slapi_DN *group_sdn, Slapi_DN *op_this_sdn, Slapi_DN *replace_with_sdn, Slapi_DN *op_to_sdn, memberofstringll *stack) { + Slapi_PBlock *entry_pb = NULL; int rc = 0; LDAPMod mod; LDAPMod replace_mod; @@ -1515,8 +1516,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o } /* determine if this is a group op or single entry */ - slapi_search_internal_get_entry(op_to_sdn, config->groupattrs, - &e, memberof_get_plugin_id()); + slapi_search_get_entry(&entry_pb, op_to_sdn, config->groupattrs, &e, memberof_get_plugin_id()); if (!e) { /* In the case of a delete, we need to worry about the * missing entry being a nested group. 
There's a small @@ -1751,7 +1751,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod_o bail: slapi_value_free(&to_dn_val); slapi_value_free(&this_dn_val); - slapi_entry_free(e); + slapi_search_get_entry_done(&entry_pb); return rc; } @@ -2368,6 +2368,7 @@ bail: int memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Value *memberdn) { + Slapi_PBlock *pb = NULL; int rc = 0; Slapi_DN *sdn = 0; Slapi_Entry *group_e = 0; @@ -2376,8 +2377,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va sdn = slapi_sdn_new_normdn_byref(slapi_value_get_string(groupdn)); - slapi_search_internal_get_entry(sdn, config->groupattrs, - &group_e, memberof_get_plugin_id()); + slapi_search_get_entry(&pb, sdn, config->groupattrs, + &group_e, memberof_get_plugin_id()); if (group_e) { /* See if memberdn is referred to by any of the group attributes. */ @@ -2388,9 +2389,8 @@ memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn, Slapi_Va break; } } - - slapi_entry_free(group_e); } + slapi_search_get_entry_done(&pb); slapi_sdn_free(&sdn); return rc; diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c index 46a76d884..cbec2ec40 100644 --- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c +++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c @@ -749,22 +749,22 @@ pam_passthru_get_config(Slapi_DN *bind_sdn) if (pam_passthru_check_suffix(cfg, bind_sdn) == LDAP_SUCCESS) { if (cfg->slapi_filter) { /* A filter is configured, so see if the bind entry is a match. */ + Slapi_PBlock *entry_pb = NULL; Slapi_Entry *test_e = NULL; /* Fetch the bind entry */ - slapi_search_internal_get_entry(bind_sdn, NULL, &test_e, - pam_passthruauth_get_plugin_identity()); + slapi_search_get_entry(&entry_pb, bind_sdn, NULL, &test_e, + pam_passthruauth_get_plugin_identity()); /* If the entry doesn't exist, just fall through to the main server code */ if (test_e) { /* Evaluate the filter. */ if (LDAP_SUCCESS == slapi_filter_test_simple(test_e, cfg->slapi_filter)) { /* This is a match. */ - slapi_entry_free(test_e); + slapi_search_get_entry_done(&entry_pb); goto done; } - - slapi_entry_free(test_e); + slapi_search_get_entry_done(&entry_pb); } } else { /* There is no filter to check, so this is a match. 
*/ diff --git a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c index 7f5fb02c4..5b43f8d1f 100644 --- a/ldap/servers/plugins/pam_passthru/pam_ptimpl.c +++ b/ldap/servers/plugins/pam_passthru/pam_ptimpl.c @@ -81,11 +81,12 @@ derive_from_bind_dn(Slapi_PBlock *pb __attribute__((unused)), const Slapi_DN *bi static char * derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_id, char *map_ident_attr, int *locked) { + Slapi_PBlock *entry_pb = NULL; Slapi_Entry *entry = NULL; char *attrs[] = {NULL, NULL}; attrs[0] = map_ident_attr; - int rc = slapi_search_internal_get_entry((Slapi_DN *)bindsdn, attrs, &entry, - pam_passthruauth_get_plugin_identity()); + int32_t rc = slapi_search_get_entry(&entry_pb, (Slapi_DN *)bindsdn, attrs, &entry, + pam_passthruauth_get_plugin_identity()); if (rc != LDAP_SUCCESS) { slapi_log_err(SLAPI_LOG_ERR, PAM_PASSTHRU_PLUGIN_SUBSYSTEM, @@ -108,7 +109,7 @@ derive_from_bind_entry(Slapi_PBlock *pb, const Slapi_DN *bindsdn, MyStrBuf *pam_ init_my_str_buf(pam_id, val); } - slapi_entry_free(entry); + slapi_search_get_entry_done(&entry_pb); return pam_id->str; } diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c index 3d0067531..5bca823ff 100644 --- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c +++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c @@ -526,6 +526,7 @@ done: static int pam_passthru_preop(Slapi_PBlock *pb, int modtype) { + Slapi_PBlock *entry_pb = NULL; Slapi_DN *sdn = NULL; Slapi_Entry *e = NULL; LDAPMod **mods; @@ -555,8 +556,8 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype) case LDAP_CHANGETYPE_MODIFY: /* Fetch the entry being modified so we can * create the resulting entry for validation. */ - slapi_search_internal_get_entry(sdn, 0, &e, - pam_passthruauth_get_plugin_identity()); + slapi_search_get_entry(&entry_pb, sdn, 0, &e, + pam_passthruauth_get_plugin_identity()); /* If the entry doesn't exist, just bail and * let the server handle it. */ @@ -576,9 +577,6 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype) /* Don't bail here, as we need to free the entry. */ } } - - /* Free the entry. */ - slapi_entry_free(e); break; case LDAP_CHANGETYPE_DELETE: case LDAP_CHANGETYPE_MODDN: @@ -591,6 +589,7 @@ pam_passthru_preop(Slapi_PBlock *pb, int modtype) } bail: + slapi_search_get_entry_done(&entry_pb); /* If we are refusing the operation, return the result to the client. 
*/ if (ret) { slapi_send_ldap_result(pb, ret, NULL, returntext, 0, NULL); diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c index 3b65d6b20..a25839f21 100644 --- a/ldap/servers/plugins/replication/repl5_tot_protocol.c +++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c @@ -469,7 +469,8 @@ retry: */ /* Get suffix */ Slapi_Entry *suffix = NULL; - rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION)); + Slapi_PBlock *suffix_pb = NULL; + rc = slapi_search_get_entry(&suffix_pb, area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION)); if (rc) { slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "repl5_tot_run - Unable to " "get the suffix entry \"%s\".\n", @@ -517,7 +518,7 @@ retry: LDAP_SCOPE_SUBTREE, "(parentid>=1)", NULL, 0, ctrls, NULL, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION), OP_FLAG_BULK_IMPORT); cb_data.num_entries = 0UL; - slapi_entry_free(suffix); + slapi_search_get_entry_done(&suffix_pb); } else { /* Original total update */ /* we need to provide managedsait control so that referral entries can diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c index d7ccf0e07..e69012204 100644 --- a/ldap/servers/plugins/uiduniq/uid.c +++ b/ldap/servers/plugins/uiduniq/uid.c @@ -1254,6 +1254,7 @@ preop_modify(Slapi_PBlock *pb) static int preop_modrdn(Slapi_PBlock *pb) { + Slapi_PBlock *entry_pb = NULL; int result = LDAP_SUCCESS; Slapi_Entry *e = NULL; Slapi_Value *sv_requiredObjectClass = NULL; @@ -1351,7 +1352,7 @@ preop_modrdn(Slapi_PBlock *pb) /* Get the entry that is being renamed so we can make a dummy copy * of what it will look like after the rename. */ - err = slapi_search_internal_get_entry(sdn, NULL, &e, plugin_identity); + err = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_identity); if (err != LDAP_SUCCESS) { result = uid_op_error(35); /* We want to return a no such object error if the target doesn't exist. */ @@ -1371,24 +1372,24 @@ preop_modrdn(Slapi_PBlock *pb) /* - * Check if it has the required object class - */ + * Check if it has the required object class + */ if (requiredObjectClass && !slapi_entry_attr_has_syntax_value(e, SLAPI_ATTR_OBJECTCLASS, sv_requiredObjectClass)) { break; } /* - * Find any unique attribute data in the new RDN - */ + * Find any unique attribute data in the new RDN + */ for (i = 0; attrNames && attrNames[i]; i++) { err = slapi_entry_attr_find(e, attrNames[i], &attr); if (!err) { /* - * Passed all the requirements - this is an operation we - * need to enforce uniqueness on. Now find all parent entries - * with the marker object class, and do a search for each one. - */ + * Passed all the requirements - this is an operation we + * need to enforce uniqueness on. Now find all parent entries + * with the marker object class, and do a search for each one. 
+ */ if (NULL != markerObjectClass) { /* Subtree defined by location of marker object class */ result = findSubtreeAndSearch(slapi_entry_get_sdn(e), attrNames, attr, NULL, @@ -1407,8 +1408,8 @@ preop_modrdn(Slapi_PBlock *pb) END /* Clean-up */ slapi_value_free(&sv_requiredObjectClass); - if (e) - slapi_entry_free(e); + + slapi_search_get_entry_done(&entry_pb); if (result) { slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 8a24ccc74..be32961b1 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -1916,18 +1916,13 @@ slapd_bind_local_user(Connection *conn) char *root_dn = config_get_ldapi_root_dn(); if (root_dn) { + Slapi_PBlock *entry_pb = NULL; Slapi_DN *edn = slapi_sdn_new_dn_byref( slapi_dn_normalize(root_dn)); Slapi_Entry *e = 0; /* root might be locked too! :) */ - ret = slapi_search_internal_get_entry( - edn, 0, - &e, - (void *)plugin_get_default_component_id() - - ); - + ret = slapi_search_get_entry(&entry_pb, edn, 0, &e, (void *)plugin_get_default_component_id()); if (0 == ret && e) { ret = slapi_check_account_lock( 0, /* pb not req */ @@ -1955,7 +1950,7 @@ slapd_bind_local_user(Connection *conn) root_map_free: /* root_dn consumed by bind creds set */ slapi_sdn_free(&edn); - slapi_entry_free(e); + slapi_search_get_entry_done(&entry_pb); ret = 0; } } diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index bbc0ab71a..259bedfff 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -592,6 +592,7 @@ modify_internal_pb(Slapi_PBlock *pb) static void op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw) { + Slapi_PBlock *entry_pb = NULL; Slapi_Backend *be = NULL; Slapi_Entry *pse; Slapi_Entry *referral; @@ -723,7 +724,7 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw) * 2. If yes, then if the mods contain any passwdpolicy specific attributes. * 3. If yes, then it invokes corrosponding checking function. */ - if (!repl_op && !internal_op && normdn && (e = get_entry(pb, normdn))) { + if (!repl_op && !internal_op && normdn && slapi_search_get_entry(&entry_pb, sdn, NULL, &e, NULL) == LDAP_SUCCESS) { Slapi_Value target; slapi_value_init(&target); slapi_value_set_string(&target, "passwordpolicy"); @@ -1072,7 +1073,7 @@ free_and_return : { slapi_entry_free(epre); slapi_entry_free(epost); } - slapi_entry_free(e); + slapi_search_get_entry_done(&entry_pb); if (be) slapi_be_Unlock(be); @@ -1202,12 +1203,13 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M if (!internal_op) { /* slapi_acl_check_mods needs an array of LDAPMods, but * we're really only interested in the one password mod. */ + Slapi_PBlock *entry_pb = NULL; LDAPMod *mods[2]; mods[0] = mod; mods[1] = NULL; /* We need to actually fetch the target here to use for ACI checking. */ - slapi_search_internal_get_entry(&sdn, NULL, &e, (void *)plugin_get_default_component_id()); + slapi_search_get_entry(&entry_pb, &sdn, NULL, &e, NULL); /* Create a bogus entry with just the target dn if we were unable to * find the actual entry. This will only be used for checking the ACIs. 
*/ @@ -1238,9 +1240,12 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M } send_ldap_result(pb, res, NULL, errtxt, 0, NULL); slapi_ch_free_string(&errtxt); + slapi_search_get_entry_done(&entry_pb); rc = -1; goto done; } + /* done with slapi entry e */ + slapi_search_get_entry_done(&entry_pb); /* * If this mod is being performed by a password administrator/rootDN, @@ -1353,7 +1358,6 @@ op_shared_allow_pw_change(Slapi_PBlock *pb, LDAPMod *mod, char **old_pw, Slapi_M valuearray_free(&values); done: - slapi_entry_free(e); slapi_sdn_done(&sdn); slapi_ch_free_string(&proxydn); slapi_ch_free_string(&proxystr); diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c index 9da266b61..a140e7988 100644 --- a/ldap/servers/slapd/plugin_internal_op.c +++ b/ldap/servers/slapd/plugin_internal_op.c @@ -882,3 +882,51 @@ slapi_search_internal_get_entry(Slapi_DN *dn, char **attrs, Slapi_Entry **ret_en int_search_pb = NULL; return rc; } + +int32_t +slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity) +{ + Slapi_Entry **entries = NULL; + int32_t rc = 0; + void *component = component_identity; + + if (ret_entry) { + *ret_entry = NULL; + } + + if (component == NULL) { + component = (void *)plugin_get_default_component_id(); + } + + if (*pb == NULL) { + *pb = slapi_pblock_new(); + } + slapi_search_internal_set_pb(*pb, slapi_sdn_get_dn(dn), LDAP_SCOPE_BASE, + "(|(objectclass=*)(objectclass=ldapsubentry))", + attrs, 0, NULL, NULL, component, 0 ); + slapi_search_internal_pb(*pb); + slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + if (LDAP_SUCCESS == rc) { + slapi_pblock_get(*pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries); + if (NULL != entries && NULL != entries[0]) { + /* Only need to dup the entry if the caller passed ret_entry in. 
*/ + if (ret_entry) { + *ret_entry = entries[0]; + } + } else { + rc = LDAP_NO_SUCH_OBJECT; + } + } + + return rc; +} + +void +slapi_search_get_entry_done(Slapi_PBlock **pb) +{ + if (pb && *pb) { + slapi_free_search_results_internal(*pb); + slapi_pblock_destroy(*pb); + *pb = NULL; + } +} diff --git a/ldap/servers/slapd/resourcelimit.c b/ldap/servers/slapd/resourcelimit.c index 705344c84..9c2619716 100644 --- a/ldap/servers/slapd/resourcelimit.c +++ b/ldap/servers/slapd/resourcelimit.c @@ -305,22 +305,17 @@ reslimit_get_ext(Slapi_Connection *conn, const char *logname, SLAPIResLimitConnD int reslimit_update_from_dn(Slapi_Connection *conn, Slapi_DN *dn) { - Slapi_Entry *e; + Slapi_PBlock *pb = NULL; + Slapi_Entry *e = NULL; int rc; - e = NULL; if (dn != NULL) { - char **attrs = reslimit_get_registered_attributes(); - (void)slapi_search_internal_get_entry(dn, attrs, &e, reslimit_componentid); + slapi_search_get_entry(&pb, dn, attrs, &e, reslimit_componentid); charray_free(attrs); } - rc = reslimit_update_from_entry(conn, e); - - if (NULL != e) { - slapi_entry_free(e); - } + slapi_search_get_entry_done(&pb); return (rc); } diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index d44b03b0e..bf7e59f75 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -341,6 +341,7 @@ schema_policy_add_action(Slapi_Entry *entry, char *attrName, schema_item_t **lis static void schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica) { + Slapi_PBlock *pb = NULL; Slapi_DN sdn; Slapi_Entry *entry = NULL; schema_item_t *schema_item, *next; @@ -369,8 +370,7 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica) /* Load the replication policy of the schema */ slapi_sdn_init_dn_byref(&sdn, dn); - if (slapi_search_internal_get_entry(&sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) { - + if (slapi_search_get_entry(&pb, &sdn, NULL, &entry, plugin_get_default_component_id()) == LDAP_SUCCESS) { /* fill the policies (accept/reject) regarding objectclass */ schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_ACCEPT, &replica->objectclasses); schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_OBJECTCLASS_REJECT, &replica->objectclasses); @@ -378,9 +378,8 @@ schema_load_repl_policy(const char *dn, repl_schema_policy_t *replica) /* fill the policies (accept/reject) regarding attribute */ schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_ACCEPT, &replica->attributes); schema_policy_add_action(entry, ATTR_SCHEMA_UPDATE_ATTRIBUTE_REJECT, &replica->attributes); - - slapi_entry_free(entry); } + slapi_search_get_entry_done(&pb); slapi_sdn_done(&sdn); } diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 9889100c1..e41b6a8a5 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -5972,7 +5972,7 @@ void slapi_seq_internal_set_pb(Slapi_PBlock *pb, char *ibase, int type, char *at /* * slapi_search_internal_get_entry() finds an entry given a dn. It returns - * an LDAP error code (LDAP_SUCCESS if all goes well). + * an LDAP error code (LDAP_SUCCESS if all goes well). Caller must free ret_entry */ int slapi_search_internal_get_entry(Slapi_DN *dn, char **attrlist, Slapi_Entry **ret_entry, void *caller_identity); @@ -8304,6 +8304,27 @@ uint64_t slapi_atomic_decr_64(uint64_t *ptr, int memorder); /* helper function */ const char * slapi_fetch_attr(Slapi_Entry *e, const char *attrname, char *default_val); +/** + * Get a Slapi_Entry via an internal search. 
The caller then needs to call + * slapi_get_entry_done() to free any resources allocated to get the entry + * + * \param pb - slapi_pblock pointer (the function will allocate if necessary) + * \param dn - Slapi_DN of the entry to retrieve + * \param attrs - char list of attributes to get + * \param ret_entry - pointer to a Slapi_entry wer the returned entry is stored + * \param component_identity - plugin component + * + * \return - ldap result code + */ +int32_t slapi_search_get_entry(Slapi_PBlock **pb, Slapi_DN *dn, char **attrs, Slapi_Entry **ret_entry, void *component_identity); + +/** + * Free the resources allocated by slapi_search_get_entry() + * + * \param pb - slapi_pblock pointer + */ +void slapi_search_get_entry_done(Slapi_PBlock **pb); + #ifdef __cplusplus } #endif
0
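The commit above replaces the dup-and-free behaviour of slapi_search_internal_get_entry() with the pblock-backed pair slapi_search_get_entry() / slapi_search_get_entry_done(). A minimal caller sketch in C, assuming only the signatures shown in the diff; my_check_entry(), the "cn" lookup and the plugin_id argument are illustrative placeholders, not code from the commit:

#include <slapi-plugin.h>

/* Read an entry for inspection only, then release the entry together with
 * the pblock that owns it -- the pattern the commit converts callers to. */
static int32_t
my_check_entry(Slapi_DN *sdn, void *plugin_id)
{
    Slapi_PBlock *entry_pb = NULL;  /* allocated by slapi_search_get_entry() when NULL */
    Slapi_Entry *e = NULL;          /* points into the pblock results, no extra dup */
    int32_t rc;

    rc = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_id);
    if (rc == LDAP_SUCCESS && e) {
        char *val = slapi_entry_attr_get_charptr(e, "cn");
        /* ... read-only use of the value ... */
        slapi_ch_free_string(&val);
    }
    /* frees the search results (including e) and the pblock in one call */
    slapi_search_get_entry_done(&entry_pb);
    return rc;
}

Because the entry is not duplicated, it must not be referenced after slapi_search_get_entry_done() returns; callers that need to keep or consume the entry still have to duplicate it or stay with slapi_search_internal_get_entry().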
d70d772e768245c06466a68fc3f32739692c20cc
389ds/389-ds-base
Add support for password change extended operation.
commit d70d772e768245c06466a68fc3f32739692c20cc Author: David Boreham <[email protected]> Date: Fri Jan 28 20:27:05 2005 +0000 Add support for password change extended operation. diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index 96a60a01c..78c32f902 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -843,6 +843,9 @@ int start_tls_graceful_closure( Connection *conn, Slapi_PBlock *pb, int is_initi int start_tls_register_plugin(); int start_tls_init( Slapi_PBlock *pb ); +/* passwd_extop.c */ +int passwd_modify_register_plugin(); + /* * slapi_str2filter.c */
0
37995ff4c1d22816ee6b2c813723dca3b4bfefc6
389ds/389-ds-base
Issue 5032 - Fix configure option in specfile (#5174) Description: libldap_r is no longer used since Fedora 34 and is completely removed from Fedora 36 and newer. Hence, adjust the specfile accordingly. Relates: https://github.com/389ds/389-ds-base/issues/5032 Reviewed by: @mreynolds389 (Thanks!)
commit 37995ff4c1d22816ee6b2c813723dca3b4bfefc6 Author: Simon Pichugin <[email protected]> Date: Thu Feb 24 12:58:19 2022 -0800 Issue 5032 - Fix configure option in specfile (#5174) Description: Lildap_r is no longer used since Fedora 34 and is completely removed from Fedora 36 and older. Hence, adjust the specfile accordingly. Relates: https://github.com/389ds/389-ds-base/issues/5032 Reviewed by: @mreynolds389 (Thanks!) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 9e1a83ba9..e6d68963f 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -393,6 +393,9 @@ autoreconf -fiv --with-systemdgroupname=%{groupname} \ --libexecdir=%{_libexecdir}/%{pkgname} \ $ASAN_FLAGS $MSAN_FLAGS $TSAN_FLAGS $UBSAN_FLAGS $RUST_FLAGS $CLANG_FLAGS $COCKPIT_FLAGS \ +%if 0%{?fedora} >= 34 || 0%{?rhel} >= 9 + --with-libldap-r=no \ +%endif --enable-cmocka # Avoid "Unknown key name 'XXX' in section 'Service', ignoring." warnings from systemd on older releases
0
a9b98efb4fda1672e915a6d4cbbad1b096e8f66d
389ds/389-ds-base
Bug 630092 - Coverity #12003: Resource leaks issues https://bugzilla.redhat.com/show_bug.cgi?id=630092 Description: The cos_cache_add_defn() has been modified to release theDef when an error occurs.
commit a9b98efb4fda1672e915a6d4cbbad1b096e8f66d Author: Endi Sukma Dewata <[email protected]> Date: Fri Sep 17 17:07:54 2010 -0400 Bug 630092 - Coverity #12003: Resource leaks issues https://bugzilla.redhat.com/show_bug.cgi?id=630092 Description: The cos_cache_add_defn() has been modified to release theDef when an error occurs. diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c index e20fd0dea..db99586c1 100644 --- a/ldap/servers/plugins/cos/cos_cache.c +++ b/ldap/servers/plugins/cos/cos_cache.c @@ -1498,6 +1498,7 @@ static int cos_cache_add_defn( out: if(ret < 0) { + slapi_ch_free((void**)&theDef); if(dn) cos_cache_del_attrval_list(dn); if(tree)
0
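A generic C sketch of the leak pattern fixed above, assuming nothing beyond the NULL-safe slapi_ch_* allocators: any object built up before the failure is detected has to be released on the error exit. exampleDef and example_add_defn() are invented names, not the real cos_cache code.

#include <slapi-plugin.h>

typedef struct example_def {
    char *dn;
} exampleDef;

static int
example_add_defn(const char *dn)
{
    int ret = 0;
    exampleDef *theDef = (exampleDef *)slapi_ch_calloc(1, sizeof(exampleDef));

    if (dn == NULL || *dn == '\0') {
        ret = -1;                       /* error found after the allocation */
    } else {
        theDef->dn = slapi_ch_strdup(dn);
    }

    if (ret < 0) {
        /* mirror of the fix: free what was allocated instead of leaking it */
        slapi_ch_free_string(&theDef->dn);
        slapi_ch_free((void **)&theDef);
    }
    return ret;
}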
d56a29b5285b411fd9d6d0b51e1e874fc1eb1887
389ds/389-ds-base
Bug 610119 - fix coverity Defect Type: Null pointer dereferences issues 12167 - 12199 https://bugzilla.redhat.com/show_bug.cgi?id=610119 Resolves: bug 610119 Bug description: Fix coverity Defect Type: Null pointer dereferences issues 12167 - 12199 Fix description: Remove unnecessary NULL pointer checking in attrcrypt_keymgmt_store_key(). coverity ID: 12168
commit d56a29b5285b411fd9d6d0b51e1e874fc1eb1887 Author: Noriko Hosoi <[email protected]> Date: Fri Aug 20 15:41:57 2010 -0700 Bug 610119 - fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199 https://bugzilla.redhat.com/show_bug.cgi?id=610119 Resolves: bug 610119 Bug description: Fix coverify Defect Type: Null pointer dereferences issues 12167 - 12199 Fix description: Remove unnecessary NULL pointer checking in attrcrypt_keymgmt_store_key(). coverity ID: 12168 diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c index 6600f008e..83130207f 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c @@ -230,12 +230,8 @@ attrcrypt_keymgmt_store_key(ldbm_instance *li, attrcrypt_cipher_state *acs, SECK LDAPDebug(LDAP_DEBUG_ANY, "attrcrypt_keymgmt_store_key: failed to add config key entries to the DSE: %d: %s: %s\n", rc, ldap_err2string(rc), resulttext ? resulttext : "unknown"); ret = -1; } - if (entry_string) { - slapi_ch_free((void**)&entry_string); - } - if (pb) { - slapi_pblock_destroy(pb); - } + slapi_ch_free((void**)&entry_string); + slapi_pblock_destroy(pb); } LDAPDebug(LDAP_DEBUG_TRACE,"<- attrcrypt_keymgmt_store_key\n", 0, 0, 0); return ret;
0
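The cleanup above works because the slapi free routines are already defensive. A short C sketch, assuming only what the commit itself relies on (slapi_ch_free() and slapi_pblock_destroy() tolerating NULL); cleanup_example() and its locals are illustrative:

#include <slapi-plugin.h>

static void
cleanup_example(void)
{
    char *entry_string = NULL;   /* may or may not have been allocated */
    Slapi_PBlock *pb = NULL;     /* may or may not have been created */

    /* ... code that conditionally sets entry_string and pb ... */

    /* No guarding "if" needed: both calls are no-ops on NULL, and
     * slapi_ch_free() also resets the pointer it is given to NULL. */
    slapi_ch_free((void **)&entry_string);
    slapi_pblock_destroy(pb);
}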
634250f788effc843846e725fe38f4c475984319
389ds/389-ds-base
Ticket #47772 empty modify returns LDAP_INVALID_DN_SYNTAX https://fedorahosted.org/389/ticket/47772 Reviewed by: tbordaz, mreynolds (Thanks!) Branch: master Fix Description: tbordaz pointed out a couple of other places where the pblock modify_mods structure is being referenced directly, and needs to be checked for NULL. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit 634250f788effc843846e725fe38f4c475984319 Author: Rich Megginson <[email protected]> Date: Fri Apr 11 10:26:35 2014 -0600 Ticket #47772 empty modify returns LDAP_INVALID_DN_SYNTAX https://fedorahosted.org/389/ticket/47772 Reviewed by: tbordaz, mreynolds (Thanks!) Branch: master Fix Description: tbordaz pointed out a couple of other places where the pblock modify_mods structure is being referenced directly, and needs to be checked for NULL. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c index 139c8a31f..9d617e752 100644 --- a/ldap/servers/plugins/replication/repl5_agmt.c +++ b/ldap/servers/plugins/replication/repl5_agmt.c @@ -2780,7 +2780,7 @@ agmt_update_maxcsn(Replica *r, Slapi_DN *sdn, int op, LDAPMod **mods, CSN *csn) if(op == SLAPI_OPERATION_MODIFY) { slapi_rwlock_rdlock(agmt->attr_lock); - for ( excluded_count = 0, mod_count = 0; NULL != mods[ mod_count ]; mod_count++){ + for ( excluded_count = 0, mod_count = 0; mods && (NULL != mods[ mod_count ]); mod_count++){ if(charray_inlist(agmt->frac_attrs, mods[mod_count]->mod_type)){ excluded_count++; } else if(charray_inlist(agmt->attrs_to_strip, mods[mod_count]->mod_type)){ diff --git a/ldap/servers/plugins/replication/repl5_protocol_util.c b/ldap/servers/plugins/replication/repl5_protocol_util.c index 82af97051..893839de2 100644 --- a/ldap/servers/plugins/replication/repl5_protocol_util.c +++ b/ldap/servers/plugins/replication/repl5_protocol_util.c @@ -695,6 +695,10 @@ repl5_strip_fractional_mods(Repl_Agmt *agmt, LDAPMod ** mods) int strip = 1; int i, j, k; + if (mods == NULL) { + return retval; + } + if (a) { /* Iterate through the fractional attr list */ for ( i = 0; a[i] != NULL; i++ )
0
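A minimal C sketch of the guard the commit above adds, assuming only the LDAPMod type from the LDAP headers: an empty modify can leave the mods array NULL, so any direct walk over it needs a NULL check before the first dereference. count_mods() is an invented helper.

#include <slapi-plugin.h>

static int
count_mods(LDAPMod **mods)
{
    int mod_count;

    /* "mods &&" prevents a crash when the client sent an empty modify */
    for (mod_count = 0; mods && (mods[mod_count] != NULL); mod_count++) {
        /* e.g. inspect mods[mod_count]->mod_type here */
    }
    return mod_count;
}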
71d7ca07aa27bc7932f839ed51ff60e8710eb32d
389ds/389-ds-base
Issue 50536 - Audit log heading written to log after every update Bug Description: Once the audit log is rotated the log "title" is incorrectly written to the log after every single update. This happened because when we updated the state of the log it was applied to a local variable, and not the log info structure itself. Fix Description: After writing the "title", update the state of the log using a pointer to the log info structure. relates: https://pagure.io/389-ds-base/issue/50536 Reviewed by: lkrispenz(Thanks!)
commit 71d7ca07aa27bc7932f839ed51ff60e8710eb32d Author: Mark Reynolds <[email protected]> Date: Wed Aug 7 16:57:17 2019 -0400 Issue 50536 - Audit log heading written to log after every update Bug Description: Once the audit log is rotated the log "title" is incorrectly written to the log after every single update. This happened becuase when we udpated the state of the log it was applied to a local variable, and not the log info structure itself. Fix Description: After writting the "title", update the state of the log using a pointer to the log info structure. relates: https://pagure.io/389-ds-base/issue/50536 Reviewed by: lkrispenz(Thanks!) diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index a8204fed4..bfcf57475 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -2071,11 +2071,11 @@ slapd_log_audit( int retval = LDAP_SUCCESS; int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */ - int state = 0; + int *state; if (sourcelog == SLAPD_AUDIT_LOG) { - state = loginfo.log_audit_state; + state = &loginfo.log_audit_state; } else if (sourcelog == SLAPD_AUDITFAIL_LOG) { - state = loginfo.log_auditfail_state; + state = &loginfo.log_auditfail_state; } else { /* How did we even get here! */ return 1; @@ -2104,9 +2104,9 @@ int slapd_log_audit_internal( char *buffer, int buf_len, - int state) + int *state) { - if ((state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) { + if ((*state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) { LOG_AUDIT_LOCK_WRITE(); if (log__needrotation(loginfo.log_audit_fdes, SLAPD_AUDIT_LOG) == LOG_ROTATE) { @@ -2120,9 +2120,9 @@ slapd_log_audit_internal( loginfo.log_audit_rotationsyncclock += PR_ABS(loginfo.log_audit_rotationtime_secs); } } - if (state & LOGGING_NEED_TITLE) { + if (*state & LOGGING_NEED_TITLE) { log_write_title(loginfo.log_audit_fdes); - state &= ~LOGGING_NEED_TITLE; + *state &= ~LOGGING_NEED_TITLE; } LOG_WRITE_NOW_NO_ERR(loginfo.log_audit_fdes, buffer, buf_len, 0); LOG_AUDIT_UNLOCK_WRITE(); diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index b7e82c833..c25a11dbd 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -791,7 +791,7 @@ int slapi_log_access(int level, char *fmt, ...) 
; #endif int slapd_log_audit(char *buffer, int buf_len, int sourcelog); -int slapd_log_audit_internal(char *buffer, int buf_len, int state); +int slapd_log_audit_internal(char *buffer, int buf_len, int *state); int slapd_log_auditfail(char *buffer, int buf_len); int slapd_log_auditfail_internal(char *buffer, int buf_len); void log_access_flush(void); diff --git a/src/cockpit/389-console/src/servers.html b/src/cockpit/389-console/src/servers.html index 0261895ef..e8171c005 100644 --- a/src/cockpit/389-console/src/servers.html +++ b/src/cockpit/389-console/src/servers.html @@ -521,7 +521,7 @@ <label for="nsslapd-accesslog-logrotationtime" class="ds-config-sub-label" title="Access log rotation time settings (nsslapd-accesslog-logrotationtime).">Create New Log Every...</label><input class="ds-input" type="text" id="nsslapd-accesslog-logrotationtime" size="40"/> <select class="btn btn-default dropdown" id="nsslapd-accesslog-logrotationtimeunit"> <option>minute</option> - <option>hours</option> + <option>hour</option> <option>day</option> <option>week</option> <option>month</option> @@ -610,7 +610,7 @@ <label for="nsslapd-auditlog-logrotationtime" class="ds-config-sub-label" title="Audit log rotation time settings (nsslapd-auditlog-logrotationtime).">Create New Log Every...</label><input class="ds-input" type="text" id="nsslapd-auditlog-logrotationtime" size="40"/> <select class="btn btn-default dropdown" id="nsslapd-auditlog-logrotationtimeunit"> <option>minute</option> - <option>hours</option> + <option>hour</option> <option>day</option> <option>week</option> <option>month</option> @@ -669,7 +669,7 @@ <label for="nsslapd-auditfaillog-logrotationtime" class="ds-config-sub-label" title="Audit failure log rotation time settings (nsslapd-auditlog-logrotationtime).">Create New Log Every...</label><input class="ds-input" type="text" id="nsslapd-auditfaillog-logrotationtime" size="40"/> <select class="btn btn-default dropdown" id="nsslapd-auditfaillog-logrotationtimeunit"> <option>minute</option> - <option>hours</option> + <option>hour</option> <option>day</option> <option>week</option> <option>month</option> @@ -726,7 +726,7 @@ <label for="nsslapd-errorlog-logrotationtime" class="ds-config-sub-label" title="Errors log rotation time settings (nsslapd-errorlog-logrotationtime).">Create New Log Every...</label><input class="ds-input" type="text" id="nsslapd-errorlog-logrotationtime" size="40"/> <select class="btn btn-default dropdown" id="nsslapd-errorlog-logrotationtimeunit"> <option>minute</option> - <option>hours</option> + <option>hour</option> <option>day</option> <option>week</option> <option>month</option>
0
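A self-contained C illustration of the bug class fixed above: clearing a flag through a by-value copy is lost, while clearing it through a pointer to the real field persists, so the title is written once instead of after every update. The loginfo struct, the flag value and write_record() are invented stand-ins for the slapd logging code.

#include <stdio.h>

#define LOGGING_NEED_TITLE 0x1

static struct {
    int log_audit_state;
} loginfo = { LOGGING_NEED_TITLE };

static void
write_record(int *state)
{
    if (*state & LOGGING_NEED_TITLE) {
        puts("=== audit log title ===");
        *state &= ~LOGGING_NEED_TITLE;   /* sticks: we update the real field */
    }
    puts("audit record");
}

int
main(void)
{
    /* with the old by-value parameter the title would repeat on every call */
    write_record(&loginfo.log_audit_state);
    write_record(&loginfo.log_audit_state);   /* no second title */
    return 0;
}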
7f7f83cbf68f4ce88c00ad203889272812accd0e
389ds/389-ds-base
Ticket #47299 - allow cmdline scripts to work with non-root user https://fedorahosted.org/389/ticket/47299 Reviewed by: mreynolds (Thanks!) Branch: master Fix Description: If running as non-root, look for the initconfig scripts/files in ~/.dirsrv. If the INITCONFIGDIR env. var. is set, use that and do not look anywhere else. Cannot depend on the value of $USER - must use id to determine the effective user id. Also fixed a problem with return value handling - the if test changes the value of $?, so have to save the value just after starting the instance. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit 7f7f83cbf68f4ce88c00ad203889272812accd0e Author: Rich Megginson <[email protected]> Date: Thu Mar 21 10:20:19 2013 -0600 Ticket #47299 - allow cmdline scripts to work with non-root user https://fedorahosted.org/389/ticket/47299 Reviewed by: mreynolds (Thanks!) Branch: master Fix Description: If running as non-root, look for the initconfig scripts/files in ~/.dirsrv. If the INITCONFIGDIR env. var. is set, use that and do not look anywhere else. Cannot depend on the value of $USER - must use id to determine the effective user id. Also fixed a problem with return value handling - the if test changes the value of $?, so have to save the value just after starting the instance. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/admin/src/scripts/DSSharedLib.in b/ldap/admin/src/scripts/DSSharedLib.in index 17079d75e..1a66e2892 100644 --- a/ldap/admin/src/scripts/DSSharedLib.in +++ b/ldap/admin/src/scripts/DSSharedLib.in @@ -20,13 +20,38 @@ get_server_id() inst_count=0 instances="<none>" rc=0 - - for i in `ls $dir/dirsrv-* 2>/dev/null` + + # convert + # uid=0(root) gid=0(root) groups=0(root),1(bin),2(daemon),3(sys),..... + # to + # 0 + # this is the only portable, secure way to determine the id number + userid=`id | awk -F'[=(]+' '{print $2}'` + if [ "$userid" -ne 0 ] ; then + # magic - see start-dirsrv, DSCreate.pm::get_initconfigdir, etc. + dir=$HOME/.@package_name@ + fi + if [ -n "$INITCONFIGDIR" ] ; then + dir=$INITCONFIGDIR + fi + + # look first in user provided INITCONFIGDIR, then in the system/build location, + # then in the users home dir - cases + # 1. system install but running as non-root user + # in this case, we want to use the instance from $dir - it will + # fallback to $homedir in that case, and if that is a problem, the user will + # just have to temporarily move $homedir/dirsrv-sysinstancename out of the way + # while working on the system instance + # 2. prefix/non-system install + # in this case, we want to use $homedir - if for some reason there is a system + # instance in $dir with the same name, the user can use INITCONFIGDIR to + # override that and force the use of the one from $homedir + for i in `ls $dir/@package_name@-* 2>/dev/null` do - if [ $i != "$dir/dirsrv-admin" ] + if [ $i != "$dir/@package_name@-admin" ] then inst_count=`expr $inst_count + 1` - id=$(expr "$i" : ".*dirsrv-\([^)]*\).*") + id=$(expr "$i" : ".*@package_name@-\([^)]*\).*") if [ $first == "yes" ] then instances=$id @@ -52,11 +77,11 @@ get_server_id() elif [ $servid == slapd-* ] then servid=`echo "$servid" | sed -e 's/slapd-//'` - elif [ $servid == dirsrv-* ] + elif [ $servid == @package_name@-* ] then - servid=`echo "$servid" | sed -e 's/dirsrv-//'` + servid=`echo "$servid" | sed -e 's/@package_name@-//'` fi - if ! [ -a "$dir/dirsrv-$servid" ] + if ! 
[ -a "$dir/@package_name@-$servid" ] then # invalid instance name, return the "valid" instance names servid=$instances diff --git a/ldap/admin/src/scripts/DSUtil.pm.in b/ldap/admin/src/scripts/DSUtil.pm.in index e5876719e..b69cbb50c 100644 --- a/ldap/admin/src/scripts/DSUtil.pm.in +++ b/ldap/admin/src/scripts/DSUtil.pm.in @@ -1287,13 +1287,21 @@ sub get_server_id { my $instances = "<none>"; my $name; my $file; + + if (getLogin ne 'root') { + $dir = "$ENV{HOME}/.@package_name@"; + } + + if (defined $ENV{INITCONFIGDIR}) { + $dir = $ENV{INITCONFIGDIR}; + } opendir(DIR, "$dir"); my @files = readdir(DIR); foreach $file (@files){ - if($file =~ /^dirsrv-/ && $file ne "dirsrv-admin"){ + if($file =~ /^@package_name@-/ && $file ne "@package_name@-admin"){ $instance_count++; - if($file =~ /dirsrv-(.*)/){ + if($file =~ /@package_name@-(.*)/){ if($first eq "yes"){ $instances=$1; $first = "no"; @@ -1312,17 +1320,17 @@ sub get_server_id { print "Available instances: $instances\n"; exit (1); } - } elsif ($servid =~ /^dirsrv-/){ - # strip off "dirsrv-" - $servid =~ s/^dirsrv-//; + } elsif ($servid =~ /^@package_name@-/){ + # strip off "@package_name@-" + $servid =~ s/^@package_name@-//; } elsif ($servid =~ /^slapd-/){ # strip off "slapd-" $servid =~ s/^slapd-//; } - unless ( -e "$dir/dirsrv-$servid" ){ + unless ( -e "$dir/@package_name@-$servid" ){ print (STDERR "Invalid server identifer: $servid\n"); - print (STDERR "Available instances: $instances\n"); + print (STDERR "Available instances in $dir: $instances\n"); exit (1); } diff --git a/ldap/admin/src/scripts/restart-dirsrv.in b/ldap/admin/src/scripts/restart-dirsrv.in index 74dc1cf40..c5825b98a 100644 --- a/ldap/admin/src/scripts/restart-dirsrv.in +++ b/ldap/admin/src/scripts/restart-dirsrv.in @@ -37,7 +37,13 @@ done shift $(($OPTIND-1)) if [ "$initconfig_dir" = "" ]; then - if [ $USER = root ] ; then + # convert + # uid=0(root) gid=0(root) groups=0(root),1(bin),2(daemon),3(sys),..... + # to + # 0 + # this is the only portable, secure way to determine the id number + userid=`id | awk -F'[=(]+' '{print $2}'` + if [ "$userid" -eq 0 ] ; then initconfig_dir=@initconfigdir@ else initconfig_dir=$HOME/.@package_name@ @@ -56,8 +62,9 @@ if [ "$#" -eq 0 ]; then fi echo Restarting instance \"$inst\" restart_instance $inst - if [ "$?" -ne 0 ]; then - ret=$? + rv=$? + if [ "$rv" -ne 0 ]; then + ret=$rv fi done exit $ret diff --git a/ldap/admin/src/scripts/start-dirsrv.in b/ldap/admin/src/scripts/start-dirsrv.in index b5f45f93c..7864ad932 100755 --- a/ldap/admin/src/scripts/start-dirsrv.in +++ b/ldap/admin/src/scripts/start-dirsrv.in @@ -115,7 +115,13 @@ done shift $(($OPTIND-1)) if [ "$initconfig_dir" = "" ]; then - if [ $USER = root ] ; then + # convert + # uid=0(root) gid=0(root) groups=0(root),1(bin),2(daemon),3(sys),..... + # to + # 0 + # this is the only portable, secure way to determine the id number + userid=`id | awk -F'[=(]+' '{print $2}'` + if [ "$userid" -eq 0 ] ; then initconfig_dir=@initconfigdir@ else initconfig_dir=$HOME/.@package_name@ @@ -134,8 +140,9 @@ if [ "$#" -eq 0 ]; then fi echo Starting instance \"$inst\" start_instance $inst - if [ "$?" -ne 0 ]; then - ret=$? + rv=$? 
+ if [ "$rv" -ne 0 ]; then + ret=$rv fi done exit $ret diff --git a/ldap/admin/src/scripts/stop-dirsrv.in b/ldap/admin/src/scripts/stop-dirsrv.in index 220fe924a..cec65d921 100755 --- a/ldap/admin/src/scripts/stop-dirsrv.in +++ b/ldap/admin/src/scripts/stop-dirsrv.in @@ -67,7 +67,13 @@ done shift $(($OPTIND-1)) if [ "$initconfig_dir" = "" ]; then - if [ $USER = root ] ; then + # convert + # uid=0(root) gid=0(root) groups=0(root),1(bin),2(daemon),3(sys),..... + # to + # 0 + # this is the only portable, secure way to determine the id number + userid=`id | awk -F'[=(]+' '{print $2}'` + if [ "$userid" -eq 0 ] ; then initconfig_dir=@initconfigdir@ else initconfig_dir=$HOME/.@package_name@ @@ -86,8 +92,9 @@ if [ "$#" -eq 0 ]; then fi echo Stopping instance \"$inst\" stop_instance $inst - if [ "$?" -ne 0 ]; then - ret=$? + rv=$? + if [ "$rv" -ne 0 ]; then + ret=$rv fi done exit $ret
0
51796b5efab4f1a760f4bb2298ede7fcfa2c40ca
389ds/389-ds-base
610281 - fix coverity Defect Type: Control flow issues https://bugzilla.redhat.com/show_bug.cgi?id=610281 11808 DEADCODE Triaged Unassigned Bug Minor Fix Required replication_multimaster_plugin_init() ds/ldap/servers/plugins/replication/repl5_init.c Comment: There used to be an initializing replica hash code between the line 573 and 575: 572 /* initialize replica hash - has to be done before mapping tree is 573 initialized so we can't do it in the start function */ 575 if (rc != 0) 576 { Execution cannot reach this statement "slapi_log_error(0, repl_plu...". 577 slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, 578 "replication_multimaster_plugin_init: failed to initialize replica hash\n"); 579 return -1; 580 } The initializing replica hash code was moved out but the comment and the result checking code were left. Removing them. Also, setting non 0 value to a static int variable multimaster_initialised if the plugin registration was successful.
commit 51796b5efab4f1a760f4bb2298ede7fcfa2c40ca Author: Noriko Hosoi <[email protected]> Date: Fri Jul 2 15:44:23 2010 -0700 610281 - fix coverity Defect Type: Control flow issues https://bugzilla.redhat.com/show_bug.cgi?id=610281 11808 DEADCODE Triaged Unassigned Bug Minor Fix Required replication_multimaster_plugin_init() ds/ldap/servers/plugins/replication/repl5_init.c Comment: There used to be an initializing replica hash code between the line 573 and 575: 572 /* initialize replica hash - has to be done before mapping tree is 573 initialized so we can't do it in the start function */ 575 if (rc != 0) 576 { Execution cannot reach this statement "slapi_log_error(0, repl_plu...". 577 slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, 578 "replication_multimaster_plugin_init: failed to initialize replica hash\n"); 579 return -1; 580 } The initializing replica hash code was moved out but the comment and the result checking code were left. Removing them. Also, setting non 0 value to a static int variable multimaster_initialised if the plugin registration was successful. diff --git a/ldap/servers/plugins/replication/repl5_init.c b/ldap/servers/plugins/replication/repl5_init.c index 9d8776ad8..8d4eb1c09 100644 --- a/ldap/servers/plugins/replication/repl5_init.c +++ b/ldap/servers/plugins/replication/repl5_init.c @@ -571,18 +571,8 @@ int replication_multimaster_plugin_init(Slapi_PBlock *pb) */ multimaster_mtnode_extension_init (); - if(rc==0 && !multimaster_initialised) + if(!multimaster_initialised) { - /* initialize replica hash - has to be done before mapping tree is - initialized so we can't do it in the start function */ - - if (rc != 0) - { - slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, - "replication_multimaster_plugin_init: failed to initialize replica hash\n"); - return -1; - } - /* Initialize extensions */ repl_con_init_ext(); repl_sup_init_ext(); @@ -603,6 +593,10 @@ int replication_multimaster_plugin_init(Slapi_PBlock *pb) rc= slapi_register_plugin("extendedop", 1 /* Enabled */, "multimaster_end_extop_init", multimaster_end_extop_init, "Multimaster replication end extended operation plugin", NULL, identity); rc= slapi_register_plugin("extendedop", 1 /* Enabled */, "multimaster_total_extop_init", multimaster_total_extop_init, "Multimaster replication total update extended operation plugin", NULL, identity); rc= slapi_register_plugin("extendedop", 1 /* Enabled */, "multimaster_response_extop_init", multimaster_response_extop_init, "Multimaster replication extended response plugin", NULL, identity); + if (0 == rc) + { + multimaster_initialised = 1; + } } return rc; }
0
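Not the actual repl5_init.c code, but a minimal, self-contained C sketch of the pattern the fix leaves behind: one-time plugin initialization guarded by a static flag that is only set after registration succeeds. The register_plugins() helper is hypothetical; it stands in for the slapi_register_plugin() calls in the diff.

```c
#include <stdio.h>

static int multimaster_initialised = 0;

/* Hypothetical stand-in for the slapi_register_plugin() calls; returns 0 on success. */
static int
register_plugins(void)
{
    return 0;
}

static int
plugin_init(void)
{
    int rc = 0;

    if (!multimaster_initialised) {
        /* No result check here: the replica hash init that once set rc was
         * moved elsewhere, so a leftover "if (rc != 0)" would be dead code. */
        rc = register_plugins();
        if (rc == 0) {
            multimaster_initialised = 1; /* only mark initialised on success */
        }
    }
    return rc;
}

int
main(void)
{
    int rc = plugin_init();
    printf("first init: rc=%d initialised=%d\n", rc, multimaster_initialised);
    rc = plugin_init();
    printf("second init is a no-op: rc=%d\n", rc);
    return 0;
}
```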
55ea40a5c4c066db00d6079437d9ca32691410f0
389ds/389-ds-base
Bug 697027 - 3 - minor memory leaks found by Valgrind + TET https://bugzilla.redhat.com/show_bug.cgi?id=697027 [Case 3] Description: Adding slapi_counter_destroy to destroy counters cache->c_cursize, c_hits, and c_tries used in the entry cache.
commit 55ea40a5c4c066db00d6079437d9ca32691410f0 Author: Noriko Hosoi <[email protected]> Date: Fri Apr 15 10:38:21 2011 -0700 Bug 697027 - 3 - minor memory leaks found by Valgrind + TET https://bugzilla.redhat.com/show_bug.cgi?id=697027 [Case 3] Description: Adding slapi_counter_destroy to destroy counters cache->c_cursize, c_hits, and c_tries used in the entry cache. diff --git a/ldap/servers/slapd/back-ldbm/cache.c b/ldap/servers/slapd/back-ldbm/cache.c index f21cd8732..e29247e9d 100644 --- a/ldap/servers/slapd/back-ldbm/cache.c +++ b/ldap/servers/slapd/back-ldbm/cache.c @@ -642,6 +642,9 @@ static void erase_cache(struct cache *cache, int type) #ifdef UUIDCACHE_ON slapi_ch_free((void **)&cache->c_uuidtable); #endif + slapi_counter_destroy(&cache->c_cursize); + slapi_counter_destroy(&cache->c_hits); + slapi_counter_destroy(&cache->c_tries); } /* to be used on shutdown or when destroying a backend instance */
0
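The leak here is an allocation without a matching destroy in the cache teardown path. Below is a hedged, self-contained C sketch of that pattern, not the SLAPI API itself: counter_t, counter_new(), and counter_destroy() are simplified stand-ins for Slapi_Counter and its new/destroy functions.

```c
#include <stdlib.h>
#include <stdint.h>

/* Minimal stand-in for Slapi_Counter; the real API lives in slapi-plugin.h. */
typedef struct counter { uint64_t value; } counter_t;

static counter_t *counter_new(void) { return calloc(1, sizeof(counter_t)); }

static void
counter_destroy(counter_t **c)
{
    if (c && *c) {
        free(*c);
        *c = NULL;
    }
}

struct cache {
    counter_t *c_cursize;
    counter_t *c_hits;
    counter_t *c_tries;
};

static void
cache_init(struct cache *cache)
{
    cache->c_cursize = counter_new();
    cache->c_hits = counter_new();
    cache->c_tries = counter_new();
}

/* The bug 697027 pattern: a teardown path that freed the hash tables but
 * never released these counters. Every allocation needs a matching destroy. */
static void
erase_cache(struct cache *cache)
{
    counter_destroy(&cache->c_cursize);
    counter_destroy(&cache->c_hits);
    counter_destroy(&cache->c_tries);
}

int
main(void)
{
    struct cache c;
    cache_init(&c);
    erase_cache(&c); /* valgrind-clean: no "definitely lost" blocks remain */
    return 0;
}
```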
0219577ab70b324730dc224821876e6563647807
389ds/389-ds-base
Ticket #563 - DSCreate.pm: Error messages cannot be used in the if expression since they could be localized. Bug description: Error messages from the command line interface could be localized based upon the user's configuration, but the script expects an English sentence. Fix description: Set the C locale '$ENV{LANG} = "C"' before running the command whenever the command's output is examined in an if expression. Reviewed by Mark (Thank you!!)
commit 0219577ab70b324730dc224821876e6563647807 Author: Noriko Hosoi <[email protected]> Date: Thu Jan 24 16:24:13 2013 -0800 Ticket #563 - DSCreate.pm: Error messages cannot be used in the if expression since they could be localized. Bug description: Error messages from the command line interface could be localized based upon the users' configuration. But the script expects English sentence. Fix description: Set the C locale '$ENV{LANG} = "C"' before running the command line if the message from the command is examined in the expression. Reviewed by Mark (Thank you!!) diff --git a/ldap/admin/src/scripts/DSCreate.pm.in b/ldap/admin/src/scripts/DSCreate.pm.in index efcec936e..68bb584a4 100644 --- a/ldap/admin/src/scripts/DSCreate.pm.in +++ b/ldap/admin/src/scripts/DSCreate.pm.in @@ -1013,6 +1013,7 @@ sub updateSelinuxPolicy { my $semanage_err; my $rc; my $retry = 60; + $ENV{LANG} = "C"; while (($retry > 0) && ($semanage_err = `semanage port -a -t ldap_port_t -p tcp $inf->{slapd}->{ServerPort} 2>&1`) && ($rc = $?)) { debug(1, "Adding port $inf->{slapd}->{ServerPort} to selinux policy failed - $semanage_err (return code: $rc).\n"); debug(1, "Retrying in 5 seconds\n"); @@ -1393,6 +1394,7 @@ sub removeDSInstance { my $semanage_err; my $rc; my $retry = 60; + $ENV{LANG} = "C"; while (($retry > 0) && ($semanage_err = `semanage port -d -t ldap_port_t -p tcp $port 2>&1`) && ($rc = $?)) { if (($semanage_err =~ /defined in policy, cannot be deleted/) || ($semanage_err =~ /is not defined/)) { $retry = -1; @@ -1415,6 +1417,7 @@ sub removeDSInstance { my $semanage_err; my $rc; my $retry = 60; + $ENV{LANG} = "C"; while (($retry > 0) && ($semanage_err = `semanage port -d -t ldap_port_t -p tcp $secureport 2>&1`) && ($rc = $?)) { if (($semanage_err =~ /defined in policy, cannot be deleted/) || ($semanage_err =~ /is not defined/)) { $retry = -1;
0
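The DSCreate.pm fix is Perl, but the same principle carries over to any language: force the C locale before running a command whose localized error text you are going to pattern-match. A hedged, self-contained C sketch of that idea (not project code; it uses a deliberately missing path just to provoke a known English message):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    char line[256];
    FILE *p;

    /* Force the C locale so the child's error messages come out in English;
     * without this, the substring test below breaks on localized systems. */
    setenv("LANG", "C", 1);

    p = popen("ls /definitely/not/there 2>&1", "r");
    if (p == NULL) {
        return 1;
    }
    if (fgets(line, sizeof(line), p) != NULL &&
        strstr(line, "No such file") != NULL) {
        printf("recognised the (English) error message: %s", line);
    }
    pclose(p);
    return 0;
}
```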
fda9349416b20636d605d60fc62a410303cc0e43
389ds/389-ds-base
Bump version to 1.4.3.2
commit fda9349416b20636d605d60fc62a410303cc0e43 Author: Mark Reynolds <[email protected]> Date: Thu Jan 23 15:09:40 2020 -0500 Bump version to 1.4.3.2 diff --git a/VERSION.sh b/VERSION.sh index 90c051299..0983b05e3 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -10,7 +10,7 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=1 VERSION_MINOR=4 -VERSION_MAINT=3.1 +VERSION_MAINT=3.2 # NOTE: VERSION_PREREL is automatically set for builds made out of a git tree VERSION_PREREL= VERSION_DATE=$(date -u +%Y%m%d)
0
ae8f4277e2c7e2f9881018035146b9d7767e4004
389ds/389-ds-base
Ticket 29 - fix incorrect format in tools Bug Description: Cannot concatenate a str and an int Fix Description: change the error message to use string formatting https://pagure.io/lib389/issue/29 Author: wibrown Review by: mreynolds (Thanks!)
commit ae8f4277e2c7e2f9881018035146b9d7767e4004 Author: William Brown <[email protected]> Date: Wed May 10 16:23:10 2017 +1000 Ticket 29 - fix incorrect format in tools Bug Description: Can not concat str and int Fix Description: change error to use string formatting https://pagure.io/lib389/issue/29 Author: wibrown Review by: mreynolds (Thanks!) diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py index 51de6897b..edb95b1e0 100644 --- a/src/lib389/lib389/tools.py +++ b/src/lib389/lib389/tools.py @@ -948,8 +948,8 @@ class DirSrvTools(object): process.stdin.close() process.wait() if process.returncode != 0: - log.fatal('runUpgrade failed! Error: ' + process.returncode) - assert False + log.fatal('runUpgrade failed! Error: %s ' % process.returncode) + assert(False) except: log.fatal('runUpgrade failed!') raise
0
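The lib389 fix is Python (use '%s' formatting instead of string + int concatenation). The same idea expressed in C as a small hedged sketch: format the numeric return code into the message explicitly rather than trying to append it to a string.

```c
#include <stdio.h>

int
main(void)
{
    int returncode = 1; /* e.g. a failed child process exit status */
    char msg[128];

    /* C has no str + int concatenation either; format the number in explicitly. */
    snprintf(msg, sizeof(msg), "runUpgrade failed! Error: %d", returncode);
    fprintf(stderr, "%s\n", msg);
    return 0;
}
```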
8dc3806d75b6e3d4722047e230db68ac20ab3e69
389ds/389-ds-base
Ticket #47835 - Coverity: 12687..12692 12692 - Use of untrusted string value Description: lines read from the sysconfig reload task's attribute sysconfigfile (e.g., /etc/sysconfig/dirsrv-localhost) could be tainted. Check the end of the line more rigorously, and eliminate the chance of overflowing env_var and env_value when copying characters from the line that was read.
commit 8dc3806d75b6e3d4722047e230db68ac20ab3e69 Author: Noriko Hosoi <[email protected]> Date: Mon Jun 30 16:21:52 2014 -0700 Ticket #47835 - Coverity: 12687..12692 12692 - Use of untrusted string value Description: lines read from the sysconfig reload task's attribute sysconfigfile (e.g., /etc/sysconfig/dirsrv-localhost) could be tainted. Check the end of the line more rigorously, and eliminate a chance to overflow env_var and env_value by copying the characters from read line. Reviewed by [email protected] (Thanks, Rich!) https://fedorahosted.org/389/ticket/47835 diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c index 6340db8cc..1243492d5 100644 --- a/ldap/servers/slapd/task.c +++ b/ldap/servers/slapd/task.c @@ -1949,6 +1949,8 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, if ( file != NULL ){ char line[4096]; char *s = NULL; + /* fgets() reads in at most one less than size characters */ + char *end_of_line = line + sizeof(line) - 1; if(logchanges){ LDAPDebug(LDAP_DEBUG_ANY, "sysconfig reload task: processing file (%s)\n", @@ -1960,8 +1962,8 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, /* skip comments */ continue; } else { - char env_value[4096]; - char env_var[4096]; + char env_value[sizeof(line)]; + char env_var[sizeof(line)]; int using_setenv = 0; int value_index = 0; int start_value = 0; @@ -1997,7 +1999,7 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, using_setenv = 1; } if(strncmp(s, "export ", 7) == 0){ - /* strip off "export " */ + /* strip off "export " */ s = s + 7; } else if(strncmp(s, "set ", 4) == 0){ /* strip off "set " */ @@ -2021,7 +2023,7 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, /* * Start parsing the names and values */ - for (; s && *s; s++){ + for (; s && (s < end_of_line) && *s; s++){ /* * If using "setenv", allow the first space/tab only, and start on the env value */
0
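A self-contained C sketch (not the task.c code itself) of the two hardening steps the patch describes: size the scratch buffers from the fgets buffer so a copy can never overflow them, and bound the parse loop at the end of that buffer. The file path used here is just a convenient small text file for the demo.

```c
#include <stdio.h>

int
main(void)
{
    char line[4096];
    /* fgets() reads in at most sizeof(line) - 1 characters */
    char *end_of_line = line + sizeof(line) - 1;
    /* Same capacity as the source line, so copying a value can never overflow. */
    char env_var[sizeof(line)];
    char env_value[sizeof(line)];
    FILE *file = fopen("/etc/hostname", "r"); /* any small text file will do */

    if (file == NULL) {
        return 1;
    }
    while (fgets(line, sizeof(line), file) != NULL) {
        size_t i = 0;
        char *s = line;

        /* Bounded walk: never step past end_of_line, even for an unterminated line. */
        for (; s && (s < end_of_line) && *s && *s != '\n'; s++) {
            env_var[i++] = *s;
        }
        env_var[i] = '\0';
        env_value[0] = '\0'; /* value parsing elided in this sketch */
        printf("var=%s value=%s\n", env_var, env_value);
    }
    fclose(file);
    return 0;
}
```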
5420e154aaf08cbf41c3c233b0d71c289b85b0e8
389ds/389-ds-base
Ticket 48145 - RFE Add log file for rejected changes https://fedorahosted.org/389/ticket/48145 http://www.port389.org/docs/389ds/design/audit_improvement.html Bug Description: Add a log file for rejected changes: This will help with debugging third-party and other applications that are failing to connect to, or work correctly with, LDAP servers. Fix Description: The bulk of this code is a duplication of the existing audit log code. The new remainder consists of configuration items in the schema, an update to the template dse.ldif for installation, and hooks in add.c, delete.c, modify.c and modrdn.c. Finally, we extract the return code in write_auditfail_log_entry and insert it into the fail log. You can enable this with: cn=config nsslapd-auditfaillog-logging-enabled: on The auditfail log is: var/log/dirsrv/slapd-%instance%/auditfail And contains entries such as: time: 20151111152800 dn: uid=test,dc=example,dc=com result: 65 changetype: modify replace: objectClass objectClass: account objectClass: posixGroup objectClass: simpleSecurityObject objectClass: top - Note that the result maps to the LDAP result code, in this case 65 == 0x41 LDAP_OBJECT_CLASS_VIOLATION 0x41 Author: wibrown Review by: mreynolds, nhosoi (Thanks!)
commit 5420e154aaf08cbf41c3c233b0d71c289b85b0e8 Author: William Brown <[email protected]> Date: Mon Nov 9 14:06:17 2015 +1000 Ticket 48145 - RFE Add log file for rejected changes https://fedorahosted.org/389/ticket/48145 http://www.port389.org/docs/389ds/design/audit_improvement.html Bug Description: Add log file for rejected changes: This will help with debug third party and other applications that are failing to connect to or work correctly with ldap servers. Fix Description: The bulk of this code is duplication of existing audit log code. The remainder that is new is configuration items in schema, an update to the template dse.ldif for installation, hooking in add.c, delete.c, modify.c and modrdn.c. Finally, we extract the return code in write_auditfail_log_entry and insert this to the fail log. You can enable this with: cn=config nsslapd-auditfaillog-logging-enabled: on The auditfail log is: var/log/dirsrv/slapd-%instance%/auditfail And contains entries such as: time: 20151111152800 dn: uid=test,dc=example,dc=com result: 65 changetype: modify replace: objectClass objectClass: account objectClass: posixGroup objectClass: simpleSecurityObject objectClass: top - Note the result maps to the ldap result code, in this case 65 == 0x41 LDAP_OBJECT_CLASS_VIOLATION 0x41 Author: wibrown Review by: mreynolds, nhosoi (Thanks!) diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in index 6acbfaebb..a25295b7b 100644 --- a/ldap/ldif/template-dse.ldif.in +++ b/ldap/ldif/template-dse.ldif.in @@ -52,6 +52,11 @@ nsslapd-auditlog-mode: 600 nsslapd-auditlog-maxlogsize: 100 nsslapd-auditlog-logrotationtime: 1 nsslapd-auditlog-logrotationtimeunit: day +nsslapd-auditfaillog: %log_dir%/auditfail +nsslapd-auditfaillog-mode: 600 +nsslapd-auditfaillog-maxlogsize: 100 +nsslapd-auditfaillog-logrotationtime: 1 +nsslapd-auditfaillog-logrotationtimeunit: day nsslapd-rootdn: %rootdn% nsslapd-rootpw: %ds_passwd% nsslapd-maxdescriptors: 1024 diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif index aebdb5a28..42af40dee 100644 --- a/ldap/schema/01core389.ldif +++ b/ldap/schema/01core389.ldif @@ -278,6 +278,19 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2311 NAME 'nsds5ReplicaFlowControlPause' attributeTypes: ( 2.16.840.1.113730.3.1.2313 NAME 'nsslapd-changelogtrim-interval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.2314 NAME 'nsslapd-changelogcompactdb-interval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.2315 NAME 'nsDS5ReplicaWaitForAsyncResults' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2316 NAME 'nsslapd-auditfaillog-maxlogsize' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2317 NAME 'nsslapd-auditfaillog-logrotationsync-enabled' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2318 NAME 'nsslapd-auditfaillog-logrotationsynchour' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 
2.16.840.1.113730.3.1.2319 NAME 'nsslapd-auditfaillog-logrotationtime' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2320 NAME 'nsslapd-auditfaillog-logrotationtimeunit' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2321 NAME 'nsslapd-auditfaillog-logmaxdiskspace' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2322 NAME 'nsslapd-auditfaillog-logminfreediskspace' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2323 NAME 'nsslapd-auditfaillog-logexpirationtime' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2324 NAME 'nsslapd-auditfaillog-logexpirationtimeunit' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2325 NAME 'nsslapd-auditfaillog-logging-enabled' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2326 NAME 'nsslapd-auditfaillog-logging-hide-unhashed-pw' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2327 NAME 'nsslapd-auditfaillog' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2328 NAME 'nsslapd-auditfaillog-list' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' ) # # objectclasses # diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c index 31012a276..5e5002582 100644 --- a/ldap/servers/slapd/add.c +++ b/ldap/servers/slapd/add.c @@ -753,6 +753,11 @@ static void op_shared_add (Slapi_PBlock *pb) operation_out_of_disk_space(); goto done; } + /* If the disk is full we don't want to make it worse ... 
*/ + if (operation_is_flag_set(operation,OP_FLAG_ACTION_LOG_AUDIT)) + { + write_auditfail_log_entry(pb); /* Record the operation in the audit log */ + } } } else diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c index 69019bed3..2ddfad078 100644 --- a/ldap/servers/slapd/auditlog.c +++ b/ldap/servers/slapd/auditlog.c @@ -26,10 +26,12 @@ char *attr_changetype = ATTR_CHANGETYPE; char *attr_newrdn = ATTR_NEWRDN; char *attr_deleteoldrdn = ATTR_DELETEOLDRDN; char *attr_modifiersname = ATTR_MODIFIERSNAME; -static int hide_unhashed_pw = 1; + +static int audit_hide_unhashed_pw = 1; +static int auditfail_hide_unhashed_pw = 1; /* Forward Declarations */ -static void write_audit_file( int optype, const char *dn, void *change, int flag, time_t curtime ); +static void write_audit_file(int logtype, int optype, const char *dn, void *change, int flag, time_t curtime, int rc ); void write_audit_log_entry( Slapi_PBlock *pb ) @@ -76,9 +78,60 @@ write_audit_log_entry( Slapi_PBlock *pb ) curtime = current_time(); /* log the raw, unnormalized DN */ dn = slapi_sdn_get_udn(sdn); - write_audit_file( operation_get_type(op), dn, change, flag, curtime ); + write_audit_file(SLAPD_AUDIT_LOG, operation_get_type(op), dn, change, flag, curtime, 0); } +void +write_auditfail_log_entry( Slapi_PBlock *pb ) +{ + time_t curtime; + Slapi_DN *sdn; + const char *dn; + void *change; + int flag = 0; + Operation *op; + int pbrc = 0; + + /* if the audit log is not enabled, just skip all of + this stuff */ + if (!config_get_auditfaillog_logging_enabled()) { + return; + } + + slapi_pblock_get( pb, SLAPI_OPERATION, &op ); + slapi_pblock_get( pb, SLAPI_TARGET_SDN, &sdn ); + + slapi_pblock_get( pb, SLAPI_RESULT_CODE, &pbrc ); + + switch ( operation_get_type(op) ) + { + case SLAPI_OPERATION_MODIFY: + slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &change ); + break; + case SLAPI_OPERATION_ADD: + slapi_pblock_get( pb, SLAPI_ADD_ENTRY, &change ); + break; + case SLAPI_OPERATION_DELETE: + { + char * deleterDN = NULL; + slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &deleterDN); + change = deleterDN; + } + break; + case SLAPI_OPERATION_MODDN: + /* newrdn: change is just for logging -- case does not matter. */ + slapi_pblock_get( pb, SLAPI_MODRDN_NEWRDN, &change ); + slapi_pblock_get( pb, SLAPI_MODRDN_DELOLDRDN, &flag ); + break; + default: + return; /* Unsupported operation type. */ + } + curtime = current_time(); + /* log the raw, unnormalized DN */ + dn = slapi_sdn_get_udn(sdn); + /* If we are combined */ + write_audit_file(SLAPD_AUDITFAIL_LOG, operation_get_type(op), dn, change, flag, curtime, pbrc); +} /* @@ -90,139 +143,174 @@ write_audit_log_entry( Slapi_PBlock *pb ) * For a delete operation, may contain the modifier's DN. * flag - only used by modrdn operations - value of deleteoldrdn flag * curtime - the current time + * rc - The ldap result code. 
Used in conjunction with auditfail * Returns: nothing */ static void write_audit_file( - int optype, - const char *dn, - void *change, - int flag, - time_t curtime + int logtype, + int optype, + const char *dn, + void *change, + int flag, + time_t curtime, + int rc ) { - LDAPMod **mods; - Slapi_Entry *e; - char *newrdn, *tmp, *tmpsave; - int len, i, j; - char *timestr; - lenstr *l; + LDAPMod **mods; + Slapi_Entry *e; + char *newrdn, *tmp, *tmpsave; + int len, i, j; + char *timestr; + char *rcstr; + lenstr *l; l = lenstr_new(); addlenstr( l, "time: " ); timestr = format_localTime( curtime ); addlenstr( l, timestr ); - slapi_ch_free((void **) &timestr ); + slapi_ch_free_string(&timestr); addlenstr( l, "\n" ); addlenstr( l, "dn: " ); addlenstr( l, dn ); addlenstr( l, "\n" ); + addlenstr( l, "result: " ); + rcstr = slapi_ch_smprintf("%d", rc); + addlenstr( l, rcstr ); + slapi_ch_free_string(&rcstr); + addlenstr( l, "\n" ); + + switch ( optype ) - { + { case SLAPI_OPERATION_MODIFY: - addlenstr( l, attr_changetype ); - addlenstr( l, ": modify\n" ); - mods = change; - for ( j = 0; (mods != NULL) && (mods[j] != NULL); j++ ) - { - int operationtype= mods[j]->mod_op & ~LDAP_MOD_BVALUES; - - if((strcmp(mods[j]->mod_type, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0) && hide_unhashed_pw){ - continue; - } - switch ( operationtype ) - { - case LDAP_MOD_ADD: - addlenstr( l, "add: " ); - addlenstr( l, mods[j]->mod_type ); - addlenstr( l, "\n" ); - break; - - case LDAP_MOD_DELETE: - addlenstr( l, "delete: " ); - addlenstr( l, mods[j]->mod_type ); - addlenstr( l, "\n" ); - break; - - case LDAP_MOD_REPLACE: - addlenstr( l, "replace: " ); - addlenstr( l, mods[j]->mod_type ); - addlenstr( l, "\n" ); - break; - - default: - operationtype= LDAP_MOD_IGNORE; - break; - } - if(operationtype!=LDAP_MOD_IGNORE) - { - for ( i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++ ) - { - char *buf, *bufp; - len = strlen( mods[j]->mod_type ); - len = LDIF_SIZE_NEEDED( len, mods[j]->mod_bvalues[i]->bv_len ) + 1; - buf = slapi_ch_malloc( len ); - bufp = buf; - slapi_ldif_put_type_and_value_with_options( &bufp, mods[j]->mod_type, - mods[j]->mod_bvalues[i]->bv_val, - mods[j]->mod_bvalues[i]->bv_len, 0 ); - *bufp = '\0'; - addlenstr( l, buf ); - slapi_ch_free( (void**)&buf ); - } - } - addlenstr( l, "-\n" ); - } - break; + addlenstr( l, attr_changetype ); + addlenstr( l, ": modify\n" ); + mods = change; + for ( j = 0; (mods != NULL) && (mods[j] != NULL); j++ ) + { + int operationtype= mods[j]->mod_op & ~LDAP_MOD_BVALUES; + + if(strcmp(mods[j]->mod_type, PSEUDO_ATTR_UNHASHEDUSERPASSWORD) == 0){ + switch (logtype) + { + case SLAPD_AUDIT_LOG: + if (audit_hide_unhashed_pw != 0) { + continue; + } + break; + case SLAPD_AUDITFAIL_LOG: + if (auditfail_hide_unhashed_pw != 0) { + continue; + } + break; + } + } + switch ( operationtype ) + { + case LDAP_MOD_ADD: + addlenstr( l, "add: " ); + addlenstr( l, mods[j]->mod_type ); + addlenstr( l, "\n" ); + break; + + case LDAP_MOD_DELETE: + addlenstr( l, "delete: " ); + addlenstr( l, mods[j]->mod_type ); + addlenstr( l, "\n" ); + break; + + case LDAP_MOD_REPLACE: + addlenstr( l, "replace: " ); + addlenstr( l, mods[j]->mod_type ); + addlenstr( l, "\n" ); + break; + + default: + operationtype= LDAP_MOD_IGNORE; + break; + } + if(operationtype!=LDAP_MOD_IGNORE) + { + for ( i = 0; mods[j]->mod_bvalues != NULL && mods[j]->mod_bvalues[i] != NULL; i++ ) + { + char *buf, *bufp; + len = strlen( mods[j]->mod_type ); + len = LDIF_SIZE_NEEDED( len, mods[j]->mod_bvalues[i]->bv_len ) + 1; + 
buf = slapi_ch_malloc( len ); + bufp = buf; + slapi_ldif_put_type_and_value_with_options( &bufp, mods[j]->mod_type, + mods[j]->mod_bvalues[i]->bv_val, + mods[j]->mod_bvalues[i]->bv_len, 0 ); + *bufp = '\0'; + addlenstr( l, buf ); + slapi_ch_free( (void**)&buf ); + } + } + addlenstr( l, "-\n" ); + } + break; case SLAPI_OPERATION_ADD: - e = change; - addlenstr( l, attr_changetype ); - addlenstr( l, ": add\n" ); - tmp = slapi_entry2str( e, &len ); - tmpsave = tmp; - while (( tmp = strchr( tmp, '\n' )) != NULL ) - { - tmp++; - if ( !ldap_utf8isspace( tmp )) - { - break; - } - } - addlenstr( l, tmp ); - slapi_ch_free((void**)&tmpsave ); - break; + e = change; + addlenstr( l, attr_changetype ); + addlenstr( l, ": add\n" ); + tmp = slapi_entry2str( e, &len ); + tmpsave = tmp; + while (( tmp = strchr( tmp, '\n' )) != NULL ) + { + tmp++; + if ( !ldap_utf8isspace( tmp )) + { + break; + } + } + addlenstr( l, tmp ); + slapi_ch_free((void**)&tmpsave ); + break; case SLAPI_OPERATION_DELETE: - tmp = change; - addlenstr( l, attr_changetype ); - addlenstr( l, ": delete\n" ); - if (tmp && tmp[0]) { - addlenstr( l, attr_modifiersname ); - addlenstr( l, ": "); - addlenstr( l, tmp); - addlenstr( l, "\n"); - } - break; + tmp = change; + addlenstr( l, attr_changetype ); + addlenstr( l, ": delete\n" ); + if (tmp && tmp[0]) { + addlenstr( l, attr_modifiersname ); + addlenstr( l, ": "); + addlenstr( l, tmp); + addlenstr( l, "\n"); + } + break; case SLAPI_OPERATION_MODDN: - newrdn = change; - addlenstr( l, attr_changetype ); - addlenstr( l, ": modrdn\n" ); - addlenstr( l, attr_newrdn ); - addlenstr( l, ": " ); - addlenstr( l, newrdn ); - addlenstr( l, "\n" ); - addlenstr( l, attr_deleteoldrdn ); - addlenstr( l, ": " ); - addlenstr( l, flag ? "1" : "0" ); - addlenstr( l, "\n" ); + newrdn = change; + addlenstr( l, attr_changetype ); + addlenstr( l, ": modrdn\n" ); + addlenstr( l, attr_newrdn ); + addlenstr( l, ": " ); + addlenstr( l, newrdn ); + addlenstr( l, "\n" ); + addlenstr( l, attr_deleteoldrdn ); + addlenstr( l, ": " ); + addlenstr( l, flag ? "1" : "0" ); + addlenstr( l, "\n" ); } addlenstr( l, "\n" ); - slapd_log_audit_proc (l->ls_buf, l->ls_len); + switch (logtype) + { + case SLAPD_AUDIT_LOG: + slapd_log_audit_proc (l->ls_buf, l->ls_len); + break; + case SLAPD_AUDITFAIL_LOG: + slapd_log_auditfail_proc (l->ls_buf, l->ls_len); + break; + default: + /* Unsupported log type, we should make some noise */ + LDAPDebug1Arg(LDAP_DEBUG_ANY, "write_audit_log: Invalid log type specified. 
logtype %d\n", logtype); + break; + } lenstr_free( &l ); } @@ -230,11 +318,23 @@ write_audit_file( void auditlog_hide_unhashed_pw() { - hide_unhashed_pw = 1; + audit_hide_unhashed_pw = 1; } void auditlog_expose_unhashed_pw() { - hide_unhashed_pw = 0; + audit_hide_unhashed_pw = 0; +} + +void +auditfaillog_hide_unhashed_pw() +{ + auditfail_hide_unhashed_pw = 1; +} + +void +auditfaillog_expose_unhashed_pw() +{ + auditfail_hide_unhashed_pw = 0; } diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 5d70647c1..841393730 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -357,6 +357,7 @@ disk_mon_get_dirs(char ***list, int logs_critical){ disk_mon_add_dir(list, config->accesslog); disk_mon_add_dir(list, config->errorlog); disk_mon_add_dir(list, config->auditlog); + disk_mon_add_dir(list, config->auditfaillog); CFG_UNLOCK_READ(config); be = slapi_get_first_backend (&cookie); @@ -456,6 +457,7 @@ disk_monitoring_thread(void *nothing) int verbose_logging = 0; int using_accesslog = 0; int using_auditlog = 0; + int using_auditfaillog = 0; int logs_disabled = 0; int grace_period = 0; int first_pass = 1; @@ -488,6 +490,9 @@ disk_monitoring_thread(void *nothing) if(config_get_auditlog_logging_enabled()){ using_auditlog = 1; } + if(config_get_auditfaillog_logging_enabled()){ + using_auditfaillog = 1; + } if(config_get_accesslog_logging_enabled()){ using_accesslog = 1; } @@ -513,6 +518,9 @@ disk_monitoring_thread(void *nothing) if(using_auditlog){ config_set_auditlog_enabled(LOGGING_ON); } + if(using_auditfaillog){ + config_set_auditfaillog_enabled(LOGGING_ON); + } } else { LDAPDebug(LDAP_DEBUG_ANY, "Disk space is now within acceptable levels.\n",0,0,0); } @@ -557,6 +565,7 @@ disk_monitoring_thread(void *nothing) "disabling access and audit logging.\n", dirstr, (disk_space / 1024), 0); config_set_accesslog_enabled(LOGGING_OFF); config_set_auditlog_enabled(LOGGING_OFF); + config_set_auditfaillog_enabled(LOGGING_OFF); logs_disabled = 1; continue; } @@ -617,6 +626,9 @@ disk_monitoring_thread(void *nothing) if(logs_disabled && using_auditlog){ config_set_auditlog_enabled(LOGGING_ON); } + if(logs_disabled && using_auditfaillog){ + config_set_auditfaillog_enabled(LOGGING_ON); + } deleted_rotated_logs = 0; passed_threshold = 0; logs_disabled = 0; diff --git a/ldap/servers/slapd/delete.c b/ldap/servers/slapd/delete.c index b51aea5b2..d3c4d8ac9 100644 --- a/ldap/servers/slapd/delete.c +++ b/ldap/servers/slapd/delete.c @@ -351,6 +351,11 @@ static void op_shared_delete (Slapi_PBlock *pb) operation_out_of_disk_space(); goto free_and_return; } + /* If the disk is full we don't want to make it worse ... 
*/ + if (operation_is_flag_set(operation,OP_FLAG_ACTION_LOG_AUDIT)) + { + write_auditfail_log_entry(pb); /* Record the operation in the audit log */ + } } } diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index fde388516..229672092 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -152,12 +152,15 @@ static int invalid_sasl_mech(char *str); #define INIT_ACCESSLOG_MODE "600" #define INIT_ERRORLOG_MODE "600" #define INIT_AUDITLOG_MODE "600" +#define INIT_AUDITFAILLOG_MODE "600" #define INIT_ACCESSLOG_ROTATIONUNIT "day" #define INIT_ERRORLOG_ROTATIONUNIT "week" #define INIT_AUDITLOG_ROTATIONUNIT "week" +#define INIT_AUDITFAILLOG_ROTATIONUNIT "week" #define INIT_ACCESSLOG_EXPTIMEUNIT "month" #define INIT_ERRORLOG_EXPTIMEUNIT "month" #define INIT_AUDITLOG_EXPTIMEUNIT "month" +#define INIT_AUDITFAILLOG_EXPTIMEUNIT "month" #define DEFAULT_DIRECTORY_MANAGER "cn=Directory Manager" #define DEFAULT_UIDNUM_TYPE "uidNumber" #define DEFAULT_GIDNUM_TYPE "gidNumber" @@ -171,11 +174,14 @@ static int invalid_sasl_mech(char *str); slapi_onoff_t init_accesslog_rotationsync_enabled; slapi_onoff_t init_errorlog_rotationsync_enabled; slapi_onoff_t init_auditlog_rotationsync_enabled; +slapi_onoff_t init_auditfaillog_rotationsync_enabled; slapi_onoff_t init_accesslog_logging_enabled; slapi_onoff_t init_accesslogbuffering; slapi_onoff_t init_errorlog_logging_enabled; slapi_onoff_t init_auditlog_logging_enabled; slapi_onoff_t init_auditlog_logging_hide_unhashed_pw; +slapi_onoff_t init_auditfaillog_logging_enabled; +slapi_onoff_t init_auditfaillog_logging_hide_unhashed_pw; slapi_onoff_t init_csnlogging; slapi_onoff_t init_pw_unlock; slapi_onoff_t init_pw_must_change; @@ -1105,8 +1111,73 @@ static struct config_get_and_set { NULL, 0, (void**)&global_slapdFrontendConfig.mempool_maxfreelist, CONFIG_INT, (ConfigGetFunc)config_get_mempool_maxfreelist, - DEFAULT_MEMPOOL_MAXFREELIST} + DEFAULT_MEMPOOL_MAXFREELIST}, #endif /* MEMPOOL_EXPERIMENTAL */ + /* Audit fail log configuration */ + {CONFIG_AUDITFAILLOG_MODE_ATTRIBUTE, NULL, + log_set_mode, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_mode, + CONFIG_STRING, NULL, INIT_AUDITFAILLOG_MODE}, + {CONFIG_AUDITFAILLOG_LOGROTATIONSYNCENABLED_ATTRIBUTE, NULL, + log_set_rotationsync_enabled, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_rotationsync_enabled, + CONFIG_ON_OFF, NULL, &init_auditfaillog_rotationsync_enabled}, + {CONFIG_AUDITFAILLOG_LOGROTATIONSYNCHOUR_ATTRIBUTE, NULL, + log_set_rotationsynchour, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_rotationsynchour, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONSYNCHOUR}, + {CONFIG_AUDITFAILLOG_LOGROTATIONSYNCMIN_ATTRIBUTE, NULL, + log_set_rotationsyncmin, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_rotationsyncmin, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONSYNCMIN}, + {CONFIG_AUDITFAILLOG_LOGROTATIONTIME_ATTRIBUTE, NULL, + log_set_rotationtime, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_rotationtime, + CONFIG_INT, NULL, DEFAULT_LOG_ROTATIONTIME}, + {CONFIG_AUDITFAILLOG_MAXLOGDISKSPACE_ATTRIBUTE, NULL, + log_set_maxdiskspace, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_maxdiskspace, + CONFIG_INT, NULL, DEFAULT_LOG_MAXDISKSPACE}, + {CONFIG_AUDITFAILLOG_MAXLOGSIZE_ATTRIBUTE, NULL, + log_set_logsize, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_maxlogsize, + CONFIG_INT, NULL, 
DEFAULT_LOG_MAXLOGSIZE}, + {CONFIG_AUDITFAILLOG_LOGEXPIRATIONTIME_ATTRIBUTE, NULL, + log_set_expirationtime, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_exptime, + CONFIG_INT, NULL, DEFAULT_LOG_EXPTIME}, + {CONFIG_AUDITFAILLOG_MAXNUMOFLOGSPERDIR_ATTRIBUTE, NULL, + log_set_numlogsperdir, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_maxnumlogs, + CONFIG_INT, NULL, DEFAULT_LOG_MAXNUMLOGS}, + {CONFIG_AUDITFAILLOG_LIST_ATTRIBUTE, NULL, + NULL, 0, NULL, + CONFIG_CHARRAY, (ConfigGetFunc)config_get_auditfaillog_list, NULL}, + {CONFIG_AUDITFAILLOG_LOGGING_ENABLED_ATTRIBUTE, NULL, + log_set_logging, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_logging_enabled, + CONFIG_ON_OFF, NULL, &init_auditfaillog_logging_enabled}, + {CONFIG_AUDITFAILLOG_LOGGING_HIDE_UNHASHED_PW, config_set_auditfaillog_unhashed_pw, + NULL, 0, + (void**)&global_slapdFrontendConfig.auditfaillog_logging_hide_unhashed_pw, + CONFIG_ON_OFF, NULL, &init_auditfaillog_logging_hide_unhashed_pw}, + {CONFIG_AUDITFAILLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE, NULL, + log_set_expirationtimeunit, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_exptimeunit, + CONFIG_STRING_OR_UNKNOWN, NULL, INIT_AUDITFAILLOG_EXPTIMEUNIT}, + {CONFIG_AUDITFAILLOG_MINFREEDISKSPACE_ATTRIBUTE, NULL, + log_set_mindiskspace, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_minfreespace, + CONFIG_INT, NULL, DEFAULT_LOG_MINFREESPACE}, + {CONFIG_AUDITFAILLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE, NULL, + log_set_rotationtimeunit, SLAPD_AUDITFAIL_LOG, + (void**)&global_slapdFrontendConfig.auditfaillog_rotationunit, + CONFIG_STRING_OR_UNKNOWN, NULL, INIT_AUDITFAILLOG_ROTATIONUNIT}, + {CONFIG_AUDITFAILFILE_ATTRIBUTE, config_set_auditfaillog, + NULL, 0, + (void**)&global_slapdFrontendConfig.auditfaillog, + CONFIG_STRING_OR_EMPTY, NULL, NULL/* deletion is not allowed */} + /* End audit fail log configuration */ }; /* @@ -1513,6 +1584,23 @@ FrontendConfig_init () { init_auditlog_logging_hide_unhashed_pw = cfg->auditlog_logging_hide_unhashed_pw = LDAP_ON; + init_auditfaillog_logging_enabled = cfg->auditfaillog_logging_enabled = LDAP_OFF; + cfg->auditfaillog_mode = slapi_ch_strdup(INIT_AUDITFAILLOG_MODE); + cfg->auditfaillog_maxnumlogs = 1; + cfg->auditfaillog_maxlogsize = 100; + cfg->auditfaillog_rotationtime = 1; + cfg->auditfaillog_rotationunit = slapi_ch_strdup(INIT_AUDITFAILLOG_ROTATIONUNIT); + init_auditfaillog_rotationsync_enabled = + cfg->auditfaillog_rotationsync_enabled = LDAP_OFF; + cfg->auditfaillog_rotationsynchour = 0; + cfg->auditfaillog_rotationsyncmin = 0; + cfg->auditfaillog_maxdiskspace = 100; + cfg->auditfaillog_minfreespace = 5; + cfg->auditfaillog_exptime = 1; + cfg->auditfaillog_exptimeunit = slapi_ch_strdup(INIT_AUDITFAILLOG_EXPTIMEUNIT); + init_auditfaillog_logging_hide_unhashed_pw = + cfg->auditfaillog_logging_hide_unhashed_pw = LDAP_ON; + init_entryusn_global = cfg->entryusn_global = LDAP_OFF; cfg->entryusn_import_init = slapi_ch_strdup(ENTRYUSN_IMPORT_INIT); cfg->allowed_to_delete_attrs = slapi_ch_strdup("passwordadmindn nsslapd-listenhost nsslapd-securelistenhost nsslapd-defaultnamingcontext"); @@ -1630,17 +1718,33 @@ get_entry_point( int ep_name, caddr_t *ep_addr ) int config_set_auditlog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply) { - slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); - int retVal = LDAP_SUCCESS; + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + int 
retVal = LDAP_SUCCESS; - retVal = config_set_onoff ( attrname, value, &(slapdFrontendConfig->auditlog_logging_hide_unhashed_pw), - errorbuf, apply); - if(strcasecmp(value,"on") == 0){ - auditlog_hide_unhashed_pw(); - } else { - auditlog_expose_unhashed_pw(); - } - return retVal; + retVal = config_set_onoff ( attrname, value, &(slapdFrontendConfig->auditlog_logging_hide_unhashed_pw), + errorbuf, apply); + if(strcasecmp(value,"on") == 0){ + auditlog_hide_unhashed_pw(); + } else { + auditlog_expose_unhashed_pw(); + } + return retVal; +} + +int +config_set_auditfaillog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply) +{ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + int retVal = LDAP_SUCCESS; + + retVal = config_set_onoff ( attrname, value, &(slapdFrontendConfig->auditfaillog_logging_hide_unhashed_pw), + errorbuf, apply); + if(strcasecmp(value,"on") == 0){ + auditfaillog_hide_unhashed_pw(); + } else { + auditfaillog_expose_unhashed_pw(); + } + return retVal; } /* @@ -4157,6 +4261,31 @@ config_set_errorlog( const char *attrname, char *value, char *errorbuf, int appl int config_set_auditlog( const char *attrname, char *value, char *errorbuf, int apply ) { + int retVal = LDAP_SUCCESS; + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + + if ( config_value_is_null( attrname, value, errorbuf, 1 )) { + return LDAP_OPERATIONS_ERROR; + } + + retVal = log_update_auditlogdir ( value, apply ); + + if ( retVal != LDAP_SUCCESS ) { + PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "Cannot open auditlog directory \"%s\"", value ); + } + + if ( apply ) { + CFG_LOCK_WRITE(slapdFrontendConfig); + slapi_ch_free ( (void **) &(slapdFrontendConfig->auditlog) ); + slapdFrontendConfig->auditlog = slapi_ch_strdup ( value ); + CFG_UNLOCK_WRITE(slapdFrontendConfig); + } + return retVal; +} + +int +config_set_auditfaillog( const char *attrname, char *value, char *errorbuf, int apply ) { int retVal = LDAP_SUCCESS; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -4164,17 +4293,17 @@ config_set_auditlog( const char *attrname, char *value, char *errorbuf, int appl return LDAP_OPERATIONS_ERROR; } - retVal = log_update_auditlogdir ( value, apply ); + retVal = log_update_auditfaillogdir ( value, apply ); if ( retVal != LDAP_SUCCESS ) { PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, - "Cannot open auditlog directory \"%s\"", value ); + "Cannot open auditfaillog directory \"%s\"", value ); } if ( apply ) { CFG_LOCK_WRITE(slapdFrontendConfig); - slapi_ch_free ( (void **) &(slapdFrontendConfig->auditlog) ); - slapdFrontendConfig->auditlog = slapi_ch_strdup ( value ); + slapi_ch_free ( (void **) &(slapdFrontendConfig->auditfaillog) ); + slapdFrontendConfig->auditfaillog = slapi_ch_strdup ( value ); CFG_UNLOCK_WRITE(slapdFrontendConfig); } return retVal; @@ -5514,6 +5643,18 @@ config_get_auditlog( ){ return retVal; } +char * +config_get_auditfaillog( ){ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + char *retVal; + + CFG_LOCK_READ(slapdFrontendConfig); + retVal = config_copy_strval(slapdFrontendConfig->auditfaillog); + CFG_UNLOCK_READ(slapdFrontendConfig); + + return retVal; +} + long config_get_pw_maxage() { slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -5588,6 +5729,16 @@ config_get_auditlog_logging_enabled(){ return retVal; } +int +config_get_auditfaillog_logging_enabled(){ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + int retVal; + + retVal = 
(int)slapdFrontendConfig->auditfaillog_logging_enabled; + + return retVal; +} + int config_get_accesslog_logging_enabled(){ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); @@ -6428,6 +6579,12 @@ config_get_auditlog_list() return log_get_loglist(SLAPD_AUDIT_LOG); } +char ** +config_get_auditfaillog_list() +{ + return log_get_loglist(SLAPD_AUDITFAIL_LOG); +} + int config_set_accesslogbuffering(const char *attrname, char *value, char *errorbuf, int apply) { @@ -7771,6 +7928,21 @@ config_set_auditlog_enabled(int value){ CFG_ONOFF_UNLOCK_WRITE(slapdFrontendConfig); } +void +config_set_auditfaillog_enabled(int value){ + slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); + char errorbuf[BUFSIZ]; + + CFG_ONOFF_LOCK_WRITE(slapdFrontendConfig); + slapdFrontendConfig->auditfaillog_logging_enabled = (int)value; + if(value){ + log_set_logging(CONFIG_AUDITFAILLOG_LOGGING_ENABLED_ATTRIBUTE, "on", SLAPD_AUDITFAIL_LOG, errorbuf, CONFIG_APPLY); + } else { + log_set_logging(CONFIG_AUDITFAILLOG_LOGGING_ENABLED_ATTRIBUTE, "off", SLAPD_AUDITFAIL_LOG, errorbuf, CONFIG_APPLY); + } + CFG_ONOFF_UNLOCK_WRITE(slapdFrontendConfig); +} + int config_set_maxsimplepaged_per_conn( const char *attrname, char *value, char *errorbuf, int apply ) { diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 78701c4a5..ef6e57b6b 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -77,13 +77,16 @@ static int slapi_log_map[] = { static int log__open_accesslogfile(int logfile_type, int locked); static int log__open_errorlogfile(int logfile_type, int locked); static int log__open_auditlogfile(int logfile_type, int locked); +static int log__open_auditfaillogfile(int logfile_type, int locked); static int log__needrotation(LOGFD fp, int logtype); static int log__delete_access_logfile(); static int log__delete_error_logfile(int locked); static int log__delete_audit_logfile(); +static int log__delete_auditfail_logfile(); static int log__access_rotationinfof(char *pathname); static int log__error_rotationinfof(char *pathname); static int log__audit_rotationinfof(char *pathname); +static int log__auditfail_rotationinfof(char *pathname); static int log__extract_logheader (FILE *fp, long *f_ctime, PRInt64 *f_size); static int log__check_prevlogs (FILE *fp, char *filename); static PRInt64 log__getfilesize(LOGFD fp); @@ -273,6 +276,33 @@ void g_log_init(int log_enabled) if ((loginfo.log_audit_rwlock =slapi_new_rwlock())== NULL ) { exit (-1); } + + /* AUDIT LOG */ + loginfo.log_auditfail_state = 0; + loginfo.log_auditfail_mode = SLAPD_DEFAULT_FILE_MODE; + loginfo.log_auditfail_maxnumlogs = 1; + loginfo.log_auditfail_maxlogsize = -1; + loginfo.log_auditfail_rotationsync_enabled = 0; + loginfo.log_auditfail_rotationsynchour = -1; + loginfo.log_auditfail_rotationsyncmin = -1; + loginfo.log_auditfail_rotationsyncclock = -1; + loginfo.log_auditfail_rotationtime = 1; /* default: 1 */ + loginfo.log_auditfail_rotationunit = LOG_UNIT_WEEKS; /* default: week */ + loginfo.log_auditfail_rotationtime_secs = 604800; /* default: 1 week */ + loginfo.log_auditfail_maxdiskspace = -1; + loginfo.log_auditfail_minfreespace = -1; + loginfo.log_auditfail_exptime = -1; /* default: -1 */ + loginfo.log_auditfail_exptimeunit = LOG_UNIT_WEEKS; /* default: week */ + loginfo.log_auditfail_exptime_secs = -1; /* default: -1 */ + loginfo.log_auditfail_ctime = 0L; + loginfo.log_auditfail_file = NULL; + loginfo.log_auditfailinfo_file = NULL; + loginfo.log_numof_auditfail_logs = 1; + loginfo.log_auditfail_fdes = 
NULL; + loginfo.log_auditfail_logchain = NULL; + if ((loginfo.log_auditfail_rwlock =slapi_new_rwlock())== NULL ) { + exit (-1); + } } /****************************************************************************** @@ -342,6 +372,17 @@ log_set_logging(const char *attrname, char *value, int logtype, char *errorbuf, } LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + fe_cfg->auditfaillog_logging_enabled = v; + if (v) { + loginfo.log_auditfail_state |= LOGGING_ENABLED; + } + else { + loginfo.log_auditfail_state &= ~LOGGING_ENABLED; + } + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; } return LDAP_SUCCESS; @@ -580,6 +621,84 @@ log_update_auditlogdir(char *pathname, int apply) return rv; } +/****************************************************************************** +* Tell me the audit fail log file name inc path +******************************************************************************/ +char * +g_get_auditfail_log() { + char *logfile = NULL; + + LOG_AUDITFAIL_LOCK_READ(); + if ( loginfo.log_auditfail_file) { + logfile = slapi_ch_strdup (loginfo.log_auditfail_file); + } + LOG_AUDITFAIL_UNLOCK_READ(); + + return logfile; +} +/****************************************************************************** +* Point to a new auditfail logdir +* +* Returns: +* LDAP_SUCCESS -- success +* LDAP_UNWILLING_TO_PERFORM -- when trying to open a invalid log file +* LDAP_LOCAL_ERRO -- some error +******************************************************************************/ +int +log_update_auditfaillogdir(char *pathname, int apply) +{ + int rv = LDAP_SUCCESS; + LOGFD fp; + + /* try to open the file, we may have a incorrect path */ + if (! LOG_OPEN_APPEND(fp, pathname, loginfo.log_auditfail_mode)) { + LDAPDebug(LDAP_DEBUG_ANY, "WARNING: can't open file %s. " + "errno %d (%s)\n", + pathname, errno, slapd_system_strerror(errno)); + /* stay with the current log file */ + return LDAP_UNWILLING_TO_PERFORM; + } + LOG_CLOSE(fp); + + /* skip the rest if we aren't doing this for real */ + if ( !apply ) { + return LDAP_SUCCESS; + } + + /* + ** The user has changed the audit log directory. That means we + ** need to start fresh. + */ + LOG_AUDITFAIL_LOCK_WRITE (); + if (loginfo.log_auditfail_fdes) { + LogFileInfo *logp, *d_logp; + LDAPDebug(LDAP_DEBUG_TRACE, + "LOGINFO:Closing the auditfail log file. 
" + "Moving to a new auditfail file (%s)\n", pathname,0,0); + + LOG_CLOSE(loginfo.log_auditfail_fdes); + loginfo.log_auditfail_fdes = 0; + loginfo.log_auditfail_ctime = 0; + logp = loginfo.log_auditfail_logchain; + while (logp) { + d_logp = logp; + logp = logp->l_next; + slapi_ch_free((void**)&d_logp); + } + loginfo.log_auditfail_logchain = NULL; + slapi_ch_free((void**)&loginfo.log_auditfail_file); + loginfo.log_auditfail_file = NULL; + loginfo.log_numof_auditfail_logs = 1; + } + + /* Now open the new auditlog */ + if ( auditfail_log_openf (pathname, 1 /* locked */)) { + rv = LDAP_LOCAL_ERROR; /* error: Unable to use the new dir */ + } + LOG_AUDITFAIL_UNLOCK_WRITE(); + return rv; +} + int log_set_mode (const char *attrname, char *value, int logtype, char *errorbuf, int apply) { @@ -667,7 +786,8 @@ log_set_numlogsperdir(const char *attrname, char *numlogs_str, int logtype, char if ( logtype != SLAPD_ACCESS_LOG && logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { rv = LDAP_OPERATIONS_ERROR; PR_snprintf( returntext, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid log type %d", attrname, logtype ); @@ -698,6 +818,12 @@ log_set_numlogsperdir(const char *attrname, char *numlogs_str, int logtype, char fe_cfg->auditlog_maxnumlogs = numlogs; LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + loginfo.log_auditfail_maxnumlogs = numlogs; + fe_cfg->auditfaillog_maxnumlogs = numlogs; + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; default: rv = LDAP_OPERATIONS_ERROR; LDAPDebug( LDAP_DEBUG_ANY, @@ -749,6 +875,10 @@ log_set_logsize(const char *attrname, char *logsize_str, int logtype, char *retu LOG_AUDIT_LOCK_WRITE( ); mdiskspace = loginfo.log_audit_maxdiskspace; break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + mdiskspace = loginfo.log_auditfail_maxdiskspace; + break; default: PR_snprintf( returntext, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid logtype %d", attrname, logtype ); @@ -780,6 +910,13 @@ log_set_logsize(const char *attrname, char *logsize_str, int logtype, char *retu } LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + if (!rv && apply) { + loginfo.log_auditfail_maxlogsize = max_logsize; + fe_cfg->auditfaillog_maxlogsize = logsize; + } + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; default: rv = 1; } @@ -868,6 +1005,12 @@ log_set_rotationsync_enabled(const char *attrname, char *value, int logtype, cha loginfo.log_audit_rotationsync_enabled = v; LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + fe_cfg->auditfaillog_rotationsync_enabled = v; + loginfo.log_auditfail_rotationsync_enabled = v; + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; } return LDAP_SUCCESS; } @@ -881,7 +1024,8 @@ log_set_rotationsynchour(const char *attrname, char *rhour_str, int logtype, cha if ( logtype != SLAPD_ACCESS_LOG && logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { PR_snprintf( returntext, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid log type: %d", attrname, logtype ); return LDAP_OPERATIONS_ERROR; @@ -919,6 +1063,13 @@ log_set_rotationsynchour(const char *attrname, char *rhour_str, int logtype, cha fe_cfg->auditlog_rotationsynchour = rhour; LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + loginfo.log_auditfail_rotationsynchour = rhour; + loginfo.log_auditfail_rotationsyncclock = log_get_rotationsyncclock( rhour, 
loginfo.log_auditfail_rotationsyncmin ); + fe_cfg->auditfaillog_rotationsynchour = rhour; + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; } return rv; @@ -933,7 +1084,8 @@ log_set_rotationsyncmin(const char *attrname, char *rmin_str, int logtype, char if ( logtype != SLAPD_ACCESS_LOG && logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { PR_snprintf( returntext, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid log type: %d", attrname, logtype ); return LDAP_OPERATIONS_ERROR; @@ -971,6 +1123,13 @@ log_set_rotationsyncmin(const char *attrname, char *rmin_str, int logtype, char loginfo.log_audit_rotationsyncclock = log_get_rotationsyncclock( loginfo.log_audit_rotationsynchour, rmin ); LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + loginfo.log_auditfail_rotationsyncmin = rmin; + fe_cfg->auditfaillog_rotationsyncmin = rmin; + loginfo.log_auditfail_rotationsyncclock = log_get_rotationsyncclock( loginfo.log_auditfail_rotationsynchour, rmin ); + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; } return rv; @@ -993,7 +1152,8 @@ log_set_rotationtime(const char *attrname, char *rtime_str, int logtype, char *r if ( logtype != SLAPD_ACCESS_LOG && logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { PR_snprintf( returntext, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid log type: %d", attrname, logtype ); return LDAP_OPERATIONS_ERROR; @@ -1026,6 +1186,11 @@ log_set_rotationtime(const char *attrname, char *rtime_str, int logtype, char *r loginfo.log_audit_rotationtime = rtime; runit = loginfo.log_audit_rotationunit; break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + loginfo.log_auditfail_rotationtime = rtime; + runit = loginfo.log_auditfail_rotationunit; + break; } /* find out the rotation unit we have se right now */ @@ -1064,6 +1229,11 @@ log_set_rotationtime(const char *attrname, char *rtime_str, int logtype, char *r loginfo.log_audit_rotationtime_secs = value; LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + fe_cfg->auditfaillog_rotationtime = rtime; + loginfo.log_auditfail_rotationtime_secs = value; + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; } return rv; } @@ -1082,92 +1252,104 @@ int log_set_rotationtimeunit(const char *attrname, char *runit, int logtype, cha slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); if ( logtype != SLAPD_ACCESS_LOG && - logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { - PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, - "%s: invalid log type: %d", attrname, logtype ); - return LDAP_OPERATIONS_ERROR; + logtype != SLAPD_ERROR_LOG && + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { + PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: invalid log type: %d", attrname, logtype ); + return LDAP_OPERATIONS_ERROR; } if ( (strcasecmp(runit, "month") == 0) || - (strcasecmp(runit, "week") == 0) || - (strcasecmp(runit, "day") == 0) || - (strcasecmp(runit, "hour") == 0) || - (strcasecmp(runit, "minute") == 0)) { - /* all good values */ + (strcasecmp(runit, "week") == 0) || + (strcasecmp(runit, "day") == 0) || + (strcasecmp(runit, "hour") == 0) || + (strcasecmp(runit, "minute") == 0)) { + /* all good values */ } else { - PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, - "%s: unknown unit \"%s\"", attrname, runit ); - rv = LDAP_OPERATIONS_ERROR; + PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: unknown unit \"%s\"", attrname, runit 
); + rv = LDAP_OPERATIONS_ERROR; } /* return if we aren't doing this for real */ if ( !apply ) { - return rv; + return rv; } switch (logtype) { case SLAPD_ACCESS_LOG: - LOG_ACCESS_LOCK_WRITE( ); - origvalue = loginfo.log_access_rotationtime; - break; + LOG_ACCESS_LOCK_WRITE( ); + origvalue = loginfo.log_access_rotationtime; + break; case SLAPD_ERROR_LOG: - LOG_ERROR_LOCK_WRITE( ); - origvalue = loginfo.log_error_rotationtime; - break; + LOG_ERROR_LOCK_WRITE( ); + origvalue = loginfo.log_error_rotationtime; + break; case SLAPD_AUDIT_LOG: - LOG_AUDIT_LOCK_WRITE( ); - origvalue = loginfo.log_audit_rotationtime; - break; + LOG_AUDIT_LOCK_WRITE( ); + origvalue = loginfo.log_audit_rotationtime; + break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + origvalue = loginfo.log_auditfail_rotationtime; + break; } if (strcasecmp(runit, "month") == 0) { - runitType = LOG_UNIT_MONTHS; - value = origvalue * 31 * 24 * 60 * 60; + runitType = LOG_UNIT_MONTHS; + value = origvalue * 31 * 24 * 60 * 60; } else if (strcasecmp(runit, "week") == 0) { - runitType = LOG_UNIT_WEEKS; - value = origvalue * 7 * 24 * 60 * 60; + runitType = LOG_UNIT_WEEKS; + value = origvalue * 7 * 24 * 60 * 60; } else if (strcasecmp(runit, "day") == 0) { - runitType = LOG_UNIT_DAYS; - value = origvalue * 24 * 60 * 60; + runitType = LOG_UNIT_DAYS; + value = origvalue * 24 * 60 * 60; } else if (strcasecmp(runit, "hour") == 0) { - runitType = LOG_UNIT_HOURS; - value = origvalue * 3600; + runitType = LOG_UNIT_HOURS; + value = origvalue * 3600; } else if (strcasecmp(runit, "minute") == 0) { - runitType = LOG_UNIT_MINS; - value = origvalue * 60; + runitType = LOG_UNIT_MINS; + value = origvalue * 60; } else { - /* In this case we don't rotate */ - runitType = LOG_UNIT_UNKNOWN; - value = -1; + /* In this case we don't rotate */ + runitType = LOG_UNIT_UNKNOWN; + value = -1; } if (origvalue > 0 && value < 0) { - value = PR_INT32_MAX; /* overflown */ + value = PR_INT32_MAX; /* overflown */ } switch (logtype) { case SLAPD_ACCESS_LOG: - loginfo.log_access_rotationtime_secs = value; - loginfo.log_access_rotationunit = runitType; - slapi_ch_free ( (void **) &fe_cfg->accesslog_rotationunit); - fe_cfg->accesslog_rotationunit = slapi_ch_strdup ( runit ); - LOG_ACCESS_UNLOCK_WRITE(); - break; + loginfo.log_access_rotationtime_secs = value; + loginfo.log_access_rotationunit = runitType; + slapi_ch_free ( (void **) &fe_cfg->accesslog_rotationunit); + fe_cfg->accesslog_rotationunit = slapi_ch_strdup ( runit ); + LOG_ACCESS_UNLOCK_WRITE(); + break; case SLAPD_ERROR_LOG: - loginfo.log_error_rotationtime_secs = value; - loginfo.log_error_rotationunit = runitType; - slapi_ch_free ( (void **) &fe_cfg->errorlog_rotationunit) ; - fe_cfg->errorlog_rotationunit = slapi_ch_strdup ( runit ); - LOG_ERROR_UNLOCK_WRITE(); - break; + loginfo.log_error_rotationtime_secs = value; + loginfo.log_error_rotationunit = runitType; + slapi_ch_free ( (void **) &fe_cfg->errorlog_rotationunit) ; + fe_cfg->errorlog_rotationunit = slapi_ch_strdup ( runit ); + LOG_ERROR_UNLOCK_WRITE(); + break; case SLAPD_AUDIT_LOG: - loginfo.log_audit_rotationtime_secs = value; - loginfo.log_audit_rotationunit = runitType; - slapi_ch_free ( (void **) &fe_cfg->auditlog_rotationunit); - fe_cfg->auditlog_rotationunit = slapi_ch_strdup ( runit ); - LOG_AUDIT_UNLOCK_WRITE(); - break; + loginfo.log_audit_rotationtime_secs = value; + loginfo.log_audit_rotationunit = runitType; + slapi_ch_free ( (void **) &fe_cfg->auditlog_rotationunit); + fe_cfg->auditlog_rotationunit = slapi_ch_strdup ( runit ); + 
LOG_AUDIT_UNLOCK_WRITE(); + break; + case SLAPD_AUDITFAIL_LOG: + loginfo.log_auditfail_rotationtime_secs = value; + loginfo.log_auditfail_rotationunit = runitType; + slapi_ch_free ( (void **) &fe_cfg->auditfaillog_rotationunit); + fe_cfg->auditfaillog_rotationunit = slapi_ch_strdup ( runit ); + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; } return rv; } @@ -1183,75 +1365,87 @@ int log_set_rotationtimeunit(const char *attrname, char *runit, int logtype, cha int log_set_maxdiskspace(const char *attrname, char *maxdiskspace_str, int logtype, char *errorbuf, int apply) { - int rv = 0; - PRInt64 mlogsize = 0; /* in bytes */ - PRInt64 maxdiskspace; /* in bytes */ - int s_maxdiskspace; /* in megabytes */ + int rv = 0; + PRInt64 mlogsize = 0; /* in bytes */ + PRInt64 maxdiskspace; /* in bytes */ + int s_maxdiskspace; /* in megabytes */ - slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); - - if ( logtype != SLAPD_ACCESS_LOG && - logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { - PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, - "%s: invalid log type: %d", attrname, logtype ); - return LDAP_OPERATIONS_ERROR; - } - - if (!apply || !maxdiskspace_str || !*maxdiskspace_str) - return rv; - - s_maxdiskspace = atoi(maxdiskspace_str); + slapdFrontendConfig_t *fe_cfg = getFrontendConfig(); + + if ( logtype != SLAPD_ACCESS_LOG && + logtype != SLAPD_ERROR_LOG && + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { + PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: invalid log type: %d", attrname, logtype ); + return LDAP_OPERATIONS_ERROR; + } - /* Disk space are in MB but store in bytes */ - switch (logtype) { - case SLAPD_ACCESS_LOG: - LOG_ACCESS_LOCK_WRITE( ); - mlogsize = loginfo.log_access_maxlogsize; - break; - case SLAPD_ERROR_LOG: - LOG_ERROR_LOCK_WRITE( ); - mlogsize = loginfo.log_error_maxlogsize; - break; - case SLAPD_AUDIT_LOG: - LOG_AUDIT_LOCK_WRITE( ); - mlogsize = loginfo.log_audit_maxlogsize; - break; - } - maxdiskspace = (PRInt64)s_maxdiskspace * LOG_MB_IN_BYTES; - if (maxdiskspace < 0) { - maxdiskspace = -1; - } else if (maxdiskspace < mlogsize) { - rv = LDAP_OPERATIONS_ERROR; - PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, - "%s: \"%d (MB)\" is less than max log size \"%d (MB)\"", - attrname, s_maxdiskspace, (int)(mlogsize/LOG_MB_IN_BYTES) ); - } + if (!apply || !maxdiskspace_str || !*maxdiskspace_str) + return rv; + + s_maxdiskspace = atoi(maxdiskspace_str); + + /* Disk space are in MB but store in bytes */ + switch (logtype) { + case SLAPD_ACCESS_LOG: + LOG_ACCESS_LOCK_WRITE( ); + mlogsize = loginfo.log_access_maxlogsize; + break; + case SLAPD_ERROR_LOG: + LOG_ERROR_LOCK_WRITE( ); + mlogsize = loginfo.log_error_maxlogsize; + break; + case SLAPD_AUDIT_LOG: + LOG_AUDIT_LOCK_WRITE( ); + mlogsize = loginfo.log_audit_maxlogsize; + break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + mlogsize = loginfo.log_auditfail_maxlogsize; + break; + } + maxdiskspace = (PRInt64)s_maxdiskspace * LOG_MB_IN_BYTES; + if (maxdiskspace < 0) { + maxdiskspace = -1; + } else if (maxdiskspace < mlogsize) { + rv = LDAP_OPERATIONS_ERROR; + PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: \"%d (MB)\" is less than max log size \"%d (MB)\"", + attrname, s_maxdiskspace, (int)(mlogsize/LOG_MB_IN_BYTES) ); + } - switch (logtype) { - case SLAPD_ACCESS_LOG: - if (rv== 0 && apply) { - loginfo.log_access_maxdiskspace = maxdiskspace; /* in bytes */ - fe_cfg->accesslog_maxdiskspace = s_maxdiskspace; /* in megabytes */ - } - LOG_ACCESS_UNLOCK_WRITE(); - break; - case 
SLAPD_ERROR_LOG: - if (rv== 0 && apply) { - loginfo.log_error_maxdiskspace = maxdiskspace; /* in bytes */ - fe_cfg->errorlog_maxdiskspace = s_maxdiskspace; /* in megabytes */ - } - LOG_ERROR_UNLOCK_WRITE(); - break; - case SLAPD_AUDIT_LOG: - if (rv== 0 && apply) { - loginfo.log_audit_maxdiskspace = maxdiskspace; /* in bytes */ - fe_cfg->auditlog_maxdiskspace = s_maxdiskspace; /* in megabytes */ - } - LOG_AUDIT_UNLOCK_WRITE(); - break; - } - return rv; + switch (logtype) { + case SLAPD_ACCESS_LOG: + if (rv== 0 && apply) { + loginfo.log_access_maxdiskspace = maxdiskspace; /* in bytes */ + fe_cfg->accesslog_maxdiskspace = s_maxdiskspace; /* in megabytes */ + } + LOG_ACCESS_UNLOCK_WRITE(); + break; + case SLAPD_ERROR_LOG: + if (rv== 0 && apply) { + loginfo.log_error_maxdiskspace = maxdiskspace; /* in bytes */ + fe_cfg->errorlog_maxdiskspace = s_maxdiskspace; /* in megabytes */ + } + LOG_ERROR_UNLOCK_WRITE(); + break; + case SLAPD_AUDIT_LOG: + if (rv== 0 && apply) { + loginfo.log_audit_maxdiskspace = maxdiskspace; /* in bytes */ + fe_cfg->auditlog_maxdiskspace = s_maxdiskspace; /* in megabytes */ + } + LOG_AUDIT_UNLOCK_WRITE(); + break; + case SLAPD_AUDITFAIL_LOG: + if (rv== 0 && apply) { + loginfo.log_auditfail_maxdiskspace = maxdiskspace; /* in bytes */ + fe_cfg->auditfaillog_maxdiskspace = s_maxdiskspace; /* in megabytes */ + } + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; + } + return rv; } /****************************************************************************** @@ -1271,7 +1465,8 @@ log_set_mindiskspace(const char *attrname, char *minfreespace_str, int logtype, if ( logtype != SLAPD_ACCESS_LOG && logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid log type: %d", attrname, logtype ); rv = LDAP_OPERATIONS_ERROR; @@ -1306,7 +1501,14 @@ log_set_mindiskspace(const char *attrname, char *minfreespace_str, int logtype, fe_cfg->auditlog_minfreespace = minfreespace; LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + loginfo.log_auditfail_minfreespace = minfreespaceB; + fe_cfg->auditfaillog_minfreespace = minfreespace; + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; default: + /* This is unreachable ... 
*/ rv = 1; } } @@ -1329,7 +1531,8 @@ log_set_expirationtime(const char *attrname, char *exptime_str, int logtype, cha if ( logtype != SLAPD_ACCESS_LOG && logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid log type: %d", attrname, logtype ); rv = LDAP_OPERATIONS_ERROR; @@ -1361,7 +1564,14 @@ log_set_expirationtime(const char *attrname, char *exptime_str, int logtype, cha eunit = loginfo.log_audit_exptimeunit; rsec = loginfo.log_audit_rotationtime_secs; break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + loginfo.log_auditfail_exptime = exptime; + eunit = loginfo.log_auditfail_exptimeunit; + rsec = loginfo.log_auditfail_rotationtime_secs; + break; default: + /* This is unreachable */ rv = 1; eunit = -1; } @@ -1400,6 +1610,11 @@ log_set_expirationtime(const char *attrname, char *exptime_str, int logtype, cha fe_cfg->auditlog_exptime = exptime; LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + loginfo.log_auditfail_exptime_secs = value; + fe_cfg->auditfaillog_exptime = exptime; + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; default: rv = 1; } @@ -1423,7 +1638,8 @@ log_set_expirationtimeunit(const char *attrname, char *expunit, int logtype, cha if ( logtype != SLAPD_ACCESS_LOG && logtype != SLAPD_ERROR_LOG && - logtype != SLAPD_AUDIT_LOG ) { + logtype != SLAPD_AUDIT_LOG && + logtype != SLAPD_AUDITFAIL_LOG ) { PR_snprintf( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: invalid log type: %d", attrname, logtype ); return LDAP_OPERATIONS_ERROR; @@ -1469,6 +1685,12 @@ log_set_expirationtimeunit(const char *attrname, char *expunit, int logtype, cha rsecs = loginfo.log_audit_rotationtime_secs; exptimeunitp = &(loginfo.log_audit_exptimeunit); break; + case SLAPD_AUDITFAIL_LOG: + LOG_AUDITFAIL_LOCK_WRITE( ); + exptime = loginfo.log_auditfail_exptime; + rsecs = loginfo.log_auditfail_rotationtime_secs; + exptimeunitp = &(loginfo.log_auditfail_exptimeunit); + break; } value = -1; @@ -1518,6 +1740,12 @@ log_set_expirationtimeunit(const char *attrname, char *expunit, int logtype, cha fe_cfg->auditlog_exptimeunit = slapi_ch_strdup ( expunit ); LOG_AUDIT_UNLOCK_WRITE(); break; + case SLAPD_AUDITFAIL_LOG: + loginfo.log_auditfail_exptime_secs = value; + slapi_ch_free ( (void **) &(fe_cfg->auditfaillog_exptimeunit) ); + fe_cfg->auditfaillog_exptimeunit = slapi_ch_strdup ( expunit ); + LOG_AUDITFAIL_UNLOCK_WRITE(); + break; } return rv; @@ -1630,6 +1858,45 @@ audit_log_openf( char *pathname, int locked) return rv; } + +/****************************************************************************** +* init function for the auditfail log +* Returns: +* 0 - success +* 1 - fail +******************************************************************************/ +int +auditfail_log_openf( char *pathname, int locked) +{ + + int rv=0; + int logfile_type = 0; + + if (!locked) LOG_AUDITFAIL_LOCK_WRITE( ); + + /* store the path name */ + slapi_ch_free_string(&loginfo.log_auditfail_file); + loginfo.log_auditfail_file = slapi_ch_strdup ( pathname ); + + /* store the rotation info file path name */ + slapi_ch_free_string(&loginfo.log_auditfailinfo_file); + loginfo.log_auditfailinfo_file = slapi_ch_smprintf("%s.rotationinfo", pathname); + + /* + ** Check if we have a log file already. If we have it then + ** we need to parse the header info and update the loginfo + ** struct. 
+ */ + logfile_type = log__auditfail_rotationinfof(loginfo.log_auditfailinfo_file); + + if (log__open_auditfaillogfile(logfile_type, 1/* got lock*/) != LOG_SUCCESS) { + rv = 1; + } + + if (!locked) LOG_AUDITFAIL_UNLOCK_WRITE(); + + return rv; +} /****************************************************************************** * write in the audit log ******************************************************************************/ @@ -1664,6 +1931,39 @@ slapd_log_audit_proc ( return 0; } /****************************************************************************** +* write in the audit fail log +******************************************************************************/ +int +slapd_log_auditfail_proc ( + char *buffer, + int buf_len) +{ + if ( (loginfo.log_auditfail_state & LOGGING_ENABLED) && (loginfo.log_auditfail_file != NULL) ){ + LOG_AUDITFAIL_LOCK_WRITE( ); + if (log__needrotation(loginfo.log_auditfail_fdes, + SLAPD_AUDITFAIL_LOG) == LOG_ROTATE) { + if (log__open_auditfaillogfile(LOGFILE_NEW, 1) != LOG_SUCCESS) { + LDAPDebug(LDAP_DEBUG_ANY, + "LOGINFO: Unable to open auditfail file:%s\n", + loginfo.log_auditfail_file,0,0); + LOG_AUDITFAIL_UNLOCK_WRITE(); + return 0; + } + while (loginfo.log_auditfail_rotationsyncclock <= loginfo.log_auditfail_ctime) { + loginfo.log_auditfail_rotationsyncclock += PR_ABS(loginfo.log_auditfail_rotationtime_secs); + } + } + if (loginfo.log_auditfail_state & LOGGING_NEED_TITLE) { + log_write_title( loginfo.log_auditfail_fdes); + loginfo.log_auditfail_state &= ~LOGGING_NEED_TITLE; + } + LOG_WRITE_NOW_NO_ERR(loginfo.log_auditfail_fdes, buffer, buf_len, 0); + LOG_AUDITFAIL_UNLOCK_WRITE(); + return 0; + } + return 0; +} +/****************************************************************************** * write in the error log ******************************************************************************/ int @@ -2233,6 +2533,15 @@ log__needrotation(LOGFD fp, int logtype) rotationtime_secs = loginfo.log_audit_rotationtime_secs; log_createtime = loginfo.log_audit_ctime; break; + case SLAPD_AUDITFAIL_LOG: + nlogs = loginfo.log_auditfail_maxnumlogs; + maxlogsize = loginfo.log_auditfail_maxlogsize; + sync_enabled = loginfo.log_auditfail_rotationsync_enabled; + syncclock = loginfo.log_auditfail_rotationsyncclock; + timeunit = loginfo.log_auditfail_rotationunit; + rotationtime_secs = loginfo.log_auditfail_rotationtime_secs; + log_createtime = loginfo.log_auditfail_ctime; + break; default: /* error */ maxlogsize = -1; nlogs = 1; @@ -3397,6 +3706,173 @@ delete_logfile: return 1; } +/****************************************************************************** +* log__delete_auditfail_logfile +* +* Do we need to delete a logfile. Find out if we need to delete the log +* file based on expiration time, max diskspace, and minfreespace. +* Delete the file if we need to. 
+* +* Assumption: A WRITE lock has been acquired for the auditfail log +******************************************************************************/ + +static int +log__delete_auditfail_logfile() +{ + struct logfileinfo *logp = NULL; + struct logfileinfo *delete_logp = NULL; + struct logfileinfo *p_delete_logp = NULL; + struct logfileinfo *prev_logp = NULL; + PRInt64 total_size=0; + time_t cur_time; + PRInt64 f_size; + int numoflogs=loginfo.log_numof_auditfail_logs; + int rv = 0; + char *logstr; + char buffer[BUFSIZ]; + char tbuf[TBUFSIZE]; + + /* If we have only one log, then will delete this one */ + if (loginfo.log_auditfail_maxnumlogs == 1) { + LOG_CLOSE(loginfo.log_auditfail_fdes); + loginfo.log_auditfail_fdes = NULL; + PR_snprintf(buffer, sizeof(buffer), "%s", loginfo.log_auditfail_file); + if (PR_Delete(buffer) != PR_SUCCESS) { + PRErrorCode prerr = PR_GetError(); + if (PR_FILE_NOT_FOUND_ERROR == prerr) { + slapi_log_error(SLAPI_LOG_TRACE, "LOGINFO", "File %s already removed\n", loginfo.log_auditfail_file); + } else { + slapi_log_error(SLAPI_LOG_TRACE, "LOGINFO", "Unable to remove file:%s error %d (%s)\n", + loginfo.log_auditfail_file, prerr, slapd_pr_strerror(prerr)); + } + } + + /* Delete the rotation file also. */ + PR_snprintf(buffer, sizeof(buffer), "%s.rotationinfo", loginfo.log_auditfail_file); + if (PR_Delete(buffer) != PR_SUCCESS) { + PRErrorCode prerr = PR_GetError(); + if (PR_FILE_NOT_FOUND_ERROR == prerr) { + slapi_log_error(SLAPI_LOG_TRACE, "LOGINFO", "File %s already removed\n", loginfo.log_auditfail_file); + } else { + slapi_log_error(SLAPI_LOG_TRACE, "LOGINFO", "Unable to remove file:%s.rotatoininfo error %d (%s)\n", + loginfo.log_auditfail_file, prerr, slapd_pr_strerror(prerr)); + } + } + return 0; + } + + /* If we have already the maximum number of log files, we + ** have to delete one any how. + */ + if (++numoflogs > loginfo.log_auditfail_maxnumlogs) { + logstr = "Delete Error Log File: Exceeded max number of logs allowed"; + goto delete_logfile; + } + + /* Now check based on the maxdiskspace */ + if (loginfo.log_auditfail_maxdiskspace > 0) { + logp = loginfo.log_auditfail_logchain; + while (logp) { + total_size += logp->l_size; + logp = logp->l_next; + } + if ((f_size = log__getfilesize(loginfo.log_auditfail_fdes)) == -1) { + /* then just assume the max size */ + total_size += loginfo.log_auditfail_maxlogsize; + } else { + total_size += f_size; + } + + /* If we have exceeded the max disk space or we have less than the + ** minimum, then we have to delete a file. 
+ */ + if (total_size >= loginfo.log_auditfail_maxdiskspace) { + logstr = "exceeded maximum log disk space"; + goto delete_logfile; + } + } + + /* Now check based on the free space */ + if ( loginfo.log_auditfail_minfreespace > 0) { + rv = log__enough_freespace(loginfo.log_auditfail_file); + if ( rv == 0) { + /* Not enough free space */ + logstr = "Not enough free disk space"; + goto delete_logfile; + } + } + + /* Now check based on the expiration time */ + if ( loginfo.log_auditfail_exptime_secs > 0 ) { + /* is the file old enough */ + time (&cur_time); + prev_logp = logp = loginfo.log_auditfail_logchain; + while (logp) { + if ((cur_time - logp->l_ctime) > loginfo.log_auditfail_exptime_secs) { + delete_logp = logp; + p_delete_logp = prev_logp; + logstr = "The file is older than the log expiration time"; + goto delete_logfile; + } + prev_logp = logp; + logp = logp->l_next; + } + } + + /* No log files to delete */ + return 0; + +delete_logfile: + if (delete_logp == NULL) { + time_t oldest; + + time(&oldest); + + prev_logp = logp = loginfo.log_auditfail_logchain; + while (logp) { + if (logp->l_ctime <= oldest) { + oldest = logp->l_ctime; + delete_logp = logp; + p_delete_logp = prev_logp; + } + prev_logp = logp; + logp = logp->l_next; + } + /* We might face this case if we have only one log file and + ** trying to delete it because of deletion requirement. + */ + if (!delete_logp) { + return 0; + } + } + + if (p_delete_logp == delete_logp) { + /* then we are deleteing the first one */ + loginfo.log_auditfail_logchain = delete_logp->l_next; + } else { + p_delete_logp->l_next = delete_logp->l_next; + } + + /* Delete the audit file */ + log_convert_time (delete_logp->l_ctime, tbuf, 1 /*short */); + PR_snprintf(buffer, sizeof(buffer), "%s.%s", loginfo.log_auditfail_file, tbuf ); + if (PR_Delete(buffer) != PR_SUCCESS) { + PRErrorCode prerr = PR_GetError(); + if (PR_FILE_NOT_FOUND_ERROR == prerr) { + slapi_log_error(SLAPI_LOG_TRACE, "LOGINFO", "File %s already removed\n", loginfo.log_auditfail_file); + } else { + slapi_log_error(SLAPI_LOG_TRACE, "LOGINFO", "Unable to remove file:%s.%s error %d (%s)\n", + loginfo.log_auditfail_file, tbuf, prerr, slapd_pr_strerror(prerr)); + } + } else { + slapi_log_error(SLAPI_LOG_TRACE, "LOGINFO", "Removed file:%s.%s because of (%s)\n", loginfo.log_auditfail_file, tbuf, logstr); + } + slapi_ch_free((void**)&delete_logp); + loginfo.log_numof_auditfail_logs--; + + return 1; +} + /****************************************************************************** * log__error_rotationinfof * @@ -3571,6 +4047,101 @@ log__audit_rotationinfof( char *pathname) return logfile_type; } +/****************************************************************************** +* log__auditfail_rotationinfof +* +* Try to open the log file. If we have one already, then try to read the +* header and update the information. +* +* Assumption: Lock has been acquired already +******************************************************************************/ +static int +log__auditfail_rotationinfof( char *pathname) +{ + long f_ctime; + PRInt64 f_size; + int main_log = 1; + time_t now; + FILE *fp; + int rval, logfile_type = LOGFILE_REOPENED; + + /* + ** Okay -- I confess, we want to use NSPR calls but I want to + ** use fgets and not use PR_Read() and implement a complicated + ** parsing module. Since this will be called only during the startup + ** and never aftre that, we can live by it. 
+ */ + + if ((fp = fopen (pathname, "r")) == NULL) { + return LOGFILE_NEW; + } + + loginfo.log_numof_auditfail_logs = 0; + + /* + ** We have reopened the log audit file. Now we need to read the + ** log file info and update the values. + */ + while ((rval = log__extract_logheader(fp, &f_ctime, &f_size)) == LOG_CONTINUE) { + /* first we would get the main log info */ + if (f_ctime == 0 && f_size == 0) { + continue; + } + time (&now); + if (main_log) { + if (f_ctime > 0L) { + loginfo.log_auditfail_ctime = f_ctime; + } + else { + loginfo.log_auditfail_ctime = now; + } + main_log = 0; + } else { + struct logfileinfo *logp; + + logp = (struct logfileinfo *) slapi_ch_malloc (sizeof (struct logfileinfo)); + if (f_ctime > 0L) { + logp->l_ctime = f_ctime; + } + else { + logp->l_ctime = now; + } + if (f_size > 0) { + logp->l_size = f_size; + } + else { + /* make it the max log size */ + logp->l_size = loginfo.log_auditfail_maxlogsize; + } + + logp->l_next = loginfo.log_auditfail_logchain; + loginfo.log_auditfail_logchain = logp; + } + loginfo.log_numof_auditfail_logs++; + } + if (LOG_DONE == rval) { + rval = log__check_prevlogs(fp, pathname); + } + fclose (fp); + + if (LOG_ERROR == rval) { + if (LOG_SUCCESS == log__fix_rotationinfof(pathname)) { + logfile_type = LOGFILE_NEW; + } + } + + /* Check if there is a rotation overdue */ + if (loginfo.log_auditfail_rotationsync_enabled && + loginfo.log_auditfail_rotationunit != LOG_UNIT_HOURS && + loginfo.log_auditfail_rotationunit != LOG_UNIT_MINS && + loginfo.log_auditfail_ctime < loginfo.log_auditfail_rotationsyncclock - PR_ABS(loginfo.log_auditfail_rotationtime_secs)) + { + loginfo.log_auditfail_rotationsyncclock -= PR_ABS(loginfo.log_auditfail_rotationtime_secs); + } + + return logfile_type; +} + static void log__error_emergency(const char *errstr, int reopen, int locked) { @@ -3889,6 +4460,132 @@ log__open_auditlogfile(int logfile_state, int locked) if (!locked) LOG_AUDIT_UNLOCK_WRITE( ); return LOG_SUCCESS; } +/****************************************************************************** +* log__open_auditfaillogfile +* +* Open a new log file. If we have run out of the max logs we can have +* then delete the oldest file. +******************************************************************************/ +static int +log__open_auditfaillogfile(int logfile_state, int locked) +{ + + time_t now; + LOGFD fp; + LOGFD fpinfo = NULL; + char tbuf[TBUFSIZE]; + struct logfileinfo *logp; + char buffer[BUFSIZ]; + + if (!locked) LOG_AUDITFAIL_LOCK_WRITE( ); + + /* + ** Here we are trying to create a new log file. + ** If we alredy have one, then we need to rename it as + ** "filename.time", close it and update it's information + ** in the array stack. + */ + if (loginfo.log_auditfail_fdes != NULL) { + struct logfileinfo *log; + char newfile[BUFSIZ]; + PRInt64 f_size; + + + /* get rid of the old one */ + if ((f_size = log__getfilesize(loginfo.log_auditfail_fdes)) == -1) { + /* Then assume that we have the max size */ + f_size = loginfo.log_auditfail_maxlogsize; + } + + /* Check if I have to delete any old file, delete it if it is required. 
*/ + while (log__delete_auditfail_logfile()); + + /* close the file */ + LOG_CLOSE(loginfo.log_auditfail_fdes); + loginfo.log_auditfail_fdes = NULL; + + if ( loginfo.log_auditfail_maxnumlogs > 1 ) { + log = (struct logfileinfo *) slapi_ch_malloc (sizeof (struct logfileinfo)); + log->l_ctime = loginfo.log_auditfail_ctime; + log->l_size = f_size; + + log_convert_time (log->l_ctime, tbuf, 1 /*short */); + PR_snprintf(newfile, sizeof(newfile), "%s.%s", loginfo.log_auditfail_file, tbuf); + if (PR_Rename (loginfo.log_auditfail_file, newfile) != PR_SUCCESS) { + PRErrorCode prerr = PR_GetError(); + /* Make "FILE EXISTS" error an exception. + Even if PR_Rename fails with the error, we continue logging. + */ + if (PR_FILE_EXISTS_ERROR != prerr) { + if (!locked) LOG_AUDITFAIL_UNLOCK_WRITE(); + slapi_ch_free((void**)&log); + return LOG_UNABLE_TO_OPENFILE; + } + } + + /* add the log to the chain */ + log->l_next = loginfo.log_auditfail_logchain; + loginfo.log_auditfail_logchain = log; + loginfo.log_numof_auditfail_logs++; + } + } + + /* open a new log file */ + if (! LOG_OPEN_APPEND(fp, loginfo.log_auditfail_file, loginfo.log_auditfail_mode)) { + LDAPDebug(LDAP_DEBUG_ANY, "WARNING: can't open file %s. " + "errno %d (%s)\n", + loginfo.log_auditfail_file, errno, slapd_system_strerror(errno)); + if (!locked) LOG_AUDITFAIL_UNLOCK_WRITE(); + /*if I have an old log file -- I should log a message + ** that I can't open the new file. Let the caller worry + ** about logging message. + */ + return LOG_UNABLE_TO_OPENFILE; + } + + loginfo.log_auditfail_fdes = fp; + if (logfile_state == LOGFILE_REOPENED) { + /* we have all the information */ + if (!locked) LOG_AUDITFAIL_UNLOCK_WRITE(); + return LOG_SUCCESS; + } + + loginfo.log_auditfail_state |= LOGGING_NEED_TITLE; + + if (! LOG_OPEN_WRITE(fpinfo, loginfo.log_auditfailinfo_file, loginfo.log_auditfail_mode)) { + LDAPDebug(LDAP_DEBUG_ANY, "WARNING: can't open file %s. " + "errno %d (%s)\n", + loginfo.log_auditfailinfo_file, errno, slapd_system_strerror(errno)); + if (!locked) LOG_AUDITFAIL_UNLOCK_WRITE(); + return LOG_UNABLE_TO_OPENFILE; + } + + /* write the header in the log */ + now = current_time(); + log_convert_time (now, tbuf, 2 /*long */); + PR_snprintf(buffer, sizeof(buffer), "LOGINFO:Log file created at: %s (%lu)\n", tbuf, now); + LOG_WRITE(fpinfo, buffer, strlen(buffer), 0); + + logp = loginfo.log_auditfail_logchain; + while ( logp) { + log_convert_time (logp->l_ctime, tbuf, 1 /*short */); + PR_snprintf(buffer, sizeof(buffer), "LOGINFO:%s%s.%s (%lu) (%" + NSPRI64 "d)\n", PREVLOGFILE, loginfo.log_auditfail_file, tbuf, + logp->l_ctime, logp->l_size); + LOG_WRITE(fpinfo, buffer, strlen(buffer), 0); + logp = logp->l_next; + } + /* Close the info file. We need only when we need to rotate to the + ** next log file. 
+ */ + if (fpinfo) LOG_CLOSE(fpinfo); + + /* This is now the current audit log */ + loginfo.log_auditfail_ctime = now; + + if (!locked) LOG_AUDITFAIL_UNLOCK_WRITE( ); + return LOG_SUCCESS; +} /* ** Log Buffering diff --git a/ldap/servers/slapd/log.h b/ldap/servers/slapd/log.h index 40df6d7ea..6c5f4f1de 100644 --- a/ldap/servers/slapd/log.h +++ b/ldap/servers/slapd/log.h @@ -176,6 +176,32 @@ struct logging_opts { char *log_auditinfo_file; /* audit log rotation info file */ Slapi_RWLock *log_audit_rwlock; /* lock on audit*/ + /* These are auditfail log specific */ + int log_auditfail_state; + int log_auditfail_mode; /* access mode */ + int log_auditfail_maxnumlogs; /* Number of logs */ + PRInt64 log_auditfail_maxlogsize; /* max log size in bytes*/ + int log_auditfail_rotationtime; /* time in units. */ + int log_auditfail_rotationunit; /* time in units. */ + int log_auditfail_rotationtime_secs; /* time in seconds */ + int log_auditfail_rotationsync_enabled;/* 0 or 1*/ + int log_auditfail_rotationsynchour; /* 0-23 */ + int log_auditfail_rotationsyncmin; /* 0-59 */ + time_t log_auditfail_rotationsyncclock; /* clock in seconds */ + PRInt64 log_auditfail_maxdiskspace; /* space in bytes */ + PRInt64 log_auditfail_minfreespace; /* free space in bytes */ + int log_auditfail_exptime; /* time */ + int log_auditfail_exptimeunit; /* unit time */ + int log_auditfail_exptime_secs; /* time in secs */ + + char *log_auditfail_file; /* auditfail log name */ + LOGFD log_auditfail_fdes; /* auditfail log fdes */ + unsigned int log_numof_auditfail_logs; /* number of logs */ + time_t log_auditfail_ctime; /* log creation time */ + LogFileInfo *log_auditfail_logchain; /* all the logs info */ + char *log_auditfailinfo_file; /* auditfail log rotation info file */ + Slapi_RWLock *log_auditfail_rwlock; /* lock on auditfail */ + }; /* For log_state */ @@ -197,3 +223,8 @@ struct logging_opts { #define LOG_AUDIT_LOCK_WRITE() slapi_rwlock_wrlock(loginfo.log_audit_rwlock) #define LOG_AUDIT_UNLOCK_WRITE() slapi_rwlock_unlock(loginfo.log_audit_rwlock) +#define LOG_AUDITFAIL_LOCK_READ() slapi_rwlock_rdlock(loginfo.log_auditfail_rwlock) +#define LOG_AUDITFAIL_UNLOCK_READ() slapi_rwlock_unlock(loginfo.log_auditfail_rwlock) +#define LOG_AUDITFAIL_LOCK_WRITE() slapi_rwlock_wrlock(loginfo.log_auditfail_rwlock) +#define LOG_AUDITFAIL_UNLOCK_WRITE() slapi_rwlock_unlock(loginfo.log_auditfail_rwlock) + diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c index 9225d3125..4c0beccdc 100644 --- a/ldap/servers/slapd/modify.c +++ b/ldap/servers/slapd/modify.c @@ -1079,6 +1079,11 @@ static void op_shared_modify (Slapi_PBlock *pb, int pw_change, char *old_pw) operation_out_of_disk_space(); goto free_and_return; } + /* If the disk is full we don't want to make it worse ... */ + if (operation_is_flag_set(operation,OP_FLAG_ACTION_LOG_AUDIT)) + { + write_auditfail_log_entry(pb); /* Record the operation in the audit log */ + } } } else diff --git a/ldap/servers/slapd/modrdn.c b/ldap/servers/slapd/modrdn.c index e80e1f99d..d0ef1b1ff 100644 --- a/ldap/servers/slapd/modrdn.c +++ b/ldap/servers/slapd/modrdn.c @@ -641,6 +641,12 @@ op_shared_rename(Slapi_PBlock *pb, int passin_args) slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &ecopy); /* GGOODREPL persistent search system needs the changenumber, oops. */ do_ps_service(pse, ecopy, LDAP_CHANGETYPE_MODDN, 0); + } else { + /* Should we also be doing a disk space check here? 
*/ + if (operation_is_flag_set(operation,OP_FLAG_ACTION_LOG_AUDIT)) + { + write_auditfail_log_entry(pb); /* Record the operation in the audit log */ + } } } else diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index ff9dd0946..22e800714 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -293,6 +293,7 @@ int config_set_timelimit(const char *attrname, char *value, char *errorbuf, int int config_set_errorlog_level(const char *attrname, char *value, char *errorbuf, int apply ); int config_set_accesslog_level(const char *attrname, char *value, char *errorbuf, int apply ); int config_set_auditlog(const char *attrname, char *value, char *errorbuf, int apply ); +int config_set_auditfaillog(const char *attrname, char *value, char *errorbuf, int apply ); int config_set_userat(const char *attrname, char *value, char *errorbuf, int apply ); int config_set_accesslog(const char *attrname, char *value, char *errorbuf, int apply ); int config_set_errorlog(const char *attrname, char *value, char *errorbuf, int apply ); @@ -372,6 +373,7 @@ int config_set_disk_threshold( const char *attrname, char *value, char *errorbuf int config_set_disk_grace_period( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_disk_logging_critical( const char *attrname, char *value, char *errorbuf, int apply ); int config_set_auditlog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply); +int config_set_auditfaillog_unhashed_pw(const char *attrname, char *value, char *errorbuf, int apply); int config_set_ndn_cache_enabled(const char *attrname, char *value, char *errorbuf, int apply); int config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf, int apply); int config_set_unhashed_pw_switch(const char *attrname, char *value, char *errorbuf, int apply); @@ -476,12 +478,14 @@ char* config_get_useroc(); char *config_get_accesslog(); char *config_get_errorlog(); char *config_get_auditlog(); +char *config_get_auditfaillog(); long config_get_pw_maxage(); long config_get_pw_minage(); long config_get_pw_warning(); int config_get_errorlog_level(); int config_get_accesslog_level(); int config_get_auditlog_logging_enabled(); +int config_get_auditfaillog_logging_enabled(); char *config_get_referral_mode(void); int config_get_conntablesize(void); int config_check_referral_mode(void); @@ -503,6 +507,7 @@ char *config_get_saslpath(); char **config_get_errorlog_list(); char **config_get_accesslog_list(); char **config_get_auditlog_list(); +char **config_get_auditfaillog_list(); int config_get_attrname_exceptions(); int config_get_hash_filters(); int config_get_rewrite_rfc1274(); @@ -529,6 +534,7 @@ char *config_get_default_naming_context(void); int config_allowed_to_delete_attrs(const char *attr_type); void config_set_accesslog_enabled(int value); void config_set_auditlog_enabled(int value); +void config_set_auditfaillog_enabled(int value); int config_get_accesslog_logging_enabled(); int config_get_disk_monitoring(); PRInt64 config_get_disk_threshold(); @@ -743,18 +749,21 @@ int slapi_log_access( int level, char *fmt, ... 
) ; #endif int slapd_log_audit_proc(char *buffer, int buf_len); +int slapd_log_auditfail_proc(char *buffer, int buf_len); void log_access_flush(); int access_log_openf( char *pathname, int locked); int error_log_openf( char *pathname, int locked); int audit_log_openf( char *pathname, int locked); +int auditfail_log_openf( char *pathname, int locked); void g_set_detached(int); void g_log_init(int log_enabled); char *g_get_access_log(); char *g_get_error_log(); char *g_get_audit_log(); +char *g_get_auditfail_log(); void g_set_accesslog_level(int val); int log_set_mode(const char *attrname, char *mode_str, int logtype, char *errorbuf, int apply); @@ -773,6 +782,7 @@ char **log_get_loglist(int logtype); int log_update_accesslogdir(char *pathname, int apply); int log_update_errorlogdir(char *pathname, int apply); int log_update_auditlogdir(char *pathname, int apply); +int log_update_auditfaillogdir(char *pathname, int apply); int log_set_logging (const char *attrname, char *value, int logtype, char *errorbuf, int apply); int check_log_max_size( char *maxdiskspace_str, @@ -1245,6 +1255,10 @@ void write_audit_log_entry( Slapi_PBlock *pb); void auditlog_hide_unhashed_pw(); void auditlog_expose_unhashed_pw(); +void write_auditfail_log_entry( Slapi_PBlock *pb); +void auditfaillog_hide_unhashed_pw(); +void auditfaillog_expose_unhashed_pw(); + /* * eventq.c */ diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index f78bc4622..3cc99cf4d 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -1883,6 +1883,7 @@ typedef struct _slapdEntryPoints { #define SLAPD_ACCESS_LOG 0x1 #define SLAPD_ERROR_LOG 0x2 #define SLAPD_AUDIT_LOG 0x4 +#define SLAPD_AUDITFAIL_LOG 0x8 #define CONFIG_DATABASE_ATTRIBUTE "nsslapd-database" #define CONFIG_PLUGIN_ATTRIBUTE "nsslapd-plugin" @@ -1907,48 +1908,63 @@ typedef struct _slapdEntryPoints { #define CONFIG_ACCESSLOG_MODE_ATTRIBUTE "nsslapd-accesslog-mode" #define CONFIG_ERRORLOG_MODE_ATTRIBUTE "nsslapd-errorlog-mode" #define CONFIG_AUDITLOG_MODE_ATTRIBUTE "nsslapd-auditlog-mode" +#define CONFIG_AUDITFAILLOG_MODE_ATTRIBUTE "nsslapd-auditfaillog-mode" #define CONFIG_ACCESSLOG_MAXNUMOFLOGSPERDIR_ATTRIBUTE "nsslapd-accesslog-maxlogsperdir" #define CONFIG_ERRORLOG_MAXNUMOFLOGSPERDIR_ATTRIBUTE "nsslapd-errorlog-maxlogsperdir" #define CONFIG_AUDITLOG_MAXNUMOFLOGSPERDIR_ATTRIBUTE "nsslapd-auditlog-maxlogsperdir" +#define CONFIG_AUDITFAILLOG_MAXNUMOFLOGSPERDIR_ATTRIBUTE "nsslapd-auditfaillog-maxlogsperdir" #define CONFIG_ACCESSLOG_MAXLOGSIZE_ATTRIBUTE "nsslapd-accesslog-maxlogsize" #define CONFIG_ERRORLOG_MAXLOGSIZE_ATTRIBUTE "nsslapd-errorlog-maxlogsize" #define CONFIG_AUDITLOG_MAXLOGSIZE_ATTRIBUTE "nsslapd-auditlog-maxlogsize" +#define CONFIG_AUDITFAILLOG_MAXLOGSIZE_ATTRIBUTE "nsslapd-auditfaillog-maxlogsize" #define CONFIG_ACCESSLOG_LOGROTATIONSYNCENABLED_ATTRIBUTE "nsslapd-accesslog-logrotationsync-enabled" #define CONFIG_ERRORLOG_LOGROTATIONSYNCENABLED_ATTRIBUTE "nsslapd-errorlog-logrotationsync-enabled" #define CONFIG_AUDITLOG_LOGROTATIONSYNCENABLED_ATTRIBUTE "nsslapd-auditlog-logrotationsync-enabled" +#define CONFIG_AUDITFAILLOG_LOGROTATIONSYNCENABLED_ATTRIBUTE "nsslapd-auditfaillog-logrotationsync-enabled" #define CONFIG_ACCESSLOG_LOGROTATIONSYNCHOUR_ATTRIBUTE "nsslapd-accesslog-logrotationsynchour" #define CONFIG_ERRORLOG_LOGROTATIONSYNCHOUR_ATTRIBUTE "nsslapd-errorlog-logrotationsynchour" #define CONFIG_AUDITLOG_LOGROTATIONSYNCHOUR_ATTRIBUTE "nsslapd-auditlog-logrotationsynchour" +#define CONFIG_AUDITFAILLOG_LOGROTATIONSYNCHOUR_ATTRIBUTE 
"nsslapd-auditfaillog-logrotationsynchour" #define CONFIG_ACCESSLOG_LOGROTATIONSYNCMIN_ATTRIBUTE "nsslapd-accesslog-logrotationsyncmin" #define CONFIG_ERRORLOG_LOGROTATIONSYNCMIN_ATTRIBUTE "nsslapd-errorlog-logrotationsyncmin" #define CONFIG_AUDITLOG_LOGROTATIONSYNCMIN_ATTRIBUTE "nsslapd-auditlog-logrotationsyncmin" +#define CONFIG_AUDITFAILLOG_LOGROTATIONSYNCMIN_ATTRIBUTE "nsslapd-auditfaillog-logrotationsyncmin" #define CONFIG_ACCESSLOG_LOGROTATIONTIME_ATTRIBUTE "nsslapd-accesslog-logrotationtime" #define CONFIG_ERRORLOG_LOGROTATIONTIME_ATTRIBUTE "nsslapd-errorlog-logrotationtime" #define CONFIG_AUDITLOG_LOGROTATIONTIME_ATTRIBUTE "nsslapd-auditlog-logrotationtime" +#define CONFIG_AUDITFAILLOG_LOGROTATIONTIME_ATTRIBUTE "nsslapd-auditfaillog-logrotationtime" #define CONFIG_ACCESSLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE "nsslapd-accesslog-logrotationtimeunit" #define CONFIG_ERRORLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE "nsslapd-errorlog-logrotationtimeunit" #define CONFIG_AUDITLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE "nsslapd-auditlog-logrotationtimeunit" +#define CONFIG_AUDITFAILLOG_LOGROTATIONTIMEUNIT_ATTRIBUTE "nsslapd-auditfaillog-logrotationtimeunit" #define CONFIG_ACCESSLOG_MAXLOGDISKSPACE_ATTRIBUTE "nsslapd-accesslog-logmaxdiskspace" #define CONFIG_ERRORLOG_MAXLOGDISKSPACE_ATTRIBUTE "nsslapd-errorlog-logmaxdiskspace" #define CONFIG_AUDITLOG_MAXLOGDISKSPACE_ATTRIBUTE "nsslapd-auditlog-logmaxdiskspace" +#define CONFIG_AUDITFAILLOG_MAXLOGDISKSPACE_ATTRIBUTE "nsslapd-auditfaillog-logmaxdiskspace" #define CONFIG_ACCESSLOG_MINFREEDISKSPACE_ATTRIBUTE "nsslapd-accesslog-logminfreediskspace" #define CONFIG_ERRORLOG_MINFREEDISKSPACE_ATTRIBUTE "nsslapd-errorlog-logminfreediskspace" #define CONFIG_AUDITLOG_MINFREEDISKSPACE_ATTRIBUTE "nsslapd-auditlog-logminfreediskspace" +#define CONFIG_AUDITFAILLOG_MINFREEDISKSPACE_ATTRIBUTE "nsslapd-auditfaillog-logminfreediskspace" #define CONFIG_ACCESSLOG_LOGEXPIRATIONTIME_ATTRIBUTE "nsslapd-accesslog-logexpirationtime" #define CONFIG_ERRORLOG_LOGEXPIRATIONTIME_ATTRIBUTE "nsslapd-errorlog-logexpirationtime" #define CONFIG_AUDITLOG_LOGEXPIRATIONTIME_ATTRIBUTE "nsslapd-auditlog-logexpirationtime" +#define CONFIG_AUDITFAILLOG_LOGEXPIRATIONTIME_ATTRIBUTE "nsslapd-auditfaillog-logexpirationtime" #define CONFIG_ACCESSLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE "nsslapd-accesslog-logexpirationtimeunit" #define CONFIG_ERRORLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE "nsslapd-errorlog-logexpirationtimeunit" #define CONFIG_AUDITLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE "nsslapd-auditlog-logexpirationtimeunit" +#define CONFIG_AUDITFAILLOG_LOGEXPIRATIONTIMEUNIT_ATTRIBUTE "nsslapd-auditfaillog-logexpirationtimeunit" #define CONFIG_ACCESSLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-accesslog-logging-enabled" #define CONFIG_ERRORLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-errorlog-logging-enabled" #define CONFIG_AUDITLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-auditlog-logging-enabled" +#define CONFIG_AUDITFAILLOG_LOGGING_ENABLED_ATTRIBUTE "nsslapd-auditfaillog-logging-enabled" #define CONFIG_AUDITLOG_LOGGING_HIDE_UNHASHED_PW "nsslapd-auditlog-logging-hide-unhashed-pw" +#define CONFIG_AUDITFAILLOG_LOGGING_HIDE_UNHASHED_PW "nsslapd-auditfaillog-logging-hide-unhashed-pw" #define CONFIG_UNHASHED_PW_SWITCH_ATTRIBUTE "nsslapd-unhashed-pw-switch" #define CONFIG_ROOTDN_ATTRIBUTE "nsslapd-rootdn" #define CONFIG_ROOTPW_ATTRIBUTE "nsslapd-rootpw" #define CONFIG_ROOTPWSTORAGESCHEME_ATTRIBUTE "nsslapd-rootpwstoragescheme" #define CONFIG_AUDITFILE_ATTRIBUTE "nsslapd-auditlog" +#define CONFIG_AUDITFAILFILE_ATTRIBUTE "nsslapd-auditfaillog" 
#define CONFIG_LASTMOD_ATTRIBUTE "nsslapd-lastmod" #define CONFIG_INCLUDE_ATTRIBUTE "nsslapd-include" #define CONFIG_DYNAMICCONF_ATTRIBUTE "nsslapd-dynamicconf" @@ -2042,6 +2058,7 @@ typedef struct _slapdEntryPoints { #define CONFIG_ACCESSLOG_LIST_ATTRIBUTE "nsslapd-accesslog-list" #define CONFIG_ERRORLOG_LIST_ATTRIBUTE "nsslapd-errorlog-list" #define CONFIG_AUDITLOG_LIST_ATTRIBUTE "nsslapd-auditlog-list" +#define CONFIG_AUDITFAILLOG_LIST_ATTRIBUTE "nsslapd-auditfaillog-list" #define CONFIG_REWRITE_RFC1274_ATTRIBUTE "nsslapd-rewrite-rfc1274" #define CONFIG_PLUGIN_BINDDN_TRACKING_ATTRIBUTE "nsslapd-plugin-binddn-tracking" #define CONFIG_MODDN_ACI_ATTRIBUTE "nsslapd-moddn-aci" @@ -2265,6 +2282,24 @@ typedef struct _slapdFrontendConfig { char *auditlog_exptimeunit; slapi_onoff_t auditlog_logging_hide_unhashed_pw; + /* AUDIT FAIL LOG */ + char *auditfaillog; + int auditfailloglevel; + slapi_onoff_t auditfaillog_logging_enabled; + char *auditfaillog_mode; + int auditfaillog_maxnumlogs; + int auditfaillog_maxlogsize; + slapi_onoff_t auditfaillog_rotationsync_enabled; + int auditfaillog_rotationsynchour; + int auditfaillog_rotationsyncmin; + int auditfaillog_rotationtime; + char *auditfaillog_rotationunit; + int auditfaillog_maxdiskspace; + int auditfaillog_minfreespace; + int auditfaillog_exptime; + char *auditfaillog_exptimeunit; + slapi_onoff_t auditfaillog_logging_hide_unhashed_pw; + slapi_onoff_t return_exact_case; /* Return attribute names with the same case as they appear in at.conf */
0
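The rotation handling in the diff above converts a configured time unit ("month", "week", "day", ...) into seconds and clamps the product when the multiplication overflows, treating a month as 31 days and using PR_INT32_MAX as the ceiling. The following is a minimal, self-contained sketch of just that conversion; the function name and the plain long/INT_MAX types are illustrative assumptions, not the server's actual code.

```c
#include <limits.h>
#include <stdio.h>
#include <strings.h>   /* strcasecmp */

/* Illustrative restatement of the unit-to-seconds conversion in the diff:
 * multiply the configured rotation time out to seconds and clamp if the
 * multiplication overflows (the server clamps to PR_INT32_MAX). */
static long
rotation_unit_to_secs(const char *unit, long configured)
{
    long secs;

    if (strcasecmp(unit, "month") == 0) {
        secs = configured * 31 * 24 * 60 * 60;   /* a month counts as 31 days */
    } else if (strcasecmp(unit, "week") == 0) {
        secs = configured * 7 * 24 * 60 * 60;
    } else if (strcasecmp(unit, "day") == 0) {
        secs = configured * 24 * 60 * 60;
    } else if (strcasecmp(unit, "hour") == 0) {
        secs = configured * 3600;
    } else if (strcasecmp(unit, "minute") == 0) {
        secs = configured * 60;
    } else {
        secs = -1;                               /* unknown unit: no time-based rotation */
    }
    if (configured > 0 && secs < 0) {
        secs = INT_MAX;                          /* multiplication overflowed: clamp */
    }
    return secs;
}

int main(void)
{
    printf("1 week   = %ld secs\n", rotation_unit_to_secs("week", 1));
    printf("4 months = %ld secs\n", rotation_unit_to_secs("month", 4));
    return 0;
}
```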
07477c6e944b9921436d5392c5b3a82dd71635bb
389ds/389-ds-base
Bump openssl from 0.10.52 to 0.10.55 in /src Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.52 to 0.10.55. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.52...openssl-v0.10.55) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production ... Signed-off-by: dependabot[bot] <[email protected]>
commit 07477c6e944b9921436d5392c5b3a82dd71635bb Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed Jun 21 22:56:54 2023 +0000 Bump openssl from 0.10.52 to 0.10.55 in /src Bumps [openssl](https://github.com/sfackler/rust-openssl) from 0.10.52 to 0.10.55. - [Release notes](https://github.com/sfackler/rust-openssl/releases) - [Commits](https://github.com/sfackler/rust-openssl/compare/openssl-v0.10.52...openssl-v0.10.55) --- updated-dependencies: - dependency-name: openssl dependency-type: direct:production ... Signed-off-by: dependabot[bot] <[email protected]> diff --git a/src/Cargo.lock b/src/Cargo.lock index b1dd53ea7..3112bca74 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -414,9 +414,9 @@ checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3" [[package]] name = "openssl" -version = "0.10.52" +version = "0.10.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" +checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d" dependencies = [ "bitflags", "cfg-if", @@ -440,9 +440,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.87" +version = "0.9.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e" +checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" dependencies = [ "cc", "libc",
0
98cf4246b6e5c8a99a1e9063eca9aad9560185bd
389ds/389-ds-base
Ticket #47750 - Creating a glue fails if one above level is a conflict or missing Description: This commit accidentally removed the code to add entry back to cache if it was replaced: Commit: 160cb3f686e433c01532d28770b2977ec957e73e Ticket #47750 - Creating a glue fails if one above level is a conflict or missing; Note: This is the cause of Ticket #47830 - usn tombstone entry not properly created
commit 98cf4246b6e5c8a99a1e9063eca9aad9560185bd Author: Ludwig Krispenz <[email protected]> Date: Mon Jun 30 14:15:06 2014 +0200 Ticket #47750 - Creating a glue fails if one above level is a conflict or missing Description: This commit accidentally removed the code to add entry back to cache if it was replaced: Commit: 160cb3f686e433c01532d28770b2977ec957e73e Ticket #47750 - Creating a glue fails if one above level is a conflict or missing; Note: This is the cause of Ticket #47830 - usn tombstone entry not properly created diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c index a365ce5ed..3c29492f4 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c @@ -232,6 +232,11 @@ ldbm_back_delete( Slapi_PBlock *pb ) goto error_return; } } + /* reset original entry in cache */ + if (!e_in_cache) { + CACHE_ADD(&inst->inst_cache, e, NULL); + e_in_cache = 1; + } if (ruv_c_init) { /* reset the ruv txn stuff */ modify_term(&ruv_c, be); @@ -733,6 +738,8 @@ ldbm_back_delete( Slapi_PBlock *pb ) retval= -1; DEL_SET_ERROR(ldap_result_code, LDAP_OPERATIONS_ERROR, retry_count); goto error_return; + } else { + e_in_cache = 0; } } else { struct backentry *imposter = NULL;
0
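The description above boils down to restoring one piece of bookkeeping in ldbm_back_delete(): when the working entry has been swapped out of the entry cache, it must be added back (and the e_in_cache flag set) before the operation retries, otherwise later tombstone handling sees a missing entry. Below is a toy, self-contained sketch of that flag-plus-re-add pattern; the one-slot "cache" and helper names are stand-ins for the real CACHE_ADD machinery, not the back-ldbm API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the bookkeeping the fix restores: e_in_cache tracks whether
 * the working entry is still in the entry cache, the flag is cleared when
 * the cached copy is swapped out, and the entry is re-added before retrying.
 * The single-slot "cache" stands in for inst->inst_cache / CACHE_ADD. */
struct entry { const char *dn; };

static struct entry *cache_slot;                  /* pretend entry cache */

static void cache_add(struct entry *e, bool *e_in_cache)
{
    cache_slot = e;
    *e_in_cache = true;
}

static void cache_swap_out(bool *e_in_cache)
{
    cache_slot = NULL;                            /* replaced behind our back */
    *e_in_cache = false;
}

int main(void)
{
    struct entry e = { "cn=glue,dc=example,dc=com" };
    bool e_in_cache = false;

    cache_add(&e, &e_in_cache);
    cache_swap_out(&e_in_cache);                  /* what the retry path hits */

    if (!e_in_cache) {                            /* the restored check */
        cache_add(&e, &e_in_cache);
    }
    printf("%s back in cache: %s\n", e.dn, e_in_cache ? "yes" : "no");
    return 0;
}
```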
5a6a5a18d0458bd147af57a06158245f329ddba3
389ds/389-ds-base
Ticket #49082 - Adjusted the CI test case to the fix. Description: Fix password expiration related shadow attributes
commit 5a6a5a18d0458bd147af57a06158245f329ddba3 Author: Noriko Hosoi <[email protected]> Date: Wed Jan 11 15:14:07 2017 -0800 Ticket #49082 - Adjusted the CI test case to the fix. Description: Fix password expiration related shadow attributes diff --git a/dirsrvtests/tests/tickets/ticket548_test.py b/dirsrvtests/tests/tickets/ticket548_test.py index 8c55254e0..40b804819 100644 --- a/dirsrvtests/tests/tickets/ticket548_test.py +++ b/dirsrvtests/tests/tickets/ticket548_test.py @@ -54,6 +54,13 @@ def set_global_pwpolicy(topology_st, min_=1, max_=10, warn=3): log.error('Failed to set passwordMinAge: error ' + e.message['desc']) assert False + log.info(" Set global password Expiration -- on\n") + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordExp', 'on')]) + except ldap.LDAPError as e: + log.error('Failed to set passwordExp: error ' + e.message['desc']) + assert False + log.info(" Set global password Max Age -- %s days\n" % max_) try: topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMaxAge', '%s' % max_secs)])
0
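The CI adjustment above simply switches global password expiration on (passwordExp: on under cn=config) before the shadow-attribute checks run. For reference, here is a hedged sketch of the same single-attribute MOD_REPLACE using the OpenLDAP C client API; the URI, bind DN, and password are placeholders, not values taken from the test.

```c
#include <ldap.h>
#include <stdio.h>

/* Hedged sketch: replace passwordExp on cn=config, the same change the
 * python-ldap test makes with modify_s().  Connection details are dummies. */
int main(void)
{
    LDAP *ld = NULL;
    int ver = LDAP_VERSION3;
    char *vals[] = { "on", NULL };
    LDAPMod mod = { LDAP_MOD_REPLACE, "passwordExp", { vals } };
    LDAPMod *mods[] = { &mod, NULL };
    struct berval cred = { 8, "password" };      /* placeholder credentials */
    int rc;

    ldap_initialize(&ld, "ldap://localhost:389");
    ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, &ver);
    ldap_sasl_bind_s(ld, "cn=Directory Manager", LDAP_SASL_SIMPLE,
                     &cred, NULL, NULL, NULL);

    rc = ldap_modify_ext_s(ld, "cn=config", mods, NULL, NULL);
    if (rc != LDAP_SUCCESS) {
        fprintf(stderr, "failed to set passwordExp: %s\n", ldap_err2string(rc));
    }
    ldap_unbind_ext_s(ld, NULL, NULL);
    return rc == LDAP_SUCCESS ? 0 : 1;
}
```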
7bbce966e820bdb160aff749387593b69679dbdc
389ds/389-ds-base
Ticket #12 - 389 DS DNA Plugin / Replication failing on GSSAPI https://fedorahosted.org/389/ticket/12 Resolves: Ticket #12 Bug Description: 389 DS DNA Plugin / Replication failing on GSSAPI Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: The problem is due to timeout. The default DNA range request timeout is 10ms, which is far too short in WAN environments. The fix is twofold: 1) make the default DNA range request timeout 10 minutes, the same as the default replication timeout; 2) OpenLDAP uses errno to report the timeout, so be sure to print the errno and message when we get connection/bind failures. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit 7bbce966e820bdb160aff749387593b69679dbdc Author: Rich Megginson <[email protected]> Date: Wed Jan 18 11:01:35 2012 -0700 Ticket #12 - 389 DS DNA Plugin / Replication failing on GSSAPI https://fedorahosted.org/389/ticket/12 Resolves: Ticket #12 Bug Description: 389 DS DNA Plugin / Replication failing on GSSAPI Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: The problem is due to timeout. The default DNA range request timeout is 10ms, which is far too short in WAN environments. The fix is two fold 1) make the default DNA range request timeout 10 minutes, the same as the default replication timeout 2) openldap uses errno to report the timeout, so be sure to print the errno and message when we get connection/bind failures. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c index 32b6d1132..2c7876f52 100644 --- a/ldap/servers/plugins/dna/dna.c +++ b/ldap/servers/plugins/dna/dna.c @@ -74,7 +74,8 @@ #define DNA_FAILURE -1 /* Default range request timeout */ -#define DNA_DEFAULT_TIMEOUT 10 +/* use the default replication timeout */ +#define DNA_DEFAULT_TIMEOUT 600 * 1000 /* 600 seconds in milliseconds */ /** * DNA config types diff --git a/ldap/servers/slapd/ldaputil.c b/ldap/servers/slapd/ldaputil.c index c4cec1217..790143206 100644 --- a/ldap/servers/slapd/ldaputil.c +++ b/ldap/servers/slapd/ldaputil.c @@ -987,8 +987,8 @@ slapi_ldap_bind( if (LDAP_SUCCESS != rc) { slapi_log_error(SLAPI_LOG_FATAL, "slapi_ldap_bind", "Error: could not send startTLS request: " - "error %d (%s)\n", - rc, ldap_err2string(rc)); + "error %d (%s) errno %d (%s)\n", + rc, ldap_err2string(rc), errno, slapd_system_strerror(errno)); goto done; } slapi_log_error(SLAPI_LOG_SHELL, "slapi_ldap_bind", @@ -1026,10 +1026,10 @@ slapi_ldap_bind( rc = slapi_ldap_get_lderrno(ld, NULL, NULL); slapi_log_error(SLAPI_LOG_FATAL, "slapi_ldap_bind", "Error reading bind response for id " - "[%s] mech [%s]: error %d (%s)\n", + "[%s] mech [%s]: error %d (%s) errno %d (%s)\n", bindid ? bindid : "(anon)", mech ? mech : "SIMPLE", - rc, ldap_err2string(rc)); + rc, ldap_err2string(rc), errno, slapd_system_strerror(errno)); goto done; } else if (rc == 0) { /* timeout */ rc = LDAP_TIMEOUT; @@ -1050,10 +1050,10 @@ slapi_ldap_bind( 0)) != LDAP_SUCCESS) { slapi_log_error(SLAPI_LOG_FATAL, "slapi_ldap_bind", "Error: could not bind id " - "[%s] mech [%s]: error %d (%s)\n", + "[%s] mech [%s]: error %d (%s) errno %d (%s)\n", bindid ? bindid : "(anon)", mech ? mech : "SIMPLE", - rc, ldap_err2string(rc)); + rc, ldap_err2string(rc), errno, slapd_system_strerror(errno)); goto done; } } @@ -1064,10 +1064,10 @@ slapi_ldap_bind( rc = slapi_ldap_get_lderrno(ld, NULL, NULL); slapi_log_error(SLAPI_LOG_FATAL, "slapi_ldap_bind", "Error: could not read bind results for id " - "[%s] mech [%s]: error %d (%s)\n", + "[%s] mech [%s]: error %d (%s) errno %d (%s)\n", bindid ? bindid : "(anon)", mech ? mech : "SIMPLE", - rc, ldap_err2string(rc)); + rc, ldap_err2string(rc), errno, slapd_system_strerror(errno)); goto done; } } @@ -1407,10 +1407,12 @@ slapd_ldap_sasl_interactive_bind( rc = slapi_ldap_get_lderrno(ld, NULL, &errmsg); slapi_log_error(SLAPI_LOG_FATAL, "slapd_ldap_sasl_interactive_bind", "Error: could not perform interactive bind for id " - "[%s] mech [%s]: error %d (%s) (%s)\n", + "[%s] mech [%s]: LDAP error %d (%s) (%s) " + "errno %d (%s)\n", bindid ? bindid : "(anon)", mech ? 
mech : "SIMPLE", - rc, ldap_err2string(rc), errmsg); + rc, ldap_err2string(rc), errmsg, + errno, slapd_system_strerror(errno)); if (can_retry_bind(ld, mech, bindid, creds, rc, errmsg)) { ; /* pass through to retry one time */ } else {
0
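The fix above raises DNA_DEFAULT_TIMEOUT from 10 to 600 * 1000 milliseconds and adds errno to the bind/connection error messages, since OpenLDAP reports the timeout condition through errno. Below is a small sketch, under assumed names, of how a millisecond timeout of that kind is typically fed to ldap_result() and how errno can be reported next to the return code; it is not the plugin's actual code.

```c
#include <errno.h>
#include <ldap.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>

#define RANGE_REQUEST_TIMEOUT_MS (600 * 1000)    /* 10 minutes, as in the fix */

/* Wait for the range-request reply with a millisecond timeout and report
 * both the ldap_result() return code and errno when it fails or times out. */
int
wait_for_range_result(LDAP *ld, int msgid, LDAPMessage **resp)
{
    struct timeval tv;
    int rc;

    tv.tv_sec = RANGE_REQUEST_TIMEOUT_MS / 1000;
    tv.tv_usec = (RANGE_REQUEST_TIMEOUT_MS % 1000) * 1000;

    rc = ldap_result(ld, msgid, LDAP_MSG_ALL, &tv, resp);
    if (rc <= 0) {    /* 0 == timed out, -1 == error */
        fprintf(stderr, "range request failed: rc %d errno %d (%s)\n",
                rc, errno, strerror(errno));
        return -1;
    }
    return 0;
}
```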
957e2ae225803cb108e6ea3beec431eb015568ae
389ds/389-ds-base
Issue 50439 - Update docker integration for Fedora Bug Description: Fedora Dockerfile has been unbuildable/broken for some time. Fix Description: Update the Dockerfile to make it work while mimicking ideas from its SUSE counterpart. Additionally, change wget to curl in rpm.mk since wget does not seem to be available in the minimal image. Relates https://pagure.io/389-ds-base/issue/50439 Relates https://pagure.io/389-ds-base/pull-request/50441#comment-88961 Author: Matus Honek <[email protected]> Review by: firstyear, vashirov (thanks!)
commit 957e2ae225803cb108e6ea3beec431eb015568ae Author: Matus Honek <[email protected]> Date: Tue Nov 19 19:40:32 2019 +0100 Issue 50439 - Update docker integration for Fedora Bug Description: Fedora Dockerfile has been unbuildable/broken for sometime. Fix Description: Update the Dockerfile to make it work while mimicking ideas from the SUSE's counterpart. Additionaly, changing wget to curl in rpm.mk since wget does not seem to be available in the minimal image. Relates https://pagure.io/389-ds-base/issue/50439 Relates https://pagure.io/389-ds-base/pull-request/50441#comment-88961 Author: Matus Honek <[email protected]> Review by: firstyear, vashirov (thanks!) diff --git a/docker.mk b/docker.mk index 4f07ceca2..0f42b0bfe 100644 --- a/docker.mk +++ b/docker.mk @@ -1,3 +1,6 @@ suse: docker build -t 389-ds-suse:master -f docker/389-ds-suse/Dockerfile . + +fedora: + docker build -t 389-ds-fedora:master -f docker/389-ds-fedora/Dockerfile . diff --git a/docker/389-ds-fedora/Dockerfile b/docker/389-ds-fedora/Dockerfile index d61df8cba..ba901e130 100644 --- a/docker/389-ds-fedora/Dockerfile +++ b/docker/389-ds-fedora/Dockerfile @@ -6,41 +6,46 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- -FROM fedora:26 +FROM fedora:latest MAINTAINER [email protected] -EXPOSE 389 636 -ENV container docker - -RUN mkdir -p /usr/local/src -WORKDIR /usr/local/src +EXPOSE 3389 3636 ADD ./ /usr/local/src/389-ds-base - -RUN dnf upgrade -y && \ - dnf install --setopt=strict=False -y \ - @buildsys-build rpm-build make bzip2 git rsync \ - `grep -E "^(Build)?Requires" 389-ds-base/rpm/389-ds-base.spec.in | grep -v -E '(name|MODULE)' | awk '{ print $2 }' | sed 's/%{python3_pkgversion}/3/g' | grep -v "^/" | grep -v pkgversion | sort | uniq | tr '\n' ' '` && \ +WORKDIR /usr/local/src/389-ds-base + +# install dependencies +RUN dnf upgrade -y \ + && dnf install --setopt=strict=False -y @buildsys-build rpm-build make bzip2 git rsync \ + `grep -E "^(Build)?Requires" rpm/389-ds-base.spec.in \ + | grep -v -E '(name|MODULE)' \ + | awk '{ print $2 }' \ + | sed 's/%{python3_pkgversion}/3/g' \ + | grep -v "^/" \ + | grep -v pkgversion \ + | sort | uniq \ + | tr '\n' ' '` \ + && dnf clean all + +# build +RUN make -f rpm.mk rpms || sh -c 'echo "build failed, sleeping for some time to allow you debug" ; sleep 3600' + +RUN dnf install -y dist/rpms/*389*.rpm && \ dnf clean all +# Link some known static locations to point to /data +RUN mkdir -p /data/config && \ + mkdir -p /data/ssca && \ + mkdir -p /data/run && \ + mkdir -p /var/run/dirsrv && \ + ln -s /data/config /etc/dirsrv/slapd-localhost && \ + ln -s /data/ssca /etc/dirsrv/ssca && \ + ln -s /data/run /var/run/dirsrv -### CHANGE THIS TO A ./configure and build that way. - -RUN cd 389-ds-base && \ - PERL_ON=0 RUST_ON=1 make -f rpm.mk rpms - -RUN dnf install -y 389-ds-base/dist/rpms/*389*.rpm && \ - dnf clean all - -# Create the example setup inf. It's valid for containers! -# Build the instance from the new installer tools. -RUN /usr/sbin/dscreate create-template > /root/ds-setup.inf && /usr/sbin/dscreate -v from-file /root/ds-setup.inf --containerised +VOLUME /data -# Finally add the volumes, they will inherit the contents of these directories. 
-VOLUME /etc/dirsrv -VOLUME /var/log/dirsrv -VOLUME /var/lib/dirsrv +#USER dirsrv -# Or, run them as dirsrv -USER dirsrv -CMD ["/usr/sbin/ns-slapd", "-d", "0", "-D", "/etc/dirsrv/slapd-localhost", "-i", "/var/run/dirsrv/slapd-localhost.pid"] +HEALTHCHECK --start-period=5m --timeout=5s --interval=5s --retries=2 \ + CMD /usr/sbin/dscontainer -H +CMD [ "/usr/sbin/dscontainer", "-r" ] diff --git a/rpm.mk b/rpm.mk index a8574a454..886f049b1 100644 --- a/rpm.mk +++ b/rpm.mk @@ -78,7 +78,7 @@ tarballs: local-archive rm -rf dist/$(NAME_VERSION) cd dist/sources ; \ if [ $(BUNDLE_JEMALLOC) -eq 1 ]; then \ - wget $(JEMALLOC_URL) ; \ + curl -LO $(JEMALLOC_URL) ; \ fi rpmroot:
0
5cbcd502107d5d32ab5bea6584582903c8b43abd
389ds/389-ds-base
Bump tokio from 1.24.1 to 1.25.0 in /src (#5629) Update cargo.lock to upgrade "tokio" rust component to 1.25 Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.24.1 to 1.25.0. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.24.1...tokio-1.25.0) --- updated-dependencies: - dependency-name: tokio dependency-type: indirect ... Signed-off-by: dependabot[bot] <[email protected]> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
commit 5cbcd502107d5d32ab5bea6584582903c8b43abd Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue Feb 7 16:30:56 2023 +0100 Bump tokio from 1.24.1 to 1.25.0 in /src (#5629) Update cargo.lock to upgrade "tokio" rust component to 1.25 Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.24.1 to 1.25.0. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.24.1...tokio-1.25.0) --- updated-dependencies: - dependency-name: tokio dependency-type: indirect ... Signed-off-by: dependabot[bot] <[email protected]> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> diff --git a/src/Cargo.lock b/src/Cargo.lock index 92356975e..9aa8dabfa 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -673,9 +673,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.24.1" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d9f76183f91ecfb55e1d7d5602bd1d979e38a3a522fe900241cf195624d67ae" +checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" dependencies = [ "autocfg", "pin-project-lite",
0
a44290a979169726afc380a9cce79bbba0adacde
389ds/389-ds-base
Bug 613056 - fix Coverity Defect Type: Null pointer dereferences issues 11892 - 11939 https://bugzilla.redhat.com/show_bug.cgi?id=613056 Resolves: bug 613056 Bug description: Fix Coverity Defect Type: Null pointer dereferences issues 11892 - 11939 description: Catch possible NULL pointer in vlv_create_key() and vlv_trim_candidates(). Coverity ID: 11926
commit a44290a979169726afc380a9cce79bbba0adacde Author: Noriko Hosoi <[email protected]> Date: Fri Aug 20 14:45:19 2010 -0700 Bug 613056 - fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939 https://bugzilla.redhat.com/show_bug.cgi?id=613056 Resolves: bug 613056 Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939 description: Catch possible NULL pointer in vlv_create_key() and vlv_trim_candidates(). coverity ID: 11926 diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c index 84a42b8bb..9d749d6ec 100644 --- a/ldap/servers/slapd/back-ldbm/vlv.c +++ b/ldap/servers/slapd/back-ldbm/vlv.c @@ -555,6 +555,9 @@ vlv_create_key(struct vlvIndex* p, struct backentry* e) unsigned char char_min = 0x00; unsigned char char_max = 0xFF; struct vlv_key *key= vlv_key_new(); + struct berval **value = NULL; + int free_value = 0; + if(p->vlv_sortkey!=NULL) { /* Foreach sorted attribute... */ @@ -573,8 +576,8 @@ vlv_create_key(struct vlvIndex* p, struct backentry* e) /* xxxPINAKI */ /* need to free some stuff! */ Slapi_Value **cvalue = NULL; - struct berval **value = NULL, *lowest_value = NULL; - int free_value= 0; + struct berval *lowest_value = NULL; + if (attr != NULL && !valueset_isempty(&attr->a_present_values)) { /* Sorted attribute found. */ @@ -609,7 +612,12 @@ vlv_create_key(struct vlvIndex* p, struct backentry* e) valuearray_get_bervalarray(va,&bval); matchrule_values_to_keys(p->vlv_mrpb[sortattr],bval,&value); } - } + } + + if (!value) { + goto error; + } + for(totalattrs=0;value[totalattrs]!=NULL;totalattrs++) {}; /* Total Number of Attributes */ if(totalattrs==1) { @@ -617,7 +625,7 @@ vlv_create_key(struct vlvIndex* p, struct backentry* e) } else { - lowest_value = attr_value_lowest(value, slapi_berval_cmp); + lowest_value = attr_value_lowest(value, slapi_berval_cmp); } } /* end of if (attr != NULL && ...) */ if(p->vlv_sortkey[sortattr]->sk_reverseorder) @@ -680,8 +688,10 @@ vlv_create_key(struct vlvIndex* p, struct backentry* e) } if(free_value) { - ber_bvecfree(value); + ber_bvecfree(value); + free_value = 0; } + value = NULL; } sortattr++; } @@ -693,6 +703,11 @@ vlv_create_key(struct vlvIndex* p, struct backentry* e) vlv_key_addattr(key,&val); } return key; + +error: + if (free_value) ber_bvecfree(value); + vlv_key_delete(&key); + return NULL; } /* @@ -1420,7 +1435,7 @@ vlv_trim_candidates(backend *be, const IDList *candidates, const sort_spec* sort } } } - LDAPDebug( LDAP_DEBUG_TRACE, "<= vlv_trim_candidates: Trimmed list contains %lu entries.\n",(u_long)resultIdl->b_nids, 0, 0 ); + LDAPDebug( LDAP_DEBUG_TRACE, "<= vlv_trim_candidates: Trimmed list contains %lu entries.\n", (u_long)(resultIdl ? resultIdl->b_nids : 0), 0, 0 ); *trimmedCandidates= resultIdl; return return_value; }
0
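The two hunks above add NULL guards: vlv_create_key() now jumps to an error label when the matchrule key array comes back NULL, and the trace message in vlv_trim_candidates() only dereferences resultIdl when it is non-NULL. Both patterns, reduced to a self-contained toy with simplified stand-in types, look like this:

```c
#include <stdio.h>

/* Simplified stand-in for the IDList used by vlv_trim_candidates(). */
struct idlist { unsigned long b_nids; };

/* Pattern 1: refuse to walk a value array that may be NULL, mirroring the
 * "if (!value) goto error;" added to vlv_create_key(). */
static int count_values(char **value)
{
    int total;

    if (value == NULL) {
        return -1;                        /* error path: no keys to build */
    }
    for (total = 0; value[total] != NULL; total++)
        ;
    return total;
}

/* Pattern 2: guard the dereference inside the log call, mirroring
 * (resultIdl ? resultIdl->b_nids : 0) in the trace message. */
static void trace_trimmed(const struct idlist *resultIdl)
{
    printf("Trimmed list contains %lu entries.\n",
           (unsigned long)(resultIdl ? resultIdl->b_nids : 0));
}

int main(void)
{
    char *vals[] = { "a", "b", NULL };

    printf("%d values\n", count_values(vals));
    printf("%d values\n", count_values(NULL));
    trace_trimmed(NULL);
    return 0;
}
```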
fda63dc597956f93274274ba735f54d75fb777de
389ds/389-ds-base
Ticket 48184 - revert previous patch around nunc-stans shutdown crash https://pagure.io/389-ds-base/issue/48184
commit fda63dc597956f93274274ba735f54d75fb777de Author: Mark Reynolds <[email protected]> Date: Thu Mar 29 13:24:47 2018 -0400 Ticket 48184 - revert previous patch around unuc-stans shutdown crash https://pagure.io/389-ds-base/issue/48184 diff --git a/ldap/servers/slapd/conntable.c b/ldap/servers/slapd/conntable.c index 4fae1741d..62d5eac0b 100644 --- a/ldap/servers/slapd/conntable.c +++ b/ldap/servers/slapd/conntable.c @@ -91,19 +91,6 @@ connection_table_abandon_all_operations(Connection_Table *ct) } } -void -connection_table_disconnect_all(Connection_Table *ct) -{ - for (size_t i = 0; i < ct->size; i++) { - if (ct->c[i].c_mutex) { - Connection *c = &(ct->c[i]); - PR_EnterMonitor(c->c_mutex); - disconnect_server_nomutex(c, c->c_connid, -1, SLAPD_DISCONNECT_ABORT, ECANCELED); - PR_ExitMonitor(c->c_mutex); - } - } -} - /* Given a file descriptor for a socket, this function will return * a slot in the connection table to use. * diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 3358f34d7..5d191fa76 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -1176,30 +1176,6 @@ slapd_daemon(daemon_ports_t *ports, ns_thrpool_t *tp) housekeeping_stop(); /* Run this after op_thread_cleanup() logged sth */ disk_monitoring_stop(); - /* - * Now that they are abandonded, we need to mark them as done. - * In NS while it's safe to allow excess jobs to be cleaned by - * by the walk and ns_job_done of remaining queued events, the - * issue is that if we allow something to live past this point - * the CT is freed from underneath, and bad things happen (tm). - * - * NOTE: We do this after we stop psearch, because there could - * be a race between flagging the psearch done, and users still - * try to send on the connection. Similar with op_threads. - */ - connection_table_disconnect_all(the_connection_table); - - /* - * WARNING: Normally we should close the tp in main - * but because of issues in the current connection design - * we need to close it here to guarantee events won't fire! - * - * All the connection close jobs "should" complete before - * shutdown at least. - */ - ns_thrpool_shutdown(tp); - ns_thrpool_wait(tp); - threads = g_get_active_threadcnt(); if (threads > 0) { slapi_log_err(SLAPI_LOG_INFO, "slapd_daemon", @@ -1652,18 +1628,25 @@ ns_handle_closure(struct ns_job_t *job) Connection *c = (Connection *)ns_job_get_data(job); int do_yield = 0; +/* this function must be called from the event loop thread */ +#ifdef DEBUG + PR_ASSERT(0 == NS_JOB_IS_THREAD(ns_job_get_type(job))); +#else + /* This doesn't actually confirm it's in the event loop thread, but it's a start */ + if (NS_JOB_IS_THREAD(ns_job_get_type(job)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "ns_handle_closure", "Attempt to close outside of event loop thread %" PRIu64 " for fd=%d\n", + c->c_connid, c->c_sd); + return; + } +#endif + PR_EnterMonitor(c->c_mutex); - /* Assert we really have the right job state. */ - PR_ASSERT(job == c->c_job); connection_release_nolock_ext(c, 1); /* release ref acquired for event framework */ PR_ASSERT(c->c_ns_close_jobs == 1); /* should be exactly 1 active close job - this one */ c->c_ns_close_jobs--; /* this job is processing closure */ - /* Because handle closure will add a new job, we need to detach our current one. */ - c->c_job = NULL; do_yield = ns_handle_closure_nomutex(c); PR_ExitMonitor(c->c_mutex); - /* Remove this task now. 
*/ ns_job_done(job); if (do_yield) { /* closure not done - another reference still outstanding */ @@ -1686,14 +1669,6 @@ ns_connection_post_io_or_closing(Connection *conn) return; } - /* - * Cancel any existing ns jobs we have registered. - */ - if (conn->c_job != NULL) { - ns_job_done(conn->c_job); - conn->c_job = NULL; - } - if (CONN_NEEDS_CLOSING(conn)) { /* there should only ever be 0 or 1 active closure jobs */ PR_ASSERT((conn->c_ns_close_jobs == 0) || (conn->c_ns_close_jobs == 1)); @@ -1703,10 +1678,13 @@ ns_connection_post_io_or_closing(Connection *conn) conn->c_connid, conn->c_sd); return; } else { + /* just make sure we schedule the event to be closed in a timely manner */ + tv.tv_sec = 0; + tv.tv_usec = slapd_wakeup_timer * 1000; conn->c_ns_close_jobs++; /* now 1 active closure job */ connection_acquire_nolock_ext(conn, 1 /* allow acquire even when closing */); /* event framework now has a reference */ - /* Close the job asynchronously. Why? */ - ns_result_t job_result = ns_add_job(conn->c_tp, NS_JOB_TIMER, ns_handle_closure, conn, &(conn->c_job)); + ns_result_t job_result = ns_add_timeout_job(conn->c_tp, &tv, NS_JOB_TIMER, + ns_handle_closure, conn, NULL); if (job_result != NS_SUCCESS) { if (job_result == NS_SHUTDOWN) { slapi_log_err(SLAPI_LOG_INFO, "ns_connection_post_io_or_closing", "post closure job " @@ -1750,7 +1728,7 @@ ns_connection_post_io_or_closing(Connection *conn) #endif ns_result_t job_result = ns_add_io_timeout_job(conn->c_tp, conn->c_prfd, &tv, NS_JOB_READ | NS_JOB_PRESERVE_FD, - ns_handle_pr_read_ready, conn, &(conn->c_job)); + ns_handle_pr_read_ready, conn, NULL); if (job_result != NS_SUCCESS) { if (job_result == NS_SHUTDOWN) { slapi_log_err(SLAPI_LOG_INFO, "ns_connection_post_io_or_closing", "post I/O job for " @@ -1779,12 +1757,19 @@ ns_handle_pr_read_ready(struct ns_job_t *job) int maxthreads = config_get_maxthreadsperconn(); Connection *c = (Connection *)ns_job_get_data(job); - PR_EnterMonitor(c->c_mutex); - /* Assert we really have the right job state. */ - PR_ASSERT(job == c->c_job); +/* this function must be called from the event loop thread */ +#ifdef DEBUG + PR_ASSERT(0 == NS_JOB_IS_THREAD(ns_job_get_type(job))); +#else + /* This doesn't actually confirm it's in the event loop thread, but it's a start */ + if (NS_JOB_IS_THREAD(ns_job_get_type(job)) != 0) { + slapi_log_err(SLAPI_LOG_ERR, "ns_handle_pr_read_ready", "Attempt to handle read ready outside of event loop thread %" PRIu64 " for fd=%d\n", + c->c_connid, c->c_sd); + return; + } +#endif - /* On all code paths we remove the job, so set it null now */ - c->c_job = NULL; + PR_EnterMonitor(c->c_mutex); slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "activity on conn %" PRIu64 " for fd=%d\n", c->c_connid, c->c_sd); @@ -1844,7 +1829,6 @@ ns_handle_pr_read_ready(struct ns_job_t *job) slapi_log_err(SLAPI_LOG_CONNS, "ns_handle_pr_read_ready", "queued conn %" PRIu64 " for fd=%d\n", c->c_connid, c->c_sd); } - /* Since we call done on the job, we need to remove it here. 
*/ PR_ExitMonitor(c->c_mutex); ns_job_done(job); return; diff --git a/ldap/servers/slapd/fe.h b/ldap/servers/slapd/fe.h index f47bb6145..4d25a9fb8 100644 --- a/ldap/servers/slapd/fe.h +++ b/ldap/servers/slapd/fe.h @@ -100,7 +100,6 @@ extern Connection_Table *the_connection_table; /* JCM - Exported from globals.c Connection_Table *connection_table_new(int table_size); void connection_table_free(Connection_Table *ct); void connection_table_abandon_all_operations(Connection_Table *ct); -void connection_table_disconnect_all(Connection_Table *ct); Connection *connection_table_get_connection(Connection_Table *ct, int sd); int connection_table_move_connection_out_of_active_list(Connection_Table *ct, Connection *c); void connection_table_move_connection_on_to_active_list(Connection_Table *ct, Connection *c); diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index bbaa7a4a5..b44c0f922 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -1650,7 +1650,6 @@ typedef struct conn void *c_io_layer_cb_data; /* callback data */ struct connection_table *c_ct; /* connection table that this connection belongs to */ ns_thrpool_t *c_tp; /* thread pool for this connection */ - struct ns_job_t *c_job; /* If it exists, the current ns_job_t */ int c_ns_close_jobs; /* number of current close jobs */ char *c_ipaddr; /* ip address str - used by monitor */ } Connection;
0
9d8887ad91c121ec68bfb5c0f4871611ee6b8a41
389ds/389-ds-base
Ticket 49754 - instances created with dscreate can not be upgraded with setup-ds.pl Bug Description: If you create an instance with dscreate and try to upgrade that instance using setup-ds.pl it will fail. While you should not mix and match the python and perl tools, it is still possible and it should work. Fix Description: The first problem was that the instance dir (inst_dir) was not set during dscreate, and second we were also not setting the correct inst_dir in defaults.inf. Also dscreate does not create a backend by default, which caused 80upgradednformat.pl to fail on an upgrade. Finally updated UI's instance creation template to use the correct inst_dir. https://pagure.io/389-ds-base/issue/49754 Reviewed by: vashirov(Thanks!)
commit 9d8887ad91c121ec68bfb5c0f4871611ee6b8a41 Author: Mark Reynolds <[email protected]> Date: Wed Jun 6 15:43:29 2018 -0400 Ticket 49754 - instances created with dscreate can not be upgraded with setup-ds.pl Bug Description: If you create an instance with dscreate and try to upgrade that instance using setup-ds.pl it will fail. While you should not mix and match the python and perl tools, it is still possible and it should work. Fix Description: The first problem was that the instance dir (inst_dir) was not set during dscreate, and second we were also not setting the correct inst_dir in defaults.inf. Also dscreate does not create a backend by default, which caused 80upgradednformat.pl to fail on an upgrade. Finally updated UI's instance creation template to use the correct inst_dir. https://pagure.io/389-ds-base/issue/49754 Reviewed by: vashirov(Thanks!) diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in index 564b64f44..bdc408191 100644 --- a/ldap/admin/src/defaults.inf.in +++ b/ldap/admin/src/defaults.inf.in @@ -37,7 +37,7 @@ run_dir = @localstatedir@/run/dirsrv # This is the expected location of ldapi. ldapi = @localstatedir@/run/slapd-{instance_name}.socket pid_file = @localstatedir@/run/dirsrv/slapd-{instance_name}.pid -inst_dir = @serverdir@ +inst_dir = @serverdir@/slapd-{instance_name} plugin_dir = @serverplugindir@ system_schema_dir = @systemschemadir@ diff --git a/ldap/admin/src/scripts/80upgradednformat.pl.in b/ldap/admin/src/scripts/80upgradednformat.pl.in index c566b42b8..2cb7b1a86 100644 --- a/ldap/admin/src/scripts/80upgradednformat.pl.in +++ b/ldap/admin/src/scripts/80upgradednformat.pl.in @@ -36,7 +36,7 @@ sub runinst { my $backend_entry; my $mtentry = $conn->search($mappingtree, "onelevel", "(cn=*)", 0, @attr); if (!$mtentry) { - return ("error_no_mapping_tree_entries", $!); + return (); } my $db_config_entry = diff --git a/src/cockpit/389-console/js/servers.js b/src/cockpit/389-console/js/servers.js index cc46c2dbc..6e4d79047 100644 --- a/src/cockpit/389-console/js/servers.js +++ b/src/cockpit/389-console/js/servers.js @@ -40,7 +40,7 @@ var create_full_template = "user = USER\n" + "group = GROUP\n" + "initconfig_dir = /etc/sysconfig\n" + - "inst_dir = /usr/lib64/dirsrv\n" + + "inst_dir = /usr/lib64/dirsrv/slapd-{instance_name}\n" + "instance_name = localhost\n" + "ldif_dir = /var/lib/dirsrv/slapd-{instance_name}/ldif\n" + "lib_dir = /usr/lib64\n" + @@ -72,7 +72,7 @@ var create_inf_template = "root_password = ROOTPW\n" + "secure_port = SECURE_PORT\n" + "self_sign_cert = SELF_SIGN\n"; - // TODO LDAPI Option (WIP) + function load_server_config() { var mark = document.getElementById("server-config-title"); diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py index d3f56cb02..a6d41be8b 100644 --- a/src/lib389/lib389/instance/setup.py +++ b/src/lib389/lib389/instance/setup.py @@ -399,7 +399,7 @@ class SetupDs(object): ldif_dir=slapd['ldif_dir'], bak_dir=slapd['backup_dir'], run_dir=slapd['run_dir'], - inst_dir="", + inst_dir=slapd['inst_dir'], log_dir=slapd['log_dir'], fqdn=general['full_machine_name'], ds_port=slapd['port'],
0
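The fix in the commit above hinges on the `inst_dir = @serverdir@/slapd-{instance_name}` template line in defaults.inf and on passing `slapd['inst_dir']` through setup. As a rough illustration of how a defaults.inf-style placeholder can be expanded per instance (a hypothetical standalone sketch, not lib389's actual template handling; the paths and the `expand_defaults()` helper are invented):

```python
# Minimal sketch: expand defaults.inf-style paths that embed {instance_name}.
# The path values and the expand_defaults() helper are illustrative only.

DEFAULTS = {
    "run_dir": "/var/run/dirsrv",
    "pid_file": "/var/run/dirsrv/slapd-{instance_name}.pid",
    "inst_dir": "/usr/lib64/dirsrv/slapd-{instance_name}",  # note the slapd-<name> suffix
}

def expand_defaults(defaults: dict, instance_name: str) -> dict:
    """Substitute {instance_name} into every template value."""
    return {key: value.format(instance_name=instance_name)
            for key, value in defaults.items()}

if __name__ == "__main__":
    expanded = expand_defaults(DEFAULTS, "localhost")
    print(expanded["inst_dir"])   # /usr/lib64/dirsrv/slapd-localhost
```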
051e0eb50c74d5208355253ff53b301c262c7df2
389ds/389-ds-base
Ticket 48978 - Update the logging function to accept sev level Description: Change LDAPDebug() and slapi_log_error() to accept a severity level (syslog level). https://fedorahosted.org/389/ticket/48978 Reviewed by: ?
commit 051e0eb50c74d5208355253ff53b301c262c7df2 Author: Mark Reynolds <[email protected]> Date: Wed Sep 7 11:37:34 2016 -0400 Ticket 48978 - Update the logging function to accept sev level Description: Change LDAPDebug() and slapi_log_error() to accept a security level(syslog level). https://fedorahosted.org/389/ticket/48978 Reviewed by: ? diff --git a/ldap/include/ldaplog.h b/ldap/include/ldaplog.h index f348c0578..ae8d798c3 100644 --- a/ldap/include/ldaplog.h +++ b/ldap/include/ldaplog.h @@ -44,11 +44,11 @@ extern "C" { /* debugging stuff */ /* Disable by default */ -#define LDAPDebug( level, fmt, arg1, arg2, arg3 ) +#define LDAPDebug( level, sev, fmt, arg1, arg2, arg3 ) #define LDAPDebugLevelIsSet( level ) (0) -#define LDAPDebug0Args( level, fmt ) -#define LDAPDebug1Arg( level, fmt, arg ) -#define LDAPDebug2Args( level, fmt, arg1, arg2 ) +#define LDAPDebug0Args( level, sev, fmt ) +#define LDAPDebug1Arg( level, sev, fmt, arg ) +#define LDAPDebug2Args( level, sev, fmt, arg1, arg2 ) #ifdef LDAP_DEBUG # undef LDAPDebug @@ -58,28 +58,28 @@ extern "C" { # undef LDAPDebugLevelIsSet extern int slapd_ldap_debug; -# define LDAPDebug( level, fmt, arg1, arg2, arg3 ) \ +# define LDAPDebug( level, sev, fmt, arg1, arg2, arg3 ) \ { \ if ( slapd_ldap_debug & level ) { \ - slapd_log_error_proc( NULL, fmt, arg1, arg2, arg3 ); \ + slapd_log_error_proc( NULL, sev, fmt, arg1, arg2, arg3 ); \ } \ } -# define LDAPDebug0Args( level, fmt ) \ +# define LDAPDebug0Args( level, sev, fmt ) \ { \ if ( slapd_ldap_debug & level ) { \ - slapd_log_error_proc( NULL, fmt ); \ + slapd_log_error_proc( NULL, sev, fmt ); \ } \ } -# define LDAPDebug1Arg( level, fmt, arg ) \ +# define LDAPDebug1Arg( level, sev, fmt, arg ) \ { \ if ( slapd_ldap_debug & level ) { \ - slapd_log_error_proc( NULL, fmt, arg ); \ + slapd_log_error_proc( NULL, sev, fmt, arg ); \ } \ } -# define LDAPDebug2Args( level, fmt, arg1, arg2 ) \ +# define LDAPDebug2Args( level, sev, fmt, arg1, arg2 ) \ { \ if ( slapd_ldap_debug & level ) { \ - slapd_log_error_proc( NULL, fmt, arg1, arg2 ); \ + slapd_log_error_proc( NULL, sev, fmt, arg1, arg2 ); \ } \ } # define LDAPDebugLevelIsSet( level ) (0 != (slapd_ldap_debug & level)) diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index dece8575b..e5c6e11f9 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -91,7 +91,7 @@ static PRInt64 log__getfilesize(LOGFD fp); static PRInt64 log__getfilesize_with_filename(char *filename); static int log__enough_freespace(char *path); -static int vslapd_log_error(LOGFD fp, char *subsystem, char *fmt, va_list ap, int locked ); +static int vslapd_log_error(LOGFD fp, int sev_level, char *subsystem, char *fmt, va_list ap, int locked ); static int vslapd_log_access(char *fmt, va_list ap ); static void log_convert_time (time_t ctime, char *tbuf, int type); static time_t log_reverse_convert_time (char *tbuf); @@ -104,8 +104,9 @@ static void vslapd_log_emergency_error(LOGFD fp, const char *msg, int locked); static int slapd_log_error_proc_internal( - char *subsystem, /* omitted if NULL */ - char *fmt, + char *subsystem, /* omitted if NULL */ + int *sev_level, + char *fmt, va_list ap_err, va_list ap_file); @@ -2096,8 +2097,9 @@ slapd_log_auditfail_internal ( ******************************************************************************/ int slapd_log_error_proc( - char *subsystem, /* omitted if NULL */ - char *fmt, + char *subsystem, /* omitted if NULL */ + int *sev_level, + char *fmt, ... 
) { int rc = LDAP_SUCCESS; @@ -2107,7 +2109,7 @@ slapd_log_error_proc( if (loginfo.log_backend & LOGGING_BACKEND_INTERNAL) { va_start( ap_err, fmt ); va_start( ap_file, fmt ); - rc = slapd_log_error_proc_internal( subsystem, fmt, ap_err, ap_file ); + rc = slapd_log_error_proc_internal( subsystem, sev_level, fmt, ap_err, ap_file ); va_end(ap_file); va_end(ap_err); } @@ -2118,7 +2120,7 @@ slapd_log_error_proc( va_start( ap_err, fmt ); /* va_start( ap_file, fmt ); */ /* This returns void, so we hope it worked */ - vsyslog(LOG_ERROR, fmt, ap_err); + vsyslog(sev_level, fmt, ap_err); /* vsyslog(LOG_ERROR, fmt, ap_file); */ /* va_end(ap_file); */ va_end(ap_err); @@ -2128,7 +2130,7 @@ slapd_log_error_proc( va_start( ap_err, fmt ); /* va_start( ap_file, fmt ); */ /* This isn't handling RC nicely ... */ - rc = sd_journal_printv(LOG_ERROR, fmt, ap_err); + rc = sd_journal_printv(sev_level, fmt, ap_err); /* rc = sd_journal_printv(LOG_ERROR, fmt, ap_file); */ /* va_end(ap_file); */ va_end(ap_err); @@ -2139,8 +2141,9 @@ slapd_log_error_proc( static int slapd_log_error_proc_internal( - char *subsystem, /* omitted if NULL */ - char *fmt, + char *subsystem, /* omitted if NULL */ + int *sev_level, + char *fmt, va_list ap_err, va_list ap_file) { @@ -2162,19 +2165,19 @@ slapd_log_error_proc_internal( } if (!(detached)) { - rc = vslapd_log_error( NULL, subsystem, fmt, ap_err, 1 ); + rc = vslapd_log_error( NULL, sev_level, subsystem, fmt, ap_err, 1 ); } if ( loginfo.log_error_fdes != NULL ) { if (loginfo.log_error_state & LOGGING_NEED_TITLE) { log_write_title(loginfo.log_error_fdes); loginfo.log_error_state &= ~LOGGING_NEED_TITLE; } - rc = vslapd_log_error( loginfo.log_error_fdes, subsystem, fmt, ap_file, 1 ); + rc = vslapd_log_error( loginfo.log_error_fdes, sev_level, subsystem, fmt, ap_file, 1 ); } LOG_ERROR_UNLOCK_WRITE(); } else { /* log the problem in the stderr */ - rc = vslapd_log_error( NULL, subsystem, fmt, ap_err, 0 ); + rc = vslapd_log_error( NULL, sev_level, subsystem, fmt, ap_err, 0 ); } return( rc ); } @@ -2230,6 +2233,7 @@ vslapd_log_emergency_error(LOGFD fp, const char *msg, int locked) static int vslapd_log_error( LOGFD fp, + iont sev_level, char *subsystem, /* omitted if NULL */ char *fmt, va_list ap, @@ -2291,9 +2295,11 @@ vslapd_log_error( /* blen = strlen(buffer); */ /* This truncates again .... But we have the nice smprintf above! */ if (subsystem == NULL) { - PR_snprintf (buffer+blen, sizeof(buffer)-blen, "%s", vbuf); + PR_snprintf (buffer+blen, sizeof(buffer)-blen, "%s - %s", + vbuf, toupper(prioritynames[sev_level].c_name)); } else { - PR_snprintf (buffer+blen, sizeof(buffer)-blen, "%s - %s", subsystem, vbuf); + PR_snprintf (buffer+blen, sizeof(buffer)-blen, "%s - %s - %s", + subsystem, toupper(prioritynames[sev_level].c_name), vbuf); } buffer[sizeof(buffer)-1] = '\0'; @@ -2330,26 +2336,32 @@ vslapd_log_error( return( 0 ); } +/* + * Log a message to the errors log + * + * loglevel - The logging level: replication, plugin, etc + * severity - LOG_ERR, LOG_WARNING, LOG_INFO, etc + */ int -slapi_log_error( int severity, char *subsystem, char *fmt, ... ) +slapi_log_error( int loglevel, int severity, char *subsystem, char *fmt, ... 
) { va_list ap_err; va_list ap_file; int rc = LDAP_SUCCESS; int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */ - if ( severity < SLAPI_LOG_MIN || severity > SLAPI_LOG_MAX ) { + if ( loglevel < SLAPI_LOG_MIN || loglevel > SLAPI_LOG_MAX ) { (void)slapd_log_error_proc( subsystem, "slapi_log_error: invalid severity %d (message %s)\n", - severity, fmt ); + loglevel, fmt ); return( -1 ); } - if ( slapd_ldap_debug & slapi_log_map[ severity ] ) { + if ( slapd_ldap_debug & slapi_log_map[ loglevel ] ) { if (lbackend & LOGGING_BACKEND_INTERNAL) { va_start( ap_err, fmt ); va_start( ap_file, fmt ); - rc = slapd_log_error_proc_internal( subsystem, fmt, ap_err, ap_file ); + rc = slapd_log_error_proc_internal( subsystem, severity, fmt, ap_err, ap_file ); va_end(ap_file); va_end(ap_err); } @@ -2360,7 +2372,7 @@ slapi_log_error( int severity, char *subsystem, char *fmt, ... ) va_start( ap_err, fmt ); /* va_start( ap_file, fmt ); */ /* This returns void, so we hope it worked */ - vsyslog(LOG_ERROR, fmt, ap_err); + vsyslog(severity, fmt, ap_err); /* vsyslog(LOG_ERROR, fmt, ap_file); */ /* va_end(ap_file); */ va_end(ap_err); @@ -2370,7 +2382,7 @@ slapi_log_error( int severity, char *subsystem, char *fmt, ... ) va_start( ap_err, fmt ); /* va_start( ap_file, fmt ); */ /* This isn't handling RC nicely ... */ - rc = sd_journal_printv(LOG_ERROR, fmt, ap_err); + rc = sd_journal_printv(severity, fmt, ap_err); /* rc = sd_journal_printv(LOG_ERROR, fmt, ap_file); */ /* va_end(ap_file); */ va_end(ap_err);
0
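The change above separates the component log level (which messages are enabled at all) from the syslog severity (how serious a message is). A hypothetical Python analogue of that split, using only the standard library; the level-mask bits and the `log_error` wrapper are invented for illustration and are not the slapd API:

```python
import logging
import sys

# Hypothetical component-level mask, loosely analogous to slapd_ldap_debug bits.
LOG_PLUGIN = 0x1
LOG_REPL = 0x2
enabled_levels = LOG_PLUGIN | LOG_REPL

logging.basicConfig(stream=sys.stderr, format="%(levelname)s - %(message)s")
logger = logging.getLogger("slapd-sketch")
logger.setLevel(logging.DEBUG)

def log_error(level_bit: int, severity: int, subsystem: str, msg: str) -> None:
    """Emit the message only if its component level is enabled,
    and tag it with an independent severity (ERROR, WARNING, INFO...)."""
    if enabled_levels & level_bit:
        logger.log(severity, "%s - %s", subsystem, msg)

log_error(LOG_PLUGIN, logging.WARNING, "memberof-plugin", "group cache is cold")
log_error(LOG_REPL, logging.ERROR, "repl5", "failed to acquire replica")
```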
62d967419616d77891c78e4ee3553482cbea1c17
389ds/389-ds-base
Bug 537466 - nsslapd-distribution-plugin should not require plugin name to begin with "lib" https://bugzilla.redhat.com/show_bug.cgi?id=537466 Resolves: bug 537466 Bug Description: nsslapd-distribution-plugin should not require plugin name to begin with "lib" Reviewed by: nhosoi (Thanks!) Branch: HEAD Fix Description: We use the function PR_GetLibraryName from NSPR to get the platform-specific DLL/shared lib filename suffix. Unfortunately, this function also prepends the string "lib" to the given name if the given file has no suffix. If the given name already has the correct suffix, it does not prepend the "lib" to the name. get_plugin_name() should look for the original library name in the string returned by PR_GetLibraryName. If it is there, and has something before it in the string, see if it is "/lib" - if so, remove the extraneous "lib" string. If "/lib" is not there, then just pass the string through as is. To summarize: /full/path/to/name.so -> /full/path/to/name.so - error if /full/path/to/name.so does not exist name -> /default/plugin/path/libname.so -> /default/plugin/path/name.so name.so -> /default/plugin/path/name.so Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no
commit 62d967419616d77891c78e4ee3553482cbea1c17 Author: Rich Megginson <[email protected]> Date: Tue Jan 26 11:15:18 2010 -0700 Bug 537466 - nsslapd-distribution-plugin should not require plugin name to begin with "lib" https://bugzilla.redhat.com/show_bug.cgi?id=537466 Resolves: bug 537466 Bug Description: nsslapd-distribution-plugin should not require plugin name to begin with "lib" Reviewed by: nhosoi (Thanks!) Branch: HEAD Fix Description: We use the function PL_GetLibraryName from NSPR to get the platform specific DLL/shared lib filename suffix. Unfortunately, this function also prepends the string "lib" to the given name if the given file has no suffix. If the given name already has the correct suffix, it does not prepend the "lib" to the name. get_plugin_name() should look for the original library name in the string returned by PL_GetLibraryName. If it is there, and has something before it in the string, see if it is "/lib" - if so, remove the extraneous "lib" string. If "/lib" is not there, then just pass the string through as is. To summarize: /full/path/to/name.so -> /full/path/to/name.so - error if /full/path/to/name.so does not exist name -> /default/plugin/path/libname.so -> /default/plugin/path/name.so name.so -> /default/plugin/path/name.so Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/dynalib.c b/ldap/servers/slapd/dynalib.c index 4acae45bb..a659a5e5b 100644 --- a/ldap/servers/slapd/dynalib.c +++ b/ldap/servers/slapd/dynalib.c @@ -174,18 +174,31 @@ symload_report_error( const char *libpath, char *symbol, char *plugin, int libop /* PR_GetLibraryName does almost everything we need, and unfortunately a little bit more - it adds "lib" to be beginning of the library - name. So we have to strip that part off. + name if the library name does not end with the current platform + DLL suffix - so + foo.so -> /path/foo.so + libfoo.so -> /path/libfoo.so + BUT + foo -> /path/libfoo.so + libfoo -> /path/liblibfoo.so */ static char * get_plugin_name(const char *path, const char *lib) { + const char *libstr = "/lib"; + size_t libstrlen = 4; char *fullname = PR_GetLibraryName(path, lib); - char *ptr = PL_strrstr(fullname, "/lib"); - - if (ptr) { - ++ptr; /* now points at the "l" */ - /* just copy the remainder of the string on top of here */ - memmove(ptr, ptr+3, strlen(ptr+3)+1); + char *ptr = PL_strrstr(fullname, lib); + + /* see if /lib was added */ + if (ptr && ((ptr - fullname) >= libstrlen)) { + /* ptr is at the libname in fullname, and there is something before it */ + ptr -= libstrlen; /* ptr now points at the "/" in "/lib" if it is there */ + if (0 == PL_strncmp(ptr, libstr, libstrlen)) { + /* just copy the remainder of the string on top of here */ + ptr++; /* ptr now points at the "l" in "/lib" - keep the "/" */ + memmove(ptr, ptr+3, strlen(ptr+3)+1); + } } return fullname;
0
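The "To summarize" cases in the commit above boil down to a small path-normalization rule: keep an explicit path or an explicit .so suffix as-is, and never end up with an extra "lib" prefix that PR_GetLibraryName added on its own. A rough Python rendering of that intended result (the default plugin path and the helper are made up for the sketch; the real C code works on the NSPR result rather than building the path itself):

```python
import os

DEFAULT_PLUGIN_PATH = "/default/plugin/path"  # hypothetical, for illustration only
SUFFIX = ".so"

def get_plugin_name(lib: str) -> str:
    """Mimic the resolution described in the commit message:
    /full/path/name.so -> unchanged
    name.so            -> <default path>/name.so
    name               -> <default path>/name.so   (no extra 'lib' prefix)
    """
    if os.path.isabs(lib):
        return lib
    if not lib.endswith(SUFFIX):
        lib += SUFFIX
    return os.path.join(DEFAULT_PLUGIN_PATH, lib)

assert get_plugin_name("/full/path/to/name.so") == "/full/path/to/name.so"
assert get_plugin_name("name") == "/default/plugin/path/name.so"
assert get_plugin_name("name.so") == "/default/plugin/path/name.so"
```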
41f984ca92fe3d3d9f5297ff0c89f546749a322b
389ds/389-ds-base
Ticket 47853 - client hangs in add if memberof fails Bug Description: If the memberof plugin is enabled and a client adds an entry with a membership attribute, the client hangs when memberof_postop_add rejects the operation. Fix Description: The problem is an unexpected error code (-1) being set in the pblock. Setting the proper LDAP error code resolves the hang. https://fedorahosted.org/389/ticket/47853 Reviewed by: lkrispenz(Thanks!)
commit 41f984ca92fe3d3d9f5297ff0c89f546749a322b Author: Mark Reynolds <[email protected]> Date: Tue Jul 15 10:46:09 2014 -0400 Ticket 47853 - client hangs in add if memberof fails Bug Description: If memberof plugin is enabled and the client adds an entry with a membership attr it hangs if the memberof_postop_add rejects the operation. Fix Description: The problem is due to an unexpected error code being set in the pblock (-1). Setting the proper LDAP error code resolves the hang. https://fedorahosted.org/389/ticket/47853 Reviewed by: lkrispenz(Thanks!) diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index 8257e9d46..526e95f46 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -1604,9 +1604,8 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config, slapi_entry_attr_find( e, config->groupattrs[i], &members ); if(members) { - if(memberof_mod_attr_list_r(pb, config, mod_op, group_sdn, - op_this_sdn, members, ll) != 0){ - rc = -1; + if((rc = memberof_mod_attr_list_r(pb, config, mod_op, group_sdn, + op_this_sdn, members, ll)) != 0){ goto bail; } }
0
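The one-line change above propagates the helper's own result code instead of collapsing every failure to -1, which is not a valid LDAP result and left the client waiting. A toy Python sketch of the difference (the constants and callback names are invented, not the plugin API):

```python
# Hypothetical result codes standing in for LDAP result codes.
LDAP_SUCCESS = 0
LDAP_CONSTRAINT_VIOLATION = 19

def mod_attr_list(reject: bool) -> int:
    """Pretend inner helper: returns a concrete result code."""
    return LDAP_CONSTRAINT_VIOLATION if reject else LDAP_SUCCESS

def postop_add_broken(reject: bool) -> int:
    # Collapses every failure to -1, which the caller cannot map to a response.
    return -1 if mod_attr_list(reject) != LDAP_SUCCESS else LDAP_SUCCESS

def postop_add_fixed(reject: bool) -> int:
    # Propagates the helper's result code, as the patch does with rc.
    return mod_attr_list(reject)

assert postop_add_broken(True) == -1
assert postop_add_fixed(True) == LDAP_CONSTRAINT_VIOLATION
```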
ba8c3c948e434b4d346799bdd3a86604c8e437bf
389ds/389-ds-base
bump version to 1.2.8.a1
commit ba8c3c948e434b4d346799bdd3a86604c8e437bf Author: Rich Megginson <[email protected]> Date: Wed Dec 15 10:14:07 2010 -0700 bump version to 1.2.8.a1 diff --git a/VERSION.sh b/VERSION.sh index 23e16b785..c0ae82414 100644 --- a/VERSION.sh +++ b/VERSION.sh @@ -10,11 +10,11 @@ vendor="389 Project" # PACKAGE_VERSION is constructed from these VERSION_MAJOR=1 VERSION_MINOR=2 -VERSION_MAINT=7.4 +VERSION_MAINT=8 # if this is a PRERELEASE, set VERSION_PREREL # otherwise, comment it out # be sure to include the dot prefix in the prerel -#VERSION_PREREL=.a1 +VERSION_PREREL=.a1 # NOTES on VERSION_PREREL # use aN for an alpha release e.g. a1, a2, etc. # use rcN for a release candidate e.g. rc1, rc2, etc.
0
a4a53e1e2214c4489dc2ae8f4468863f63a96881
389ds/389-ds-base
Issue 4548 - CLI - dsconf needs better root DN access control plugin validation Description: There is no validation done for any of the root DN access control plugin settings. Relates: https://github.com/389ds/389-ds-base/issues/4548 Reviewed by: spichugi & firstyear (Thanks!!)
commit a4a53e1e2214c4489dc2ae8f4468863f63a96881 Author: Mark Reynolds <[email protected]> Date: Tue Jan 19 16:06:03 2021 -0500 Issue 4548 - CLI - dsconf needs better root DN access control plugin validation Description: There is no validation done for any of the root DN access control plugin settings. Relates: https://github.com/389ds/389-ds-base/issues/4548 Reviewed by: spichugi & firstyear (Thanks!!) diff --git a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py index 1bd974dd6..a8fb15fbc 100644 --- a/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py +++ b/src/lib389/lib389/cli_conf/plugins/rootdn_ac.py @@ -6,7 +6,9 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- +import socket from lib389.plugins import RootDNAccessControlPlugin +from lib389.utils import is_valid_hostname from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit arg_to_attr = { @@ -20,8 +22,71 @@ arg_to_attr = { } +def validate_args(args): + # validate the args + if args.close_time is not None: + try: + int(args.close_time) + except: + raise ValueError("The close time must be a 4 digit number: HHMM") + if len(args.close_time) != 4: + raise ValueError("The close time must be a 4 digit number: HHMM") + hour = int(args.close_time[:2]) + if hour < 0 or hour > 23: + raise ValueError(f"The hour portion of the time is invalid: {hour} Must be between 0 and 23") + min = int(args.close_time[-2:]) + if min < 0 or min > 59: + raise ValueError(f"The minute portion of the time is invalid: {min} Must be between 1 and 59") + + if args.open_time is not None: + try: + int(args.open_time) + except: + raise ValueError("The open time must be a 4 digit number: HHMM") + if len(args.open_time) != 4: + raise ValueError("The open time must be a 4 digit number: HHMM") + hour = int(args.open_time[:2]) + if hour < 0 or hour > 23: + raise ValueError(f"The hour portion of the time is invalid: {hour} Must be between 0 and 23") + min = int(args.open_time[-2:]) + if min < 0 or min > 59: + raise ValueError(f"The minute portion of the time is invalid: {min} Must be between 1 and 59") + + if args.days_allowed is not None: + valid_days = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] + choosen_days = args.days_allowed.lower().replace(' ', '').split(',') + for day in choosen_days: + if day not in valid_days: + raise ValueError(f"Invalid day entered ({day}), valid days are: Mon, Tue, Wed, Thu, Fri, Sat, Sun") + + if args.allow_ip is not None: + for ip in args.allow_ip: + try: + socket.inet_aton(ip) + except socket.error: + raise ValueError(f"Invalid IP address ({ip}) for '--allow-ip'") + + if args.deny_ip is not None: + for ip in args.deny_ip: + try: + socket.inet_aton(ip) + except socket.error: + raise ValueError(f"Invalid IP address ({ip}) for '--deny-ip'") + + if args.allow_host is not None: + for hostname in args.allow_host: + if not is_valid_hostname(hostname): + raise ValueError(f"Invalid hostname ({hostname}) for '--allow-host'") + + if args.deny_host is not None: + for hostname in args.deny_host: + if not is_valid_hostname(hostname): + raise ValueError(f"Invalid hostname ({hostname}) for '--deny-host'") + + def rootdn_edit(inst, basedn, log, args): log = log.getChild('rootdn_edit') + validate_args(args) plugin = RootDNAccessControlPlugin(inst) generic_object_edit(plugin, log, args, arg_to_attr) diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py index b2ab009a9..620acc77d 100644 --- a/src/lib389/lib389/utils.py +++ b/src/lib389/lib389/utils.py @@ -1418,3 
+1418,12 @@ def cmp(self, x, y): and strictly positive if x > y. """ return (x > y) - (x < y) + + +def is_valid_hostname(hostname): + if len(hostname) > 255: + return False + if hostname[-1] == ".": + hostname = hostname[:-1] # strip exactly one dot from the right, if present + allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE) + return all(allowed.match(x) for x in hostname.split("."))
0
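Since the patch above validates the open and close times with two nearly identical blocks, one natural follow-up is to factor the HHMM check into a single helper. The sketch below is only a suggested refactor of that idea, not the shipped dsconf code; the function name is hypothetical:

```python
def validate_hhmm(value: str, option: str) -> None:
    """Reject anything that is not a valid 4-digit HHMM time string."""
    if not (len(value) == 4 and value.isdigit()):
        raise ValueError(f"The {option} time must be a 4 digit number: HHMM")
    hour, minute = int(value[:2]), int(value[2:])
    if not 0 <= hour <= 23:
        raise ValueError(f"The hour portion of the {option} time is invalid: {hour}. "
                         "Must be between 0 and 23")
    if not 0 <= minute <= 59:
        raise ValueError(f"The minute portion of the {option} time is invalid: {minute}. "
                         "Must be between 0 and 59")

validate_hhmm("0830", "open")       # passes
validate_hhmm("2359", "close")      # passes
try:
    validate_hhmm("2460", "close")  # hour out of range
except ValueError as e:
    print(e)
```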
4b74d1e9a1c0edb914f165795c440b9b498e7198
389ds/389-ds-base
Issue 4443 - Internal unindexed searches in syncrepl/retro changelog Bug Description: When a non-system index is added to a backend it is disabled until the database is initialized or reindexed. So in the case of the retro changelog, the changenumber index is always disabled by default since it is never initialized. This leads to unexpected unindexed searches of the retro changelog. Fix Description: If an index has "nsSystemIndex" set to "true" then enable it immediately. relates: https://github.com/389ds/389-ds-base/issues/4443 Reviewed by: spichugi & tbordaz(Thanks!!)
commit 4b74d1e9a1c0edb914f165795c440b9b498e7198 Author: Mark Reynolds <[email protected]> Date: Tue Jul 13 14:18:03 2021 -0400 Issue 4443 - Internal unindexed searches in syncrepl/retro changelog Bug Description: When a non-system index is added to a backend it is disabled until the database is initialized or reindexed. So in the case of the retro changelog the changenumber index is alway disabled by default since it is never initialized. This leads to unexpected unindexed searches of the retro changelog. Fix Description: If an index has "nsSystemIndex" set to "true" then enable it immediately. relates: https://github.com/389ds/389-ds-base/issues/4443 Reviewed by: spichugi & tbordaz(Thanks!!) diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py index 71879cef3..c2f04dd1a 100644 --- a/dirsrvtests/tests/suites/retrocl/basic_test.py +++ b/dirsrvtests/tests/suites/retrocl/basic_test.py @@ -8,7 +8,6 @@ import logging import ldap -import time import pytest from lib389.topologies import topology_st from lib389.plugins import RetroChangelogPlugin @@ -18,7 +17,8 @@ from lib389.tasks import * from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance from lib389.cli_base.dsrc import dsrc_arg_concat from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add_attr -from lib389.idm.user import UserAccount, UserAccounts, nsUserAccounts +from lib389.idm.user import UserAccount, UserAccounts +from lib389._mapped_object import DSLdapObjects pytestmark = pytest.mark.tier1 @@ -77,7 +77,7 @@ def test_retrocl_exclude_attr_add(topology_st): log.info('Adding user1') try: - user1 = users.create(properties={ + users.create(properties={ 'sn': '1', 'cn': 'user 1', 'uid': 'user1', @@ -92,17 +92,18 @@ def test_retrocl_exclude_attr_add(topology_st): except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: - log.error("Failed to add user1") + log.error("Failed to add user1: " + str(e)) log.info('Verify homePhone and carLicense attrs are in the changelog changestring') try: - cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) + retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX) + cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') except ldap.LDAPError as e: - log.fatal("Changelog search failed, error: " +str(e)) + log.fatal("Changelog search failed, error: " + str(e)) assert False assert len(cllist) > 0 - if cllist[0].hasAttr('changes'): - clstr = (cllist[0].getValue('changes')).decode() + if cllist[0].present('changes'): + clstr = str(cllist[0].get_attr_vals_utf8('changes')) assert ATTR_HOMEPHONE in clstr assert ATTR_CARLICENSE in clstr @@ -133,7 +134,7 @@ def test_retrocl_exclude_attr_add(topology_st): log.info('Adding user2') try: - user2 = users.create(properties={ + users.create(properties={ 'sn': '2', 'cn': 'user 2', 'uid': 'user2', @@ -148,18 +149,18 @@ def test_retrocl_exclude_attr_add(topology_st): except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: - log.error("Failed to add user2") + log.error("Failed to add user2: " + str(e)) log.info('Verify homePhone attr is not in the changelog changestring') try: - cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER2_DN) + cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})') assert len(cllist) > 0 - if cllist[0].hasAttr('changes'): - clstr = (cllist[0].getValue('changes')).decode() + if cllist[0].present('changes'): + clstr = str(cllist[0].get_attr_vals_utf8('changes')) assert 
ATTR_HOMEPHONE not in clstr assert ATTR_CARLICENSE in clstr except ldap.LDAPError as e: - log.fatal("Changelog search failed, error: " +str(e)) + log.fatal("Changelog search failed, error: " + str(e)) assert False #unstable or unstatus tests, skipped for now @@ -222,19 +223,20 @@ def test_retrocl_exclude_attr_mod(topology_st): 'homeDirectory': '/home/user1', 'userpassword': USER_PW}) except ldap.ALREADY_EXISTS: - pass + user1 = UserAccount(st, dn=USER1_DN) except ldap.LDAPError as e: - log.error("Failed to add user1") + log.error("Failed to add user1: " + str(e)) log.info('Verify homePhone and carLicense attrs are in the changelog changestring') try: - cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) + retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX) + cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') except ldap.LDAPError as e: - log.fatal("Changelog search failed, error: " +str(e)) + log.fatal("Changelog search failed, error: " + str(e)) assert False assert len(cllist) > 0 - if cllist[0].hasAttr('changes'): - clstr = (cllist[0].getValue('changes')).decode() + if cllist[0].present('changes'): + clstr = str(cllist[0].get_attr_vals_utf8('changes')) assert ATTR_HOMEPHONE in clstr assert ATTR_CARLICENSE in clstr @@ -265,24 +267,25 @@ def test_retrocl_exclude_attr_mod(topology_st): log.info('Modify user1 carLicense attribute') try: - st.modify_s(USER1_DN, [(ldap.MOD_REPLACE, ATTR_CARLICENSE, b"123WX321")]) + user1.replace(ATTR_CARLICENSE, "123WX321") except ldap.LDAPError as e: log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc']) assert False log.info('Verify carLicense attr is not in the changelog changestring') try: - cllist = st.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(targetDn=%s)' % USER1_DN) + cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') assert len(cllist) > 0 # There will be 2 entries in the changelog for this user, we are only #interested in the second one, the modify operation. - if cllist[1].hasAttr('changes'): - clstr = (cllist[1].getValue('changes')).decode() + if cllist[1].present('changes'): + clstr = str(cllist[1].get_attr_vals_utf8('changes')) assert ATTR_CARLICENSE not in clstr except ldap.LDAPError as e: - log.fatal("Changelog search failed, error: " +str(e)) + log.fatal("Changelog search failed, error: " + str(e)) assert False + if __name__ == '__main__': # Run isolated # -s for DEBUG mode diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py new file mode 100644 index 000000000..b1dfe962c --- /dev/null +++ b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py @@ -0,0 +1,68 @@ +import logging +import pytest +import os +from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.plugins import RetroChangelogPlugin +from lib389.idm.user import UserAccounts +from lib389._mapped_object import DSLdapObjects +log = logging.getLogger(__name__) + + +def test_indexing_is_online(topo): + """Test that the changenmumber index is online right after enabling the plugin + + :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f + :setup: Standalone Instance + :steps: + 1. Enable retro cl + 2. Perform some updates + 3. Search for "(changenumber>=-1)", and it is not partially unindexed + 4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed + :expectedresults: + 1. Success + 2. 
Success + 3. Success + 4. Success + """ + + # Enable plugin + topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + plugin = RetroChangelogPlugin(topo.standalone) + plugin.enable() + topo.standalone.restart() + + # Do a bunch of updates + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_entry = users.create(properties={ + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'uidNumber': '11', + 'gidNumber': '111', + 'givenname': 'user1', + 'homePhone': '0861234567', + 'carLicense': '131D16674', + 'mail': '[email protected]', + 'homeDirectory': '/home' + }) + for count in range(0, 10): + user_entry.replace('mail', f'test{count}@test.com') + + # Search the retro cl, and check for error messages + filter_simple = '(changenumber>=-1)' + filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))' + retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX) + retro_changelog_suffix.filter(filter_simple) + assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') + + # Search the retro cl again with compound filter + retro_changelog_suffix.filter(filter_compound) + assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/ldap/servers/plugins/retrocl/retrocl_create.c b/ldap/servers/plugins/retrocl/retrocl_create.c index 571e6899f..5bfde7831 100644 --- a/ldap/servers/plugins/retrocl/retrocl_create.c +++ b/ldap/servers/plugins/retrocl/retrocl_create.c @@ -133,7 +133,7 @@ retrocl_create_be(const char *bedir) val.bv_len = strlen(val.bv_val); slapi_entry_add_values(e, "cn", vals); - val.bv_val = "false"; + val.bv_val = "true"; /* enables the index */ val.bv_len = strlen(val.bv_val); slapi_entry_add_values(e, "nssystemindex", vals); diff --git a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c index 9722d0ce7..38e7368e1 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_index_config.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_index_config.c @@ -25,7 +25,7 @@ int ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb, Slapi_Entry *en #define INDEXTYPE_NONE 1 static int -ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, char *err_buf) +ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_string, char **index_name, PRBool *is_system_index, char *err_buf) { Slapi_Attr *attr; const struct berval *attrValue; @@ -78,6 +78,15 @@ ldbm_index_parse_entry(ldbm_instance *inst, Slapi_Entry *e, const char *trace_st } } + *is_system_index = PR_FALSE; + if (0 == slapi_entry_attr_find(e, "nsSystemIndex", &attr)) { + slapi_attr_first_value(attr, &sval); + attrValue = slapi_value_get_berval(sval); + if (strcasecmp(attrValue->bv_val, "true") == 0) { + *is_system_index = PR_TRUE; + } + } + /* ok the entry is good to process, pass it to attr_index_config */ if (attr_index_config(inst->inst_be, (char *)trace_string, 0, e, 0, 0, err_buf)) { slapi_ch_free_string(index_name); @@ -101,9 +110,10 @@ ldbm_index_init_entry_callback(Slapi_PBlock *pb __attribute__((unused)), void *arg) { ldbm_instance *inst = (ldbm_instance *)arg; + PRBool is_system_index = PR_FALSE; returntext[0] = '\0'; - *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, NULL); + *returncode = ldbm_index_parse_entry(inst, e, "from ldbm instance init", NULL, &is_system_index /* 
not used */, NULL); if (*returncode == LDAP_SUCCESS) { return SLAPI_DSE_CALLBACK_OK; } else { @@ -126,17 +136,21 @@ ldbm_instance_index_config_add_callback(Slapi_PBlock *pb __attribute__((unused)) { ldbm_instance *inst = (ldbm_instance *)arg; char *index_name = NULL; + PRBool is_system_index = PR_FALSE; returntext[0] = '\0'; - *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, returntext); + *returncode = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index, returntext); if (*returncode == LDAP_SUCCESS) { struct attrinfo *ai = NULL; /* if the index is a "system" index, we assume it's being added by * by the server, and it's okay for the index to go online immediately. * if not, we set the index "offline" so it won't actually be used * until someone runs db2index on it. + * If caller wants to add an index that they want to be online + * immediately they can also set "nsSystemIndex" to "true" in the + * index config entry (e.g. is_system_index). */ - if (!ldbm_attribute_always_indexed(index_name)) { + if (!is_system_index && !ldbm_attribute_always_indexed(index_name)) { ainfo_get(inst->inst_be, index_name, &ai); PR_ASSERT(ai != NULL); ai->ai_indexmask |= INDEX_OFFLINE; @@ -386,13 +400,14 @@ ldbm_instance_index_config_enable_index(ldbm_instance *inst, Slapi_Entry *e) char *index_name = NULL; int rc = LDAP_SUCCESS; struct attrinfo *ai = NULL; + PRBool is_system_index = PR_FALSE; index_name = slapi_entry_attr_get_charptr(e, "cn"); if (index_name) { ainfo_get(inst->inst_be, index_name, &ai); } if (!ai) { - rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, NULL); + rc = ldbm_index_parse_entry(inst, e, "from DSE add", &index_name, &is_system_index /* not used */, NULL); } if (rc == LDAP_SUCCESS) { /* Assume the caller knows if it is OK to go online immediately */ diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py index 1bf30ead0..48d3879a3 100644 --- a/src/lib389/lib389/_mapped_object.py +++ b/src/lib389/lib389/_mapped_object.py @@ -148,6 +148,19 @@ class DSLdapObject(DSLogging, DSLint): return True + def search(self, scope="subtree", filter='objectclass=*'): + search_scope = ldap.SCOPE_SUBTREE + if scope == 'base': + search_scope = ldap.SCOPE_BASE + elif scope == 'one': + search_scope = ldap.SCOPE_ONE + elif scope == 'subtree': + search_scope = ldap.SCOPE_SUBTREE + return self._instance.search_ext_s(self._dn, search_scope, filter, + serverctrls=self._server_controls, + clientctrls=self._client_controls, + escapehatch='i am sure') + def display(self, attrlist=['*']): """Get an entry but represent it as a string LDIF
0
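The core of the fix above is a small decision: an index flagged with nsSystemIndex: true goes online as soon as it is added, while other indexes stay offline until a reindex. A schematic Python version of that decision (the IndexInfo structure, flag value, and attribute set are invented to mirror the C attrinfo handling, not a real API):

```python
from dataclasses import dataclass

INDEX_OFFLINE = 0x1  # hypothetical flag mirroring the C INDEX_OFFLINE mask

@dataclass
class IndexInfo:
    name: str
    mask: int = 0

ALWAYS_INDEXED = {"objectclass", "entrydn", "parentid"}  # stand-in for always-indexed attrs

def add_index(entry: dict) -> IndexInfo:
    """Bring a new index online only if it is a system index or always indexed;
    otherwise mark it offline until db2index/reindex runs."""
    name = entry["cn"].lower()
    is_system = entry.get("nssystemindex", "false").lower() == "true"
    info = IndexInfo(name)
    if not is_system and name not in ALWAYS_INDEXED:
        info.mask |= INDEX_OFFLINE
    return info

print(add_index({"cn": "changenumber", "nssystemindex": "true"}))   # online (mask=0)
print(add_index({"cn": "myCustomAttr", "nssystemindex": "false"}))  # offline
```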
674aefb1c064e2c9c983ea93b4cbd654aabea024
389ds/389-ds-base
Issue 4894 - IPA failure in ipa user-del --preserve (#4907) Bug Description: Starting with 389-ds 2.0.8 on rawhide, any call to ipa user-del --preserve fails with "This entry already exists". Fix Description: Split the 'dn' parameter in searchAllSubtrees into parent and target, since one of them is used for the subtree exclusion checks and the other for searching. Improve 'superior' processing when we don't change the parent. Rename variables in a saner way. Fixes: https://github.com/389ds/389-ds-base/issues/4894 Reviewed by: @Firstyear, @tbordaz, @progier389 (Thanks!)
commit 674aefb1c064e2c9c983ea93b4cbd654aabea024 Author: Simon Pichugin <[email protected]> Date: Fri Sep 10 14:17:41 2021 -0700 Issue 4894 - IPA failure in ipa user-del --preserve (#4907) Bug Description: Starting with 389-ds 2.0.8 on rawhide, any call to ipa user-del --preserve fails with This entry already exists. Fix Description: We should split 'dn' parameter in searchAllSubtrees into parent and target. As one of them is used for excluding the subtree checks and another one for searching. Improve 'superior' processing when we don't change the parent. Rename variables in a more sane way. Fixes: https://github.com/389ds/389-ds-base/issues/4894 Reviewed by: @Firstyear, @tbordaz, @progier389 (Thanks!) diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c index 9924623a7..5b763b551 100644 --- a/ldap/servers/plugins/uiduniq/uid.c +++ b/ldap/servers/plugins/uiduniq/uid.c @@ -770,13 +770,13 @@ search_one_berval(Slapi_DN *baseDN, const char **attrNames, const struct berval * * Return: * LDAP_SUCCESS - no matches, or the attribute matches the - * target dn. + * source (target) dn. * LDAP_CONSTRAINT_VIOLATION - an entry was found that already * contains the attribute value. * LDAP_OPERATIONS_ERROR - a server failure. */ static int -searchAllSubtrees(Slapi_DN **subtrees, Slapi_DN **exclude_subtrees, const char **attrNames, Slapi_Attr *attr, struct berval **values, const char *requiredObjectClass, Slapi_DN *dn, PRBool unique_in_all_subtrees) +searchAllSubtrees(Slapi_DN **subtrees, Slapi_DN **exclude_subtrees, const char **attrNames, Slapi_Attr *attr, struct berval **values, const char *requiredObjectClass, Slapi_DN *destinationSDN, Slapi_DN *sourceSDN, PRBool unique_in_all_subtrees) { int result = LDAP_SUCCESS; int i; @@ -788,12 +788,12 @@ searchAllSubtrees(Slapi_DN **subtrees, Slapi_DN **exclude_subtrees, const char * * are unique in all the monitored subtrees */ - /* First check the target entry is in one of + /* First check the destination entry is in one of * the monitored subtree, so adding 'values' would * violate constraint */ for (i = 0; subtrees && subtrees[i]; i++) { - if (slapi_sdn_issuffix(dn, subtrees[i])) { + if (slapi_sdn_issuffix(destinationSDN, subtrees[i])) { in_a_subtree = PR_TRUE; break; } @@ -808,7 +808,7 @@ searchAllSubtrees(Slapi_DN **subtrees, Slapi_DN **exclude_subtrees, const char * if (exclude_subtrees != NULL) { PRBool in_a_subtree = PR_FALSE; for (i = 0; exclude_subtrees && exclude_subtrees[i]; i++) { - if (slapi_sdn_issuffix(dn, exclude_subtrees[i])) { + if (slapi_sdn_issuffix(destinationSDN, exclude_subtrees[i])) { in_a_subtree = PR_TRUE; break; } @@ -820,7 +820,7 @@ searchAllSubtrees(Slapi_DN **subtrees, Slapi_DN **exclude_subtrees, const char * /* * For each DN in the managed list, do uniqueness checking if - * the target DN is a subnode in the tree. + * the destination (target) DN is a subnode in the tree. */ for (i = 0; subtrees && subtrees[i]; i++) { Slapi_DN *sufdn = subtrees[i]; @@ -828,8 +828,8 @@ searchAllSubtrees(Slapi_DN **subtrees, Slapi_DN **exclude_subtrees, const char * * The DN should already be normalized, so we don't have to * worry about that here. 
*/ - if (unique_in_all_subtrees || slapi_sdn_issuffix(dn, sufdn)) { - result = search(sufdn, attrNames, attr, values, requiredObjectClass, dn, exclude_subtrees); + if (unique_in_all_subtrees || slapi_sdn_issuffix(destinationSDN, sufdn)) { + result = search(sufdn, attrNames, attr, values, requiredObjectClass, sourceSDN, exclude_subtrees); if (result) break; } @@ -903,20 +903,20 @@ getArguments(Slapi_PBlock *pb, char **attrName, char **markerObjectClass, char * * * Return: * LDAP_SUCCESS - no matches, or the attribute matches the - * target dn. + * source (target) dn. * LDAP_CONSTRAINT_VIOLATION - an entry was found that already * contains the attribute value. * LDAP_OPERATIONS_ERROR - a server failure. */ static int -findSubtreeAndSearch(Slapi_DN *parentDN, const char **attrNames, Slapi_Attr *attr, struct berval **values, const char *requiredObjectClass, Slapi_DN *target, const char *markerObjectClass, Slapi_DN **excludes) +findSubtreeAndSearch(Slapi_DN *destinationSDN, const char **attrNames, Slapi_Attr *attr, struct berval **values, const char *requiredObjectClass, Slapi_DN *sourceSDN, const char *markerObjectClass, Slapi_DN **excludes) { int result = LDAP_SUCCESS; Slapi_PBlock *spb = NULL; Slapi_DN *curpar = slapi_sdn_new(); Slapi_DN *newpar = NULL; - slapi_sdn_get_parent(parentDN, curpar); + slapi_sdn_get_parent(destinationSDN, curpar); while (slapi_sdn_get_dn(curpar) != NULL) { if ((spb = dnHasObjectClass(curpar, markerObjectClass))) { freePblock(spb); @@ -925,7 +925,7 @@ findSubtreeAndSearch(Slapi_DN *parentDN, const char **attrNames, Slapi_Attr *att * to have the attribute already. */ result = search(curpar, attrNames, attr, values, requiredObjectClass, - target, excludes); + sourceSDN, excludes); break; } newpar = slapi_sdn_new(); @@ -964,7 +964,7 @@ preop_add(Slapi_PBlock *pb) int err; char *markerObjectClass = NULL; char *requiredObjectClass = NULL; - Slapi_DN *sdn = NULL; + Slapi_DN *targetSDN = NULL; int isupdatedn; Slapi_Entry *e; Slapi_Attr *attr; @@ -998,16 +998,16 @@ preop_add(Slapi_PBlock *pb) attr_friendly = config->attr_friendly; /* - * Get the target DN for this add operation + * Get the target SDN for this add operation */ - err = slapi_pblock_get(pb, SLAPI_ADD_TARGET_SDN, &sdn); + err = slapi_pblock_get(pb, SLAPI_ADD_TARGET_SDN, &targetSDN); if (err) { result = uid_op_error(51); break; } #ifdef DEBUG - slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "preop_add - ADD target=%s\n", slapi_sdn_get_dn(sdn)); + slapi_log_err(SLAPI_LOG_PLUGIN, plugin_name, "preop_add - ADD target=%s\n", slapi_sdn_get_dn(targetSDN)); #endif /* @@ -1040,13 +1040,13 @@ preop_add(Slapi_PBlock *pb) */ if (NULL != markerObjectClass) { /* Subtree defined by location of marker object class */ - result = findSubtreeAndSearch(sdn, attrNames, attr, NULL, - requiredObjectClass, sdn, + result = findSubtreeAndSearch(targetSDN, attrNames, attr, NULL, + requiredObjectClass, targetSDN, markerObjectClass, config->exclude_subtrees); } else { /* Subtrees listed on invocation line */ result = searchAllSubtrees(config->subtrees, config->exclude_subtrees, attrNames, attr, NULL, - requiredObjectClass, sdn, config->unique_in_all_subtrees); + requiredObjectClass, targetSDN, targetSDN, config->unique_in_all_subtrees); } if (result != LDAP_SUCCESS) { break; @@ -1120,7 +1120,7 @@ preop_modify(Slapi_PBlock *pb) int modcount = 0; int ii; LDAPMod *mod; - Slapi_DN *sdn = NULL; + Slapi_DN *targetSDN = NULL; int isupdatedn; int i = 0; @@ -1186,8 +1186,8 @@ preop_modify(Slapi_PBlock *pb) break; /* no mods to check, we are done */ 
} - /* Get the target DN */ - err = slapi_pblock_get(pb, SLAPI_MODIFY_TARGET_SDN, &sdn); + /* Get the target SDN */ + err = slapi_pblock_get(pb, SLAPI_MODIFY_TARGET_SDN, &targetSDN); if (err) { result = uid_op_error(11); break; @@ -1197,7 +1197,7 @@ preop_modify(Slapi_PBlock *pb) * Check if it has the required object class */ if (requiredObjectClass && - !(spb = dnHasObjectClass(sdn, requiredObjectClass))) { + !(spb = dnHasObjectClass(targetSDN, requiredObjectClass))) { break; } @@ -1213,13 +1213,13 @@ preop_modify(Slapi_PBlock *pb) mod = checkmods[ii]; if (NULL != markerObjectClass) { /* Subtree defined by location of marker object class */ - result = findSubtreeAndSearch(sdn, attrNames, NULL, + result = findSubtreeAndSearch(targetSDN, attrNames, NULL, mod->mod_bvalues, requiredObjectClass, - sdn, markerObjectClass, config->exclude_subtrees); + targetSDN, markerObjectClass, config->exclude_subtrees); } else { /* Subtrees listed on invocation line */ result = searchAllSubtrees(config->subtrees, config->exclude_subtrees, attrNames, NULL, - mod->mod_bvalues, requiredObjectClass, sdn, config->unique_in_all_subtrees); + mod->mod_bvalues, requiredObjectClass, targetSDN, targetSDN, config->unique_in_all_subtrees); } } END @@ -1271,8 +1271,8 @@ preop_modrdn(Slapi_PBlock *pb) int err; char *markerObjectClass = NULL; char *requiredObjectClass = NULL; - Slapi_DN *sdn = NULL; - Slapi_DN *superior; + Slapi_DN *sourceSDN = NULL; + Slapi_DN *destinationSDN; char *rdn; int deloldrdn = 0; int isupdatedn; @@ -1311,14 +1311,14 @@ preop_modrdn(Slapi_PBlock *pb) } /* Get the DN of the entry being renamed */ - err = slapi_pblock_get(pb, SLAPI_MODRDN_TARGET_SDN, &sdn); + err = slapi_pblock_get(pb, SLAPI_MODRDN_TARGET_SDN, &sourceSDN); if (err) { result = uid_op_error(31); break; } /* Get superior value - unimplemented in 3.0/4.0/5.0 DS */ - err = slapi_pblock_get(pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, &superior); + err = slapi_pblock_get(pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, &destinationSDN); if (err) { result = uid_op_error(32); break; @@ -1326,11 +1326,11 @@ preop_modrdn(Slapi_PBlock *pb) /* * No superior means the entry is just renamed at - * its current level in the tree. Use the target DN for + * its current level in the tree. Use the source SDN for * determining which managed tree this belongs to */ - if (!superior) - superior = sdn; + if (!destinationSDN) + slapi_sdn_get_parent(sourceSDN, destinationSDN); /* Get the new RDN - this has the attribute values */ err = slapi_pblock_get(pb, SLAPI_MODRDN_NEWRDN, &rdn); @@ -1352,10 +1352,10 @@ preop_modrdn(Slapi_PBlock *pb) /* Get the entry that is being renamed so we can make a dummy copy * of what it will look like after the rename. */ - err = slapi_search_get_entry(&entry_pb, sdn, NULL, &e, plugin_identity); + err = slapi_search_get_entry(&entry_pb, sourceSDN, NULL, &e, plugin_identity); if (err != LDAP_SUCCESS) { result = uid_op_error(35); - /* We want to return a no such object error if the target doesn't exist. */ + /* We want to return a no such object error if the source SDN doesn't exist. */ if (err == LDAP_NO_SUCH_OBJECT) { result = err; } @@ -1364,7 +1364,7 @@ preop_modrdn(Slapi_PBlock *pb) /* Apply the rename operation to the dummy entry. 
*/ /* slapi_entry_rename does not expect rdn normalized */ - err = slapi_entry_rename(e, rdn, deloldrdn, superior); + err = slapi_entry_rename(e, rdn, deloldrdn, destinationSDN); if (err != LDAP_SUCCESS) { result = uid_op_error(36); break; @@ -1392,13 +1392,13 @@ preop_modrdn(Slapi_PBlock *pb) */ if (NULL != markerObjectClass) { /* Subtree defined by location of marker object class */ - result = findSubtreeAndSearch(slapi_entry_get_sdn(e), attrNames, attr, NULL, - requiredObjectClass, superior, + result = findSubtreeAndSearch(destinationSDN, attrNames, attr, NULL, + requiredObjectClass, sourceSDN, markerObjectClass, config->exclude_subtrees); } else { /* Subtrees listed on invocation line */ result = searchAllSubtrees(config->subtrees, config->exclude_subtrees, attrNames, attr, NULL, - requiredObjectClass, superior, config->unique_in_all_subtrees); + requiredObjectClass, destinationSDN, sourceSDN, config->unique_in_all_subtrees); } if (result != LDAP_SUCCESS) { break;
0
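The renaming in the patch above is the whole point: one DN decides whether the operation lands inside a monitored (or excluded) subtree, and a different DN identifies the entry whose own values must not count as duplicates. A simplified Python sketch of that separation (the string-based DN suffix test and the search stub are illustrative only; the real code uses slapi_sdn_issuffix and an internal search):

```python
def is_suffix(dn: str, suffix: str) -> bool:
    """Very rough DN suffix test, just for the sketch."""
    return dn.lower().endswith(suffix.lower())

def search_all_subtrees(subtrees, exclude, destination_dn, source_dn, find_conflict):
    """destination_dn: where the entry will live -> drives subtree/exclusion checks.
    source_dn: the entry being added or renamed -> ignored as a match against itself."""
    if not any(is_suffix(destination_dn, s) for s in subtrees):
        return 0  # outside every monitored subtree, nothing to enforce
    if any(is_suffix(destination_dn, s) for s in exclude):
        return 0  # destination is explicitly excluded
    for subtree in subtrees:
        if find_conflict(subtree, ignore_dn=source_dn):
            return 19  # LDAP_CONSTRAINT_VIOLATION
    return 0

# A MODRDN that keeps the entry under the same parent: destination is the parent,
# source is the entry itself, so its current value does not trip the check.
result = search_all_subtrees(
    subtrees=["ou=people,dc=example,dc=com"],
    exclude=[],
    destination_dn="ou=people,dc=example,dc=com",
    source_dn="uid=user1,ou=people,dc=example,dc=com",
    find_conflict=lambda subtree, ignore_dn: None,
)
print(result)  # 0
```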
be57c970629e65df13921d4628dddc30457110cc
389ds/389-ds-base
539618 - Replication bulk import reports Invalid read/write https://bugzilla.redhat.com/show_bug.cgi?id=539618 Back off this commit: commit 4205086e4f237a52eb9113cd95f9cf87b39e9ed4 Date: Mon Feb 22 08:49:49 2010 -0800 since this change could cause a deadlock between the thread that eventually calls prot_free, which has acquired the agreement lock, and other threads waiting for the agreement lock, which prevents the protocol from stopping. Instead of having prot_free wait for prot_thread_main to finish, let prot_thread_main check whether the protocol field still exists in the agreement. If it does not, prot_thread_main quits.
commit be57c970629e65df13921d4628dddc30457110cc Author: Noriko Hosoi <[email protected]> Date: Wed Mar 3 10:37:18 2010 -0800 539618 - Replication bulk import reports Invalid read/write https://bugzilla.redhat.com/show_bug.cgi?id=539618 Back off this commit: commit 4205086e4f237a52eb9113cd95f9cf87b39e9ed4 Date: Mon Feb 22 08:49:49 2010 -0800 since this change could cause the deadlock between the thread eventually calling prot_free, which acquired the agreement lock, and other threads waiting for the agreement lock, which prevents the protocol stop. Instead of waiting for prot_thread_main done in prot_free, let prot_thread_main check the existence of the protocol field in the agreement. If it's not available, prot_thread_main quits. diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h index 332bfb8bb..97ce55699 100644 --- a/ldap/servers/plugins/replication/repl5.h +++ b/ldap/servers/plugins/replication/repl5.h @@ -410,7 +410,7 @@ void prot_initialize_replica(Repl_Protocol *rp); /* stop protocol session in progress */ void prot_stop(Repl_Protocol *rp); void prot_delete(Repl_Protocol **rpp); -void prot_free(Repl_Protocol **rpp, int wait_for_done); +void prot_free(Repl_Protocol **rpp); PRBool prot_set_active_protocol (Repl_Protocol *rp, PRBool total); void prot_clear_active_protocol (Repl_Protocol *rp); Repl_Connection *prot_get_connection(Repl_Protocol *rp); diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c index 2571d33cf..13db1acde 100644 --- a/ldap/servers/plugins/replication/repl5_agmt.c +++ b/ldap/servers/plugins/replication/repl5_agmt.c @@ -558,7 +558,7 @@ agmt_start(Repl_Agmt *ra) if (ra->protocol != NULL) { slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "replication already started for agreement \"%s\"\n", agmt_get_long_name(ra)); PR_Unlock(ra->lock); - prot_free(&prot, 0); + prot_free(&prot); return 0; } @@ -606,7 +606,7 @@ windows_agmt_start(Repl_Agmt *ra) if (ra->protocol != NULL) { slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "replication already started for agreement \"%s\"\n", agmt_get_long_name(ra)); PR_Unlock(ra->lock); - prot_free(&prot, 0); + prot_free(&prot); return 0; } @@ -645,7 +645,7 @@ agmt_stop(Repl_Agmt *ra) PR_Lock(ra->lock); ra->stop_in_progress = PR_FALSE; /* we do not reuse the protocol object so free it */ - prot_free(&ra->protocol, 1); + prot_free(&ra->protocol); PR_Unlock(ra->lock); return return_value; } @@ -2261,3 +2261,11 @@ ReplicaId agmt_get_consumerRID(Repl_Agmt *ra) return ra->consumerRID; } +int +agmt_has_protocol(Repl_Agmt *agmt) +{ + if (agmt) { + return NULL != agmt->protocol; + } + return 0; +} diff --git a/ldap/servers/plugins/replication/repl5_protocol.c b/ldap/servers/plugins/replication/repl5_protocol.c index e909ed459..927c450ac 100644 --- a/ldap/servers/plugins/replication/repl5_protocol.c +++ b/ldap/servers/plugins/replication/repl5_protocol.c @@ -77,7 +77,6 @@ typedef struct repl_protocol /* States */ #define STATE_FINISHED 503 -#define STATE_DONE 504 #define STATE_BAD_STATE_SHOULD_NEVER_HAPPEN 599 /* Forward declarations */ @@ -174,10 +173,8 @@ prot_get_agreement(Repl_Protocol *rp) -/* - */ void -prot_free(Repl_Protocol **rpp, int wait_for_done) +prot_free(Repl_Protocol **rpp) { Repl_Protocol *rp = NULL; PRIntervalTime interval; @@ -185,30 +182,6 @@ prot_free(Repl_Protocol **rpp, int wait_for_done) if (rpp == NULL || *rpp == NULL) return; rp = *rpp; - /* - * This function has to wait until prot_thread_main exits if - * prot_start is 
successfully called and prot_thread_main is - * running. Otherwise, we may free Repl_Protocol while it's - * being used. - * - * This function is supposed to be called when the protocol is - * stopped either after prot_stop is called or when protocol - * hasn't been started. - * - * The latter case: prot_free is called with wait_for_done = 0. - * The former case: prot_free is called with wait_for_done = 1. - * prot_stop had set STATE_FINISHED to next_state and stopped - * the current activity. But depending upon the threads' - * scheduling, prot_thread_main may not have gotten out of the - * while loop at this moment. To make sure prot_thread_main - * finished referring Repl_Protocol, we wait for the state set - * to STATE_DONE. - */ - interval = PR_MillisecondsToInterval(1000); - while (wait_for_done && STATE_DONE != rp->state) - { - DS_Sleep(interval); - } PR_Lock(rp->lock); if (NULL != rp->prp_incremental) @@ -247,7 +220,7 @@ prot_delete(Repl_Protocol **rpp) if (NULL != rp) { prot_stop(rp); - prot_free(rpp, 1); + prot_free(rpp); } } @@ -319,11 +292,13 @@ prot_thread_main(void *arg) { Repl_Protocol *rp = (Repl_Protocol *)arg; int done; + Repl_Agmt *agmt = NULL; PR_ASSERT(NULL != rp); - if (rp->agmt) { - set_thread_private_agmtname (agmt_get_long_name(rp->agmt)); + agmt = rp->agmt; + if (agmt) { + set_thread_private_agmtname (agmt_get_long_name(agmt)); } done = 0; @@ -355,7 +330,7 @@ prot_thread_main(void *arg) dev_debug("prot_thread_main(STATE_PERFORMING_TOTAL_UPDATE): end"); /* update the agreement entry to notify clients that replica initialization is completed. */ - agmt_replica_init_done (rp->agmt); + agmt_replica_init_done (agmt); break; case STATE_FINISHED: @@ -363,9 +338,15 @@ prot_thread_main(void *arg) done = 1; break; } - rp->state = rp->next_state; + if (agmt_has_protocol(agmt)) + { + rp->state = rp->next_state; + } + else + { + done = 1; + } } - rp->state = STATE_DONE; }
0
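The record above backs out the wait_for_done loop and instead lets the protocol thread notice on its own that the agreement no longer holds a protocol, so the freeing side never blocks while it holds the agreement lock. A minimal sketch of that shutdown pattern in Python (names here are invented for illustration; the real code is C and uses PR_Lock and agmt_has_protocol):

# Sketch: the worker re-checks a shared "protocol" field each iteration and exits
# once the owner has detached it; the owner only clears the field, it never waits
# for a DONE state while holding its lock, which is what removed the deadlock window.
import threading
import time

class Agreement:
    def __init__(self):
        self.lock = threading.Lock()
        self.protocol = object()          # stands in for the Repl_Protocol handle

def protocol_thread_main(agmt):
    while True:
        # ... one unit of replication work would happen here ...
        time.sleep(0.01)
        with agmt.lock:
            if agmt.protocol is None:     # owner detached the protocol: quit
                return

def stop_and_free(agmt):
    # Clear the field and return; the thread shuts itself down.
    with agmt.lock:
        agmt.protocol = None

agmt = Agreement()
t = threading.Thread(target=protocol_thread_main, args=(agmt,))
t.start()
stop_and_free(agmt)
t.join()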
799cdae8b27d5bff8413aa16e37011a95ab0052e
389ds/389-ds-base
Issue 6519 - Add basic dsidm account tests Automating basic dsidm account tests Relates to: https://github.com/389ds/389-ds-base/issues/6519 Author: Lenka Doudova Reviewed by: Simon Pichugin
commit 799cdae8b27d5bff8413aa16e37011a95ab0052e Author: Lenka Doudova <[email protected]> Date: Mon Jan 20 14:19:51 2025 +0100 Issue 6519 - Add basic dsidm account tests Automating basic dsidm account tests Relates to: https://github.com/389ds/389-ds-base/issues/6519 Author: Lenka Doudova Reviewed by: Simon Pichugin diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_test.py index 4b48a11a5..c600e31fd 100644 --- a/dirsrvtests/tests/suites/clu/dsidm_account_test.py +++ b/dirsrvtests/tests/suites/clu/dsidm_account_test.py @@ -6,22 +6,19 @@ # See LICENSE for details. # --- END COPYRIGHT BLOCK --- # + import logging import os import json import pytest import ldap from lib389 import DEFAULT_SUFFIX -from lib389.cli_idm.account import ( - get_dn, - lock, - unlock, - entry_status, - subtree_status, -) +from lib389.cli_idm.account import list, get_dn, lock, unlock, delete, modify, rename, entry_status, \ + subtree_status, reset_password, change_password +from lib389.cli_idm.user import create from lib389.topologies import topology_st from lib389.cli_base import FakeArgs -from lib389.utils import ds_is_older +from lib389.utils import ds_is_older, is_a_dn from lib389.idm.user import nsUserAccounts from . import check_value_in_log_and_reset @@ -30,13 +27,28 @@ pytestmark = pytest.mark.tier0 logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) +test_user_name = 'test_user_1000' @pytest.fixture(scope="function") def create_test_user(topology_st, request): log.info('Create test user') users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) - test_user = users.create_test_user() - log.info('Created test user: %s', test_user.dn) + + if users.exists(test_user_name): + test_user = users.get(test_user_name) + test_user.delete() + + properties = FakeArgs() + properties.uid = test_user_name + properties.cn = test_user_name + properties.sn = test_user_name + properties.uidNumber = '1000' + properties.gidNumber = '2000' + properties.homeDirectory = '/home/test_user_1000' + properties.displayName = test_user_name + + create(topology_st.standalone, DEFAULT_SUFFIX, topology_st.logcap.log, properties) + test_user = users.get(test_user_name) def fin(): log.info('Delete test user') @@ -74,7 +86,7 @@ def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user): standalone = topology_st.standalone users = nsUserAccounts(standalone, DEFAULT_SUFFIX) - test_user = users.get('test_user_1000') + test_user = users.get(test_user_name) entry_list = ['Entry DN: {}'.format(test_user.dn), 'Entry Creation Date', @@ -169,8 +181,389 @@ def test_dsidm_account_entry_get_by_dn(topology_st, create_test_user): assert json_result['dn'] == user_dn [email protected](ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_delete(topology_st, create_test_user): + """ Test dsidm account delete option + + :id: a7960bc2-0282-4a82-8dfb-3af2088ec661 + :setup: Standalone + :steps: + 1. Run dsidm account delete on a created account + 2. Check that a message is provided on deletion + 3. Check that the account no longer exists + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_account = accounts.get(test_user_name) + output = 'Successfully deleted {}'.format(test_account.dn) + + args = FakeArgs() + args.dn = test_account.dn + + log.info('Test dsidm account delete') + delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Check that the account no longer exists') + assert not test_account.exists() + + [email protected](ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_list(topology_st, create_test_user): + """ Test dsidm account list option + + :id: 4d173a3e-ee36-4a8b-8d0d-4955c792faca + :setup: Standalone instance + :steps: + 1. Run dsidm account list without json + 2. Check the output content is correct + 3. Run dsidm account list with json + 4. Check the output content is correct + 5. Test full_dn option with list + 6. Delete the account + 7. Check the account is not in the list with json + 8. Check the account is not in the list without json + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + + standalone = topology_st.standalone + args = FakeArgs() + args.json = False + args.full_dn = False + json_list = ['type', + 'list', + 'items'] + + log.info('Empty the log file to prevent false data to check about group') + topology_st.logcap.flush() + + log.info('Test dsidm account list without json') + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=test_user_name) + + log.info('Test dsidm account list with json') + args.json = True + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=test_user_name) + + log.info('Test full_dn option with list') + args.full_dn = True + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + result = topology_st.logcap.get_raw_outputs() + json_result = json.loads(result[0]) + assert is_a_dn(json_result['items'][0]) + args.full_dn = False + topology_st.logcap.flush() + + log.info('Delete the account') + accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_account = accounts.get(test_user_name) + test_account.delete() + + log.info('Test empty dsidm account list with json') + list(standalone,DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=test_user_name) + + log.info('Test empty dsidm account list without json') + args.json = False + list(standalone,DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value_not=test_user_name) + + [email protected](reason='DS6515') [email protected](ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_get_by_dn(topology_st, create_test_user): + """ Test dsidm account get-by-dn option + + :id: 07945577-2da0-4fd9-9237-43dd2823f7b8 + :setup: Standalone instance + :steps: + 1. Run dsidm account get-by-dn for an account without json + 2. Check the output content is correct + 3. Run dsidm account get-by-dn for an account with json + 4. Check the output content is correct + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_account = accounts.get(test_user_name) + + args = FakeArgs() + args.dn = test_account.dn + args.json = False + + account_content = ['dn: {}'.format(test_account.dn), + 'cn: {}'.format(test_account.rdn), + 'displayName: {}'.format(test_user_name), + 'gidNumber: 2000', + 'homeDirectory: /home/{}'.format(test_user_name), + 'objectClass: top', + 'objectClass: nsPerson', + 'objectClass: nsAccount', + 'objectClass: nsOrgPerson', + 'objectClass: posixAccount', + 'uid: {}'.format(test_user_name), + 'uidNumber: 1000'] + + json_content = ['attrs', + 'objectclass', + 'top', + 'nsPerson', + 'nsAccount', + 'nsOrgPerson', + 'posixAccount', + 'cn', + test_account.rdn, + 'gidnumber', + '2000', + 'homedirectory', + '/home/{}'.format(test_user_name), + 'displayname', + test_user_name, + 'uidnumber', + '1000', + 'creatorsname', + 'cn=directory manager', + 'modifiersname', + 'createtimestamp', + 'modifytimestamp', + 'nsuniqueid', + 'parentid', + 'entryid', + 'entryuuid', + 'dsentrydn', + 'entrydn', + test_account.dn] + + log.info('Empty the log file to prevent false data to check about the account') + topology_st.logcap.flush() + + log.info('Test dsidm account get-by-dn without json') + get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=account_content) + + log.info('Test dsidm account get-by-dn with json') + args.json = True + get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_content) + + [email protected](ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_modify_by_dn(topology_st, create_test_user): + """ Test dsidm account modify-by-dn + + :id: e7288f8c-f0a8-4d8d-a00f-1b243eb117bc + :setup: Standalone instance + :steps: + 1. Run dsidm account modify-by-dn add description value + 2. Run dsidm account modify-by-dn replace description value + 3. Run dsidm account modify-by-dn delete description value + :expectedresults: + 1. A description value is added + 2. The original description value is replaced and the previous is not present + 3. 
The replaced description value is deleted + """ + + standalone = topology_st.standalone + accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_account = accounts.get(test_user_name) + output = 'Successfully modified {}'.format(test_account.dn) + + args = FakeArgs() + args.dn = test_account.dn + args.changes = ['add:description:new_description'] + + log.info('Test dsidm account modify add') + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + assert test_account.present('description', 'new_description') + + log.info('Test dsidm account modify replace') + args.changes = ['replace:description:replaced_description'] + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + assert test_account.present('description', 'replaced_description') + assert not test_account.present('description', 'new_description') + + log.info('Test dsidm account modify delete') + args.changes = ['delete:description:replaced_description'] + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + assert not test_account.present('description', 'replaced_description') + + [email protected](ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_rename_by_dn(topology_st, create_test_user): + """ Test dsidm account rename-by-dn option + + :id: f4b8e491-35b1-4113-b9c4-e0a80f8985f3 + :setup: Standalone instance + :steps: + 1. Run dsidm account rename option on existing account + 2. Check the account does not have another uid attribute with the old rdn + 3. Check the old account is deleted + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + standalone = topology_st.standalone + accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_account = accounts.get(test_user_name) + + args = FakeArgs() + args.dn = test_account.dn + args.new_name = 'renamed_account' + args.new_dn = 'uid=renamed_account,ou=people,{}'.format(DEFAULT_SUFFIX) + args.keep_old_rdn = False + + log.info('Test dsidm account rename-by-dn') + rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + new_account = accounts.get(args.new_name) + + try: + output = 'Successfully renamed to {}'.format(new_account.dn) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Verify the new account does not have a uid attribute with the old rdn') + assert not new_account.present('uid', test_user_name) + assert new_account.present('displayName', test_user_name) + + log.info('Verify the old account does not exist') + assert not test_account.exists() + finally: + log.info('Clean up') + new_account.delete() + + [email protected](ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_rename_by_dn_keep_old_rdn(topology_st, create_test_user): + """ Test dsidm account rename-by-dn option with keep-old-rdn + + :id: a128bdbb-c0a4-4d9d-9a95-9be2d3780094 + :setup: Standalone instance + :steps: + 1. Run dsidm account rename option on existing account + 2. Check the account has another uid attribute with the old rdn + 3. Check the old account is deleted + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_account = accounts.get(test_user_name) + + args = FakeArgs() + args.dn = test_account.dn + args.new_name = 'renamed_account' + args.new_dn = 'uid=renamed_account,ou=people,{}'.format(DEFAULT_SUFFIX) + args.keep_old_rdn = True + + log.info('Test dsidm account rename-by-dn') + rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + new_account = accounts.get(args.new_name) + + try: + output = 'Successfully renamed to {}'.format(new_account.dn) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Verify the new account does not have a uid attribute with the old rdn') + assert new_account.present('uid', test_user_name) + assert new_account.present('displayName', test_user_name) + + log.info('Verify the old account does not exist') + assert not test_account.exists() + finally: + log.info('Clean up') + new_account.delete() + + [email protected](ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_reset_password(topology_st, create_test_user): + """ Test dsidm account reset_password option + + :id: 02ffa044-08ae-40c5-9108-b02d0c3b0521 + :setup: Standalone instance + :steps: + 1. Run dsidm account reset_password on an existing user + 2. Verify that the user has now userPassword attribute set + :expectedresults: + 1. Success + 2. Success + """ + + standalone = topology_st.standalone + accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_account = accounts.get(test_user_name) + + args = FakeArgs() + args.dn = test_account.dn + args.new_password = 'newpasswd' + output = 'reset password for {}'.format(test_account.dn) + + log.info('Test dsidm account reset_password') + reset_password(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Verify the userPassword attribute is set') + assert test_account.present('userPassword') + + [email protected](ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_change_password(topology_st, create_test_user): + """ Test dsidm account change_password option + + :id: 24c25b8f-df2b-4d43-a88e-47e24bc4ff36 + :setup: Standalone instance + :steps: + 1. Run dsidm account change_password on an existing user + 2. Verify that the user has userPassword attribute set + :expectedresults: + 1. Success + 2. 
Success + """ + + standalone = topology_st.standalone + accounts = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_account = accounts.get(test_user_name) + + args = FakeArgs() + args.dn = test_account.dn + args.new_password = 'newpasswd' + output = 'changed password for {}'.format(test_account.dn) + + log.info('Test dsidm account change_password') + change_password(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Verify the userPassword attribute is set') + assert test_account.present('userPassword') + + if __name__ == '__main__': # Run isolated # -s for DEBUG mode CURRENT_FILE = os.path.realpath(__file__) - pytest.main("-s %s" % CURRENT_FILE) + pytest.main("-s {}".format(CURRENT_FILE)) \ No newline at end of file diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py index 8b6f99549..9877c533a 100644 --- a/src/lib389/lib389/cli_idm/account.py +++ b/src/lib389/lib389/cli_idm/account.py @@ -12,10 +12,12 @@ import ldap import math from datetime import datetime from lib389.idm.account import Account, Accounts, AccountState -from lib389.cli_base import ( - _generic_get_dn, +from lib389.cli_idm import ( _generic_list, _generic_delete, + _generic_get_dn +) +from lib389.cli_base import ( _generic_modify_dn, _get_arg, _get_dn_arg,
0
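The tests above all follow one pattern: the dsidm CLI handlers are called as plain Python functions, with a FakeArgs object standing in for parsed command-line arguments, and the result is checked through the log capture or directly on the entry. A minimal sketch of that pattern (it assumes a running standalone instance from the lib389 topology fixture; the 'description' change is illustrative and not one of the original tests):

# Sketch of driving a dsidm handler from pytest via FakeArgs.
from lib389 import DEFAULT_SUFFIX
from lib389.cli_base import FakeArgs
from lib389.cli_idm.account import modify
from lib389.idm.user import nsUserAccounts
from lib389.topologies import topology_st


def test_modify_adds_description(topology_st):
    inst = topology_st.standalone
    user = nsUserAccounts(inst, DEFAULT_SUFFIX).create_test_user()

    args = FakeArgs()
    args.dn = user.dn
    args.changes = ['add:description:example']

    # The handler logs "Successfully modified <dn>" through the passed-in logger.
    modify(inst, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False)
    assert user.present('description', 'example')
    user.delete()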
ecff84560f99a21d289637b6848d6a5edfac5092
389ds/389-ds-base
remove redundant source file and associated Makefile line
commit ecff84560f99a21d289637b6848d6a5edfac5092 Author: David Boreham <[email protected]> Date: Mon Apr 18 18:11:43 2005 +0000 remove redundant source file and associated Makefile line diff --git a/ldap/servers/plugins/replication/Makefile b/ldap/servers/plugins/replication/Makefile index 8bcb7a6a8..b2eb92084 100644 --- a/ldap/servers/plugins/replication/Makefile +++ b/ldap/servers/plugins/replication/Makefile @@ -121,7 +121,6 @@ LOCAL_OBJS= \ repl5_updatedn_list.o\ windows_inc_protocol.o \ windows_tot_protocol.o \ - windows_total.o \ windows_protocol_util.o \ windows_private.o \ windows_connection.o diff --git a/ldap/servers/plugins/replication/windows_total.c b/ldap/servers/plugins/replication/windows_total.c deleted file mode 100644 index 9b99f1bcb..000000000 --- a/ldap/servers/plugins/replication/windows_total.c +++ /dev/null @@ -1,750 +0,0 @@ -/** BEGIN COPYRIGHT BLOCK - * This Program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free Software - * Foundation; version 2 of the License. - * - * This Program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along with - * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place, Suite 330, Boston, MA 02111-1307 USA. - * - * In addition, as a special exception, Red Hat, Inc. gives You the additional - * right to link the code of this Program with code not covered under the GNU - * General Public License ("Non-GPL Code") and to distribute linked combinations - * including the two, subject to the limitations in this paragraph. Non-GPL Code - * permitted under this exception must only link to the code of this Program - * through those well defined interfaces identified in the file named EXCEPTION - * found in the source code files (the "Approved Interfaces"). The files of - * Non-GPL Code may instantiate templates or use macros or inline functions from - * the Approved Interfaces without causing the resulting work to be covered by - * the GNU General Public License. Only Red Hat, Inc. may make changes or - * additions to the list of Approved Interfaces. You must obey the GNU General - * Public License in all respects for all of the Program code and other code used - * in conjunction with the Program except the Non-GPL Code covered by this - * exception. If you modify this file, you may extend this exception to your - * version of the file, but you are not obligated to do so. If you do not wish to - * do so, delete this exception statement from your version. - * - * - * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission. - * Copyright (C) 2005 Red Hat, Inc. - * All rights reserved. 
- * END COPYRIGHT BLOCK **/ - - - -#include "repl5.h" -#include "slap.h" - -#define CSN_TYPE_VALUE_UPDATED_ON_WIRE 1 -#define CSN_TYPE_VALUE_DELETED_ON_WIRE 2 -#define CSN_TYPE_VALUE_DISTINGUISHED_ON_WIRE 3 - -/* #define GORDONS_PATENTED_BER_DEBUG 1 */ -#ifdef GORDONS_PATENTED_BER_DEBUG -#define BER_DEBUG(a) printf(a) -#else -#define BER_DEBUG(a) -#endif - -/* Forward declarations */ -static int my_ber_printf_csn(BerElement *ber, const CSN *csn, const CSNType t); -static int my_ber_printf_value(BerElement *ber, const char *type, - const Slapi_Value *value, PRBool deleted); -static int my_ber_printf_attr (BerElement *ber, Slapi_Attr *attr, PRBool deleted); -static int my_ber_scanf_attr (BerElement *ber, Slapi_Attr **attr, PRBool *deleted); -static int my_ber_scanf_value(BerElement *ber, Slapi_Value **value, PRBool *deleted); - -/* - * Helper function - convert a CSN to a string and ber_printf() it. - */ -static int -my_ber_printf_csn(BerElement *ber, const CSN *csn, const CSNType t) -{ - char csn_str[CSN_STRSIZE]; - unsigned long len; - int rc = -1; - int csn_type_as_ber = -1; - - LDAPDebug( LDAP_DEBUG_TRACE, "=> my_ber_printf_csn\n", 0, 0, 0 ); - - switch (t) - { - case CSN_TYPE_VALUE_UPDATED: - csn_type_as_ber = CSN_TYPE_VALUE_UPDATED_ON_WIRE; - break; - case CSN_TYPE_VALUE_DELETED: - csn_type_as_ber = CSN_TYPE_VALUE_DELETED_ON_WIRE; - break; - case CSN_TYPE_VALUE_DISTINGUISHED: - csn_type_as_ber = CSN_TYPE_VALUE_DISTINGUISHED_ON_WIRE; - break; - case CSN_TYPE_ATTRIBUTE_DELETED: - break; - default: - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "my_ber_printf_csn: unknown " - "csn type %d encountered.\n", (int)t); - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_printf_csn\n", 0, 0, 0 ); - return -1; - } - - csn_as_string(csn,PR_FALSE,csn_str); - - /* we don't send type for attr csn since there is only one */ - if (t == CSN_TYPE_ATTRIBUTE_DELETED) - { - rc = ber_printf(ber, "s", csn_str); - BER_DEBUG("s(csn_str)"); - } - else - { - len = CSN_STRSIZE; - rc = ber_printf(ber, "{es}", csn_type_as_ber, csn_str); - BER_DEBUG("{e(csn type)s(csn)}"); - } - - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_printf_csn\n", 0, 0, 0 ); - - return rc; -} - - -/* - * Send a single annotated attribute value. - */ -static int -my_ber_printf_value(BerElement *ber, const char *type, const Slapi_Value *value, PRBool deleted) -{ - const struct berval *bval = NULL; - int rc = -1; - const CSNSet *csnset; - void *cookie; - CSN *csn; - CSNType t; - - LDAPDebug( LDAP_DEBUG_TRACE, "=> my_ber_printf_value\n", 0, 0, 0 ); - - bval = slapi_value_get_berval(value); - BER_DEBUG("{o(value)"); - if (ber_printf(ber, "{o", bval->bv_val, bval->bv_len) == -1) /* Start sequence */ - { - goto done; - } - -/* if (ber_printf(ber, "o", bval->bv_val, bval->bv_len) == -1) - { - goto done; - } */ - - if (deleted) - { - BER_DEBUG("b(deleted flag)"); - if (ber_printf (ber, "b", PR_TRUE) == -1) - { - goto done; - } - } - /* Send value CSN list */ - BER_DEBUG("{"); - if (ber_printf(ber, "{") == -1) /* Start set */ - { - goto done; - } - - /* Iterate over the sequence of CSNs. 
*/ - csnset = value_get_csnset (value); - if (csnset) - { - for (cookie = csnset_get_first_csn (csnset, &csn, &t); NULL != cookie; - cookie = csnset_get_next_csn (csnset, cookie, &csn, &t)) - { - /* Don't send any adcsns, since that was already sent */ - if (t != CSN_TYPE_ATTRIBUTE_DELETED) - { - if (my_ber_printf_csn(ber, csn, t) == -1) - { - goto done; - } - } - } - } - - BER_DEBUG("}"); - if (ber_printf(ber, "}") == -1) /* End CSN sequence */ - { - goto done; - } - BER_DEBUG("}"); - if (ber_printf(ber, "}") == -1) /* End sequence */ - { - goto done; - } - - /* Everything's ok */ - rc = 0; - -done: - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_printf_value\n", 0, 0, 0 ); - return rc; - -} - -/* send a single attribute */ -static int -my_ber_printf_attr (BerElement *ber, Slapi_Attr *attr, PRBool deleted) -{ - Slapi_Value *value; - char *type; - int i; - const CSN *csn; - - LDAPDebug( LDAP_DEBUG_TRACE, "=> my_ber_printf_attr\n", 0, 0, 0 ); - - /* First, send the type */ - slapi_attr_get_type(attr, &type); - BER_DEBUG("{s(type "); - BER_DEBUG(type); - BER_DEBUG(")"); - if (ber_printf(ber, "{s", type) == -1) /* Begin sequence for this type */ - { - goto loser; - } - - /* Send the attribute deletion CSN if present */ - csn = attr_get_deletion_csn(attr); - if (csn) - { - if (my_ber_printf_csn(ber, csn, CSN_TYPE_ATTRIBUTE_DELETED) == -1) - { - goto loser; - } - } - - /* only send "is deleted" flag for deleted attributes since it defaults to false */ - if (deleted) - { - BER_DEBUG("b(del flag)"); - if (ber_printf (ber, "b", PR_TRUE) == -1) - { - goto loser; - } - } - - /* - * Iterate through all the values. - */ - BER_DEBUG("["); - if (ber_printf(ber, "[") == -1) /* Begin set */ - { - goto loser; - } - - /* - * Process the non-deleted values first. - */ - i = slapi_attr_first_value(attr, &value); - while (i != -1) - { - if (my_ber_printf_value(ber, type, value, PR_FALSE) == -1) - { - goto loser; - } - i= slapi_attr_next_value(attr, i, &value); - } - - /* - * Now iterate over all of the deleted values. - */ - i= attr_first_deleted_value(attr, &value); - while (i != -1) - { - if (my_ber_printf_value(ber, type, value, PR_TRUE) == -1) - { - goto loser; - } - i= attr_next_deleted_value(attr, i, &value); - } - BER_DEBUG("]"); - if (ber_printf(ber, "]") == -1) /* End set */ - { - goto loser; - } - - BER_DEBUG("}"); - if (ber_printf(ber, "}") == -1) /* End sequence for this type */ - { - goto loser; - } - - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_printf_attr\n", 0, 0, 0 ); - return 0; -loser: - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_printf_attr - loser\n", 0, 0, 0 ); - return -1; -} - -/* - * Get an annotated value from the BerElement. Returns 0 on - * success, -1 on failure. 
- */ -static int -my_ber_scanf_value(BerElement *ber, Slapi_Value **value, PRBool *deleted) -{ - struct berval *attrval = NULL; - unsigned long len; - unsigned long tag; - CSN *csn = NULL; - char csnstring[CSN_STRSIZE + 1]; - CSNType csntype; - char *lasti; - - LDAPDebug( LDAP_DEBUG_TRACE, "=> my_ber_scanf_value\n", 0, 0, 0 ); - - PR_ASSERT(ber && value && deleted); - - *value = NULL; - - if (NULL == ber && NULL == value) - { - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "my_ber_scanf_value BAD 1\n"); - goto loser; - } - - /* Each value is a sequence */ - if (ber_scanf(ber, "{O", &attrval) == -1) - { - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "my_ber_scanf_value BAD 2\n"); - goto loser; - } - /* Allocate and fill in the attribute value */ - if ((*value = slapi_value_new_berval(attrval)) == NULL) - { - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "my_ber_scanf_value BAD 3\n"); - goto loser; - } - - /* check if this is a deleted value */ - if (ber_peek_tag(ber, &len) == LBER_BOOLEAN) - { - if (ber_scanf(ber, "b", deleted) == -1) - { - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "my_ber_scanf_value BAD 4\n"); - goto loser; - } - } - - else /* default is present value */ - { - *deleted = PR_FALSE; - } - - /* Read the sequence of CSNs */ - for (tag = ber_first_element(ber, &len, &lasti); - tag != LBER_ERROR && tag != LBER_END_OF_SEQORSET; - tag = ber_next_element(ber, &len, lasti)) - { - long csntype_tmp; - /* Each CSN is in a sequence that includes a csntype and CSN */ - len = CSN_STRSIZE; - if (ber_scanf(ber, "{es}", &csntype_tmp, csnstring, &len) == -1) - { - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "my_ber_scanf_value BAD 7 - bval is %s\n", attrval->bv_val); - goto loser; - } - switch (csntype_tmp) - { - case CSN_TYPE_VALUE_UPDATED_ON_WIRE: - csntype = CSN_TYPE_VALUE_UPDATED; - break; - case CSN_TYPE_VALUE_DELETED_ON_WIRE: - csntype = CSN_TYPE_VALUE_DELETED; - break; - case CSN_TYPE_VALUE_DISTINGUISHED_ON_WIRE: - csntype = CSN_TYPE_VALUE_DISTINGUISHED; - break; - default: - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "Error: preposterous CSN type " - "%d received during total update.\n", csntype_tmp); - goto loser; - } - csn = csn_new_by_string(csnstring); - if (csn == NULL) - { - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "my_ber_scanf_value BAD 8\n"); - goto loser; - } - value_add_csn(*value, csntype, csn); - csn_free (&csn); - } - - if (ber_scanf(ber, "}") == -1) /* End of annotated attribute value seq */ - { - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "my_ber_scanf_value BAD 10\n"); - goto loser; - } - - if (attrval) - ber_bvfree(attrval); - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_scanf_value\n", 0, 0, 0 ); - return 0; - -loser: - /* Free any stuff we allocated */ - if (csn) - csn_free (&csn); - if (attrval) - ber_bvfree(attrval); - if (value) - { - slapi_value_free (value); - } - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_scanf_value - loser\n", 0, 0, 0 ); - return -1; -} - -static int -my_ber_scanf_attr (BerElement *ber, Slapi_Attr **attr, PRBool *deleted) -{ - char *attrtype = NULL; - CSN *attr_deletion_csn = NULL; - PRBool val_deleted; - char *lasti; - unsigned long len; - unsigned long tag; - char *str; - int rc; - Slapi_Value *value; - - LDAPDebug( LDAP_DEBUG_TRACE, "=> my_ber_scanf_attr\n", 0, 0, 0 ); - - PR_ASSERT (ber && attr && deleted); - - /* allocate the attribute */ - *attr = slapi_attr_new (); - if (attr == NULL) - { - goto loser; - } - - if 
(ber_scanf(ber, "{a", &attrtype) == -1) /* Begin sequence for this attr */ - { - goto loser; - } - - - slapi_attr_init(*attr, attrtype); - slapi_ch_free ((void **)&attrtype); - - /* The attribute deletion CSN is next and is optional? */ - if (ber_peek_tag(ber, &len) == LBER_OCTETSTRING) - { - if (ber_scanf(ber, "a", &str) == -1) - { - goto loser; - } - attr_deletion_csn = csn_new_by_string(str); - slapi_ch_free((void **)&str); - } - - if (attr_deletion_csn) - { - rc = attr_set_deletion_csn(*attr, attr_deletion_csn); - csn_free (&attr_deletion_csn); - if (rc != 0) - { - goto loser; - } - } - - /* The "attribute deleted" flag is next, and is optional */ - if (ber_peek_tag(ber, &len) == LBER_BOOLEAN) - { - if (ber_scanf(ber, "b", deleted) == -1) - { - goto loser; - } - } - else /* default is present */ - { - *deleted = PR_FALSE; - } - - /* loop over the list of attribute values */ - for (tag = ber_first_element(ber, &len, &lasti); - tag != LBER_ERROR && tag != LBER_END_OF_SEQORSET; - tag = ber_next_element(ber, &len, lasti)) - { - - value = NULL; - if (my_ber_scanf_value(ber, &value, &val_deleted) == -1) - { - goto loser; - } - - if (val_deleted) - { - /* Add the value to the attribute */ - if (attr_add_deleted_value(*attr, value) == -1) /* attr has ownership of value */ - { - goto loser; - } - } - else - { - /* Add the value to the attribute */ - if (slapi_attr_add_value(*attr, value) == -1) /* attr has ownership of value */ - { - goto loser; - } - } - if (value) - slapi_value_free(&value); - } - - if (ber_scanf(ber, "}") == -1) /* End sequence for this attribute */ - { - goto loser; - } - - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_scanf_attr\n", 0, 0, 0 ); - return 0; -loser: - if (*attr) - slapi_attr_free (attr); - if (value) - slapi_value_free (&value); - - LDAPDebug( LDAP_DEBUG_TRACE, "<= my_ber_scanf_attr - loser\n", 0, 0, 0 ); - return -1; -} - -/* - * Extract the payload from a total update extended operation, - * decode it, and produce a Slapi_Entry structure representing a new - * entry to be added to the local database. 
- */ -static int -decode_total_update_extop(Slapi_PBlock *pb, Slapi_Entry **ep) -{ - BerElement *tmp_bere = NULL; - Slapi_Entry *e = NULL; - Slapi_Attr *attr = NULL; - char *str = NULL; - CSN *dn_csn = NULL; - struct berval *extop_value = NULL; - char *extop_oid = NULL; - unsigned long len; - char *lasto; - unsigned long tag; - int rc; - PRBool deleted; - - LDAPDebug( LDAP_DEBUG_TRACE, "=> decode_total_update_extop\n", 0, 0, 0 ); - - PR_ASSERT(NULL != pb); - PR_ASSERT(NULL != ep); - - slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_OID, &extop_oid); - slapi_pblock_get(pb, SLAPI_EXT_OP_REQ_VALUE, &extop_value); - - if (NULL == extop_oid || - strcmp(extop_oid, REPL_NSDS50_REPLICATION_ENTRY_REQUEST_OID) != 0 || - NULL == extop_value) - { - /* Bogus */ - goto loser; - } - - if ((tmp_bere = ber_init(extop_value)) == NULL) - { - goto loser; - } - - if ((e = slapi_entry_alloc()) == NULL) - { - goto loser; - } - - if (ber_scanf(tmp_bere, "{") == -1) /* Begin outer sequence */ - { - goto loser; - } - - /* The entry's uniqueid is first */ - if (ber_scanf(tmp_bere, "a", &str) == -1) - { - goto loser; - } - slapi_entry_set_uniqueid(e, str); - str = NULL; /* Slapi_Entry now owns the uniqueid */ - - /* The entry's DN is next */ - if (ber_scanf(tmp_bere, "a", &str) == -1) - { - goto loser; - } - slapi_entry_set_dn(e, str); - str = NULL; /* Slapi_Entry now owns the dn */ - - /* Get the attributes */ - for ( tag = ber_first_element( tmp_bere, &len, &lasto ); - tag != LBER_ERROR && tag != LBER_END_OF_SEQORSET; - tag = ber_next_element( tmp_bere, &len, lasto ) ) - { - - if (my_ber_scanf_attr (tmp_bere, &attr, &deleted) != 0) - { - goto loser; - } - - /* Add the attribute to the entry */ - if (deleted) - entry_add_deleted_attribute_wsi(e, attr); /* entry now owns attr */ - else - entry_add_present_attribute_wsi(e, attr); /* entry now owns attr */ - attr = NULL; - } - - if (ber_scanf(tmp_bere, "}") == -1) /* End sequence for this entry */ - { - goto loser; - } - - /* Check for ldapsubentries and tombstone entries to set flags properly */ - slapi_entry_attr_find(e, "objectclass", &attr); - if (attr != NULL) { - struct berval bv; - bv.bv_val = "ldapsubentry"; - bv.bv_len = strlen(bv.bv_val); - if (slapi_attr_value_find(attr, &bv) == 0) { - slapi_entry_set_flag(e, SLAPI_ENTRY_LDAPSUBENTRY); - } - bv.bv_val = SLAPI_ATTR_VALUE_TOMBSTONE; - bv.bv_len = strlen(bv.bv_val); - if (slapi_attr_value_find(attr, &bv) == 0) { - slapi_entry_set_flag(e, SLAPI_ENTRY_FLAG_TOMBSTONE); - } - } - - /* If we get here, the entry is properly constructed. Return it. */ - - rc = 0; - *ep = e; - goto free_and_return; - -loser: - rc = -1; - /* slapi_ch_free accepts NULL pointer */ - slapi_ch_free((void **)&str); - - if (NULL != dn_csn) - { - csn_free(&dn_csn); - } - if (attr != NULL) - { - slapi_attr_free (&attr); - } - - if (NULL != e) - { - slapi_entry_free (e); - } - *ep = NULL; - slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "Error: could not decode extended " - "operation containing entry for total update.\n"); - -free_and_return: - if (NULL != tmp_bere) - { - ber_free(tmp_bere, 1); - tmp_bere = NULL; - } - LDAPDebug( LDAP_DEBUG_TRACE, "<= decode_total_update_extop\n", 0, 0, 0 ); - return rc; -} - -/* - * This plugin entry point is called whenever an NSDS50ReplicationEntry - * extended operation is received. 
- */ -int -___multimaster_extop_NSDS50ReplicationEntry(Slapi_PBlock *pb) -{ - int rc; - Slapi_Entry *e = NULL; - Slapi_Connection *conn = NULL; - int connid, opid; - - LDAPDebug( LDAP_DEBUG_TRACE, "=> ___multimaster_extop_NSDS50ReplicationEntry\n", 0, 0, 0 ); - - connid = 0; - slapi_pblock_get(pb, SLAPI_CONN_ID, &connid); - opid = 0; - slapi_pblock_get(pb, SLAPI_OPERATION_ID, &opid); - - /* Decode the extended operation */ - rc = decode_total_update_extop(pb, &e); - - if (0 == rc) - { -#ifdef notdef - /* - * Just spew LDIF so we're sure we got it right. Later we'll firehose - * this into the database import code - */ - int len; - char *str = slapi_entry2str_with_options(e, &len,SLAPI_DUMP_UNIQUEID); - puts(str); - free(str); -#endif - - rc = slapi_import_entry (pb, e); - /* slapi_import_entry return an LDAP error in case of problem - * LDAP_BUSY is used to indicate that the import queue is full - * and that flow control must happen to stop the supplier - * from sending entries - */ - if ((rc != LDAP_SUCCESS) && (rc != LDAP_BUSY)) - { - const char *dn = slapi_entry_get_dn_const(e); - slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name, - "Error %d: could not import entry dn %s " - "for total update operation conn=%d op=%d\n", - rc, dn, connid, opid); - rc = -1; - } - - } - else - { - slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name, - "Error %d: could not decode the total update extop " - "for total update operation conn=%d op=%d\n", - rc, connid, opid); - } - - if ((rc != 0) && (rc != LDAP_BUSY)) - { - /* just disconnect from the supplier. bulk import is stopped when - connection object is destroyed */ - slapi_pblock_get (pb, SLAPI_CONNECTION, &conn); - if (conn) - { - slapi_disconnect_server(conn); - } - - /* cleanup */ - if (e) - { - slapi_entry_free (e); - } - } - - LDAPDebug( LDAP_DEBUG_TRACE, "<= ___multimaster_extop_NSDS50ReplicationEntry\n", 0, 0, 0 ); - - return rc; -}
0
791ae2ed693312b0c79798fb3b06930d07c8f68a
389ds/389-ds-base
Change the way ldapserver links with SASL. Rather than specifying a hard-coded link path of /usr/lib, which will not work on 64-bit systems, don't specify a -L option for SASL when we want the linker to use the default path. The linker will find the system SASL in /usr/lib, /usr/lib64, or wherever it is.
commit 791ae2ed693312b0c79798fb3b06930d07c8f68a Author: Rich Megginson <[email protected]> Date: Fri Oct 13 23:03:27 2006 +0000 Change the way ldapserver links with SASL. Rather than specifying a hard coded link path of /usr/lib, which will not work on 64 bit systems, just don't specify a -L option for SASL if we want the linker to just use the default path. The linker will find the system SASL in /usr/lib or /usr/lib64 or wherever it is. diff --git a/components.mk b/components.mk index 860fa0880..efe62f779 100644 --- a/components.mk +++ b/components.mk @@ -365,8 +365,11 @@ LIBLDAP = $(addprefix $(LDAP_LIBPATH)/, $(LDAPOBJNAME)) ifeq ($(ARCH), Linux) ifeq ($(BUILD_ARCH), RHEL3) SASL_LIBPATH = /usr/kerberos/lib + SASL_LINK = -L$(SASL_LIBPATH) else - SASL_LIBPATH = /usr/lib +# just use default linker path + SASL_LIBPATH = + SASL_LINK = endif SASL_INCDIR = /usr/include/sasl else @@ -377,6 +380,7 @@ else SASL_LIBPATH = $(SASL_BUILD_DIR)/lib SASL_INCDIR = $(SASL_BUILD_DIR)/include/sasl endif + SASL_LINK = -L$(SASL_LIBPATH) endif SASL_INCLUDE = $(SASL_INCDIR) @@ -403,7 +407,7 @@ else endif endif - SASL_LINK = -L$(SASL_LIBPATH) -l$(SASL_LIB_ROOT_NAME) $(GSSAPI_LIBS) + SASL_LINK += -l$(SASL_LIB_ROOT_NAME) $(GSSAPI_LIBS) endif ###########################################################
0
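The Makefile change above boils down to one decision: only pass an explicit -L when SASL lives somewhere the linker would not search by default, and otherwise rely on the default search path so /usr/lib versus /usr/lib64 never has to be guessed. A small Python illustration of that decision (function name and paths are made up for the example; the real logic lives in components.mk):

# Sketch: build linker flags, adding -L only for a nonstandard SASL location.
def sasl_link_flags(sasl_libpath=None, sasl_lib="sasl2", gssapi_libs=()):
    flags = []
    if sasl_libpath:                 # e.g. /usr/kerberos/lib on older builds
        flags.append("-L" + sasl_libpath)
    flags.append("-l" + sasl_lib)
    flags.extend(gssapi_libs)
    return flags

# System SASL: no -L at all, just -lsasl2 (plus GSSAPI libs if any).
print(sasl_link_flags())
# Bundled or non-default SASL: -L<path> -lsasl2.
print(sasl_link_flags("/opt/sasl/lib"))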
696e2f7911dae50b0ec15dcd01015f712189ed7d
389ds/389-ds-base
Bug 613056 - fix Coverity Defect Type: Null pointer dereferences issues 11892 - 11939 https://bugzilla.redhat.com/show_bug.cgi?id=613056 Resolves: bug 613056 Bug description: Fix Coverity Defect Type: Null pointer dereferences issues 11892 - 11939 description: Catch possible NULL pointer in substr_dn_normalize_orig().
commit 696e2f7911dae50b0ec15dcd01015f712189ed7d Author: Endi S. Dewata <[email protected]> Date: Fri Jul 9 20:47:32 2010 -0500 Bug 613056 - fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939 https://bugzilla.redhat.com/show_bug.cgi?id=613056 Resolves: bug 613056 Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939 description: Catch possible NULL pointer in substr_dn_normalize_orig(). diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c index d067d8c35..180d8094a 100644 --- a/ldap/servers/slapd/dn.c +++ b/ldap/servers/slapd/dn.c @@ -333,6 +333,11 @@ substr_dn_normalize_orig( char *dn, char *end ) } } else if ( *s == '"' ) { state = B4SEPARATOR; + if (!value) { + LDAPDebug( LDAP_DEBUG_ANY, + "slapi_dn_normalize - missing value\n", 0, 0, 0 ); + break; + } if ( value_separator == dn /* 2 or more separators */ || ISSPACE( value[1] ) || ISSPACE( d[-1] ) ) { *d++ = *s;
0
3ec3639a80d4381459bc696649f8c30de32fcbc9
389ds/389-ds-base
Bug 622903 - fix Coverity Defect Type: Code maintainability issues https://bugzilla.redhat.com/show_bug.cgi?id=622903 Comment: The "p" assignments on lines 260, 264, and 268 can be removed because they are not needed.
commit 3ec3639a80d4381459bc696649f8c30de32fcbc9 Author: Noriko Hosoi <[email protected]> Date: Tue Aug 10 17:05:26 2010 -0700 Bug 622903 - fix coverity Defect Type: Code maintainability issues https://bugzilla.redhat.com/show_bug.cgi?id=622903 Comment: The "p" assignments on line 260, 264, and 268 can be removed because they are not needed. diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c index a73a53200..e67c07450 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c @@ -183,7 +183,6 @@ attr_index_config( char **index_rules = NULL; struct attrinfo *a; int return_value = -1; - char *p; int *substrlens = NULL; attrs = slapi_str2charray( argv[0], "," ); @@ -272,15 +271,15 @@ attr_index_config( Slapi_PBlock* pb = NULL; int do_continue = 0; /* can we skip the RULE parsing stuff? */ - if ((p = strstr(index_rules[j], INDEX_ATTR_SUBSTRBEGIN))) { + if (strstr(index_rules[j], INDEX_ATTR_SUBSTRBEGIN)) { _set_attr_substrlen(INDEX_SUBSTRBEGIN, index_rules[j], &substrlens); do_continue = 1; /* done with j - next j */ - } else if ((p = strstr(index_rules[j], INDEX_ATTR_SUBSTRMIDDLE))) { + } else if (strstr(index_rules[j], INDEX_ATTR_SUBSTRMIDDLE)) { _set_attr_substrlen(INDEX_SUBSTRMIDDLE, index_rules[j], &substrlens); do_continue = 1; /* done with j - next j */ - } else if ((p = strstr(index_rules[j], INDEX_ATTR_SUBSTREND))) { + } else if (strstr(index_rules[j], INDEX_ATTR_SUBSTREND)) { _set_attr_substrlen(INDEX_SUBSTREND, index_rules[j], &substrlens); do_continue = 1; /* done with j - next j */
0
193d79d4a459b709c5a55cea88794105fa60c453
389ds/389-ds-base
Ticket #48224 - redux - logconv.pl should handle *.tar.xz, *.txz, *.xz log files https://fedorahosted.org/389/ticket/48224 Reviewed by: Branch: master Fix Description: Fix Requires: in spec file Platforms tested: Fedora 21, RHEL 7.2 candidate Flag Day: no Doc impact: no
commit 193d79d4a459b709c5a55cea88794105fa60c453 Author: Rich Megginson <[email protected]> Date: Thu Jul 16 09:06:45 2015 -0600 Ticket #48224 - redux - logconv.pl should handle *.tar.xz, *.txz, *.xz log files https://fedorahosted.org/389/ticket/48224 Reviewed by: Branch: master Fix Description: Fix Requires: in spec file Platforms tested: Fedora 21, RHEL 7.2 candidate Flag Day: no Doc impact: no diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 5c64bd78c..64541f11d 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -121,9 +121,6 @@ Requires: perl-Socket6 Requires: perl-Socket %endif Requires: perl-NetAddr-IP -# for logconv compressed file support -Requires: perl-IO-Compress -Requires: perl-IO-Compress-Lzma Source0: http://port389.org/sources/%{name}-%{version}%{?prerel}.tar.bz2 # 389-ds-git.sh should be used to generate the source tarball from git
0
068ee18c9f18f198bbef4ca19c26913478d0d19d
389ds/389-ds-base
Ticket 47853 - Missing newline at the end of error log messages in memberof Description: Add missing newline characters at the end of error messages. https://fedorahosted.org/389/ticket/47853 Reviewed by: mreynolds
commit 068ee18c9f18f198bbef4ca19c26913478d0d19d Author: Mark Reynolds <[email protected]> Date: Tue Jul 15 12:28:18 2014 -0400 Ticket 437853 - Missing newline at end of the error log messages in memberof Description: Add missing newlines characters at the end of error messages. https://fedorahosted.org/389/ticket/47853 Reviewed by: mreynolds diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c index 526e95f46..654971808 100644 --- a/ldap/servers/plugins/memberof/memberof.c +++ b/ldap/servers/plugins/memberof/memberof.c @@ -558,7 +558,7 @@ int memberof_postop_del(Slapi_PBlock *pb) */ if((ret = memberof_del_dn_from_groups(pb, &configCopy, sdn))){ slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, - "memberof_postop_del: error deleting dn (%s) from group. Error (%d)", + "memberof_postop_del: error deleting dn (%s) from group. Error (%d)\n", slapi_sdn_get_dn(sdn),ret); memberof_unlock(); memberof_free_config(&configCopy); @@ -578,7 +578,7 @@ int memberof_postop_del(Slapi_PBlock *pb) { if((ret = memberof_del_attr_list(pb, &configCopy, sdn, attr))){ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, - "memberof_postop_del: error deleting attr list - dn (%s). Error (%d)", + "memberof_postop_del: error deleting attr list - dn (%s). Error (%d)\n", slapi_sdn_get_dn(sdn),ret); } @@ -1282,7 +1282,7 @@ int memberof_postop_add(Slapi_PBlock *pb) { if((ret = memberof_add_attr_list(pb, &configCopy, sdn, attr))){ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, - "memberof_postop_add: failed to add dn(%s), error (%d)", + "memberof_postop_add: failed to add dn(%s), error (%d)\n", slapi_sdn_get_dn(sdn), ret); break; }
0
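The diff above only appends "\n" to three messages, but the reason is worth spelling out: the error log is line-oriented, so an unterminated message runs straight into the next one. A tiny Python illustration (sys.stderr stands in for slapi_log_error; the message text is copied from the diff):

# Sketch: without a trailing newline, consecutive log messages share one line.
import sys

def log(msg):
    sys.stderr.write(msg)        # line-oriented log sink

log("memberof_postop_del: error deleting dn (uid=a) from group. Error (1)")
log("memberof_postop_del: error deleting attr list - dn (uid=a). Error (1)\n")
# Both messages land on a single log line; terminating each with "\n",
# as the commit does, keeps one message per line.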
36c88761a14eda3278167ca6ec76f65a3c80492a
389ds/389-ds-base
Fix redefinition of OP_FLAG found by Jenkins
commit 36c88761a14eda3278167ca6ec76f65a3c80492a Author: Ludwig Krispenz <[email protected]> Date: Fri Sep 4 10:43:08 2015 +0200 fix redefinition of OP_FLAG found by jenkins diff --git a/ldap/servers/plugins/acctpolicy/acctpolicy.h b/ldap/servers/plugins/acctpolicy/acctpolicy.h index 64f37fb7a..77da17810 100644 --- a/ldap/servers/plugins/acctpolicy/acctpolicy.h +++ b/ldap/servers/plugins/acctpolicy/acctpolicy.h @@ -21,8 +21,6 @@ Hewlett-Packard Development Company, L.P. #include <limits.h> /* ULONG_MAX */ #include "nspr.h" -#define SLAPI_OP_FLAG_BYPASS_REFERRALS 0x40000 - #define CFG_LASTLOGIN_STATE_ATTR "stateAttrName" #define CFG_ALT_LASTLOGIN_STATE_ATTR "altStateAttrName" #define CFG_SPEC_ATTR "specAttrName"
0
a26ba73fb5040383c27872997bc07ab0c2006459
389ds/389-ds-base
509472 db2index all does not reindex all the db backends correctly db2index all (internally called upgradedb) reads through the main db id2entry.db# and reindexes all the associated indexed attributes. The reindex borrows the import code, where each entry id is newly assigned. The new entry ids are consecutive. On the other hand, the entry ids of the entries in the db being reindexed are not. The borrowed import code assumes the entry id and the fifo index are tightly coupled, and the timing of writing to and reading from the fifo is calculated based upon that assumption. The fix revises that assumption: the highest entry id the foreman has processed is kept in ready_EID in the job structure, and each entry's id (entry->ep_id) is compared with ready_EID instead of ready_ID, which holds the sequential number. Additionally, I eliminated the unused variable "shift" from import_fifo_fetch. Also, _dblayer_delete_instance_dir now cleans up files and directories recursively.
commit a26ba73fb5040383c27872997bc07ab0c2006459 Author: Noriko Hosoi <[email protected]> Date: Thu Aug 6 13:16:01 2009 -0700 509472 db2index all does not reindex all the db backends correctly db2index all (internally, called upgradedb) reads through the main db id2entry.db# and reindex all the associated indexed attributes. The reindex borrows the import code where the entry id is newly assigned. The new entry id's are connective. On the other hand, entry id's of the entries in the db to be reindexed are not. The borrowed import code assumes the entry id and the index of the fifo are tightly coupled and the timing when the writing to and reading from the fifo are calculated based upon the assumption. The assumption should have been revised so that the entry id which is available up to is kept in ready_EID in the job structure and entry id from each entry (entry->ep_id) is compared with ready_EID instead of ready_ID that holds the sequential number. Additionally, I eliminated unused variable "shift" from import_fifo_fetch. Also, _dblayer_delete_instance_dir cleans up files and directories, recursively. diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c index 42642b24d..7cf407c41 100644 --- a/ldap/servers/slapd/back-ldbm/dblayer.c +++ b/ldap/servers/slapd/back-ldbm/dblayer.c @@ -4324,7 +4324,7 @@ static int _dblayer_delete_instance_dir(ldbm_instance *inst, int startdb) } else { - rval = PR_Delete(filename); + rval = ldbm_delete_dirs(filename); } } PR_CloseDir(dirhandle); diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c index b5e4e07fd..1e07147d4 100644 --- a/ldap/servers/slapd/back-ldbm/import-threads.c +++ b/ldap/servers/slapd/back-ldbm/import-threads.c @@ -651,11 +651,11 @@ void import_producer(void *param) old_ep = job->fifo.item[idx].entry; if (old_ep) { /* for the slot to be recycled, it needs to be already absorbed - * by the foreman (id >= ready_ID), and all the workers need to + * by the foreman (id >= ready_EID), and all the workers need to * be finished with it (refcount = 0). */ while (((old_ep->ep_refcnt > 0) || - (old_ep->ep_id >= job->ready_ID)) + (old_ep->ep_id >= job->ready_EID)) && (info->command != ABORT) && !(job->flags & FLAG_ABORT)) { info->state = WAITING; DS_Sleep(sleeptime); @@ -887,11 +887,11 @@ void index_producer(void *param) old_ep = job->fifo.item[idx].entry; if (old_ep) { /* for the slot to be recycled, it needs to be already absorbed - * by the foreman (id >= ready_ID), and all the workers need to + * by the foreman (id >= ready_EID), and all the workers need to * be finished with it (refcount = 0). 
*/ while (((old_ep->ep_refcnt > 0) || - (old_ep->ep_id >= job->ready_ID)) + (old_ep->ep_id >= job->ready_EID)) && (info->command != ABORT) && !(job->flags & FLAG_ABORT)) { info->state = WAITING; DS_Sleep(sleeptime); @@ -980,7 +980,7 @@ import_wait_for_space_in_fifo(ImportJob *job, size_t new_esize) for ( i = 0, slot_found = 0 ; i < job->fifo.size ; i++ ) { temp_ep = job->fifo.item[i].entry; if (temp_ep) { - if (temp_ep->ep_refcnt == 0 && temp_ep->ep_id <= job->ready_ID) { + if (temp_ep->ep_refcnt == 0 && temp_ep->ep_id <= job->ready_EID) { job->fifo.item[i].entry = NULL; if (job->fifo.c_bsize > job->fifo.item[i].esize) job->fifo.c_bsize -= job->fifo.item[i].esize; @@ -1094,7 +1094,6 @@ void import_foreman(void *param) int ret = 0; struct attrinfo *parentid_ai; Slapi_PBlock *pb = slapi_pblock_new(); - int shift = 0; PR_ASSERT(info != NULL); PR_ASSERT(inst != NULL); @@ -1136,10 +1135,13 @@ void import_foreman(void *param) info->state = RUNNING; /* Read that entry from the cache */ - fi = import_fifo_fetch(job, id, 0, shift); - if (! fi) { + fi = import_fifo_fetch(job, id, 0); + if (NULL == fi) { import_log_notice(job, "WARNING: entry id %d is missing", id); - shift++; + continue; + } + if (NULL == fi->entry) { + import_log_notice(job, "WARNING: entry for id %d is missing", id); continue; } @@ -1249,17 +1251,20 @@ void import_foreman(void *param) } - /* Remove the entry from the cache (caused by id2entry_add) */ - if (!(job->flags & FLAG_REINDEXING))/* reindex reads data from id2entry */ - cache_remove(&inst->inst_cache, fi->entry); + /* Remove the entry from the cache (Put in the cache in id2entry_add) */ + if (!(job->flags & FLAG_REINDEXING)) { + /* reindex reads data from id2entry */ + cache_remove(&inst->inst_cache, fi->entry); + } fi->entry->ep_refcnt = job->number_indexers; - cont: +cont: if (job->flags & FLAG_ABORT) { goto error; } job->ready_ID = id; + job->ready_EID = fi->entry->ep_id; info->last_ID_processed = id; id++; @@ -1377,7 +1382,7 @@ void import_worker(void *param) info->state = RUNNING; /* Read that entry from the cache */ - fi = import_fifo_fetch(job, id, 1, 0); + fi = import_fifo_fetch(job, id, 1); ep = fi ? fi->entry : NULL; if (!ep) { /* skipping an entry that turned out to be bad */ @@ -1706,9 +1711,9 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry) /* the producer could be running thru the fifo while * everyone else is cycling to a new pass... 
- * double-check that this entry is < ready_ID + * double-check that this entry is < ready_EID */ - while ((old_ep->ep_id >= job->ready_ID) && !(job->flags & FLAG_ABORT)) + while ((old_ep->ep_id >= job->ready_EID) && !(job->flags & FLAG_ABORT)) { DS_Sleep(PR_MillisecondsToInterval(import_sleep_time)); } diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c index 48e19c81d..8c334d434 100644 --- a/ldap/servers/slapd/back-ldbm/import.c +++ b/ldap/servers/slapd/back-ldbm/import.c @@ -95,23 +95,22 @@ static int import_fifo_init(ImportJob *job) return 0; } -FifoItem *import_fifo_fetch(ImportJob *job, ID id, int worker, int shift) +FifoItem *import_fifo_fetch(ImportJob *job, ID id, int worker) { int idx = id % job->fifo.size; FifoItem *fi; if (job->fifo.item) { - fi = &(job->fifo.item[idx]); + fi = &(job->fifo.item[idx]); } else { - return NULL; + return NULL; } if (fi->entry) { - if (id != fi->entry->ep_id - shift) - fi = NULL; - else if (worker) { - if (fi->bad) return NULL; - PR_ASSERT(fi->entry->ep_refcnt > 0); + if (worker && fi->bad) { + import_log_notice(job, "WARNING: bad entry: ID %d", id); + return NULL; } + PR_ASSERT(fi->entry->ep_refcnt > 0); } return fi; } diff --git a/ldap/servers/slapd/back-ldbm/import.h b/ldap/servers/slapd/back-ldbm/import.h index 0ae56c1db..aa3201ce7 100644 --- a/ldap/servers/slapd/back-ldbm/import.h +++ b/ldap/servers/slapd/back-ldbm/import.h @@ -124,6 +124,7 @@ typedef struct { ID first_ID; /* Import pass starts at this ID */ ID lead_ID; /* Highest ID available in the cache */ ID ready_ID; /* Highest ID the foreman is done with */ + ID ready_EID; /* Highest Entry ID the foreman is done with */ ID trailing_ID; /* Lowest ID still available in the cache */ int current_pass; /* un-merged pass number in a multi-pass import */ int total_pass; /* total pass number in a multi-pass import */ @@ -204,7 +205,7 @@ struct _import_worker_info { /* import.c */ -FifoItem *import_fifo_fetch(ImportJob *job, ID id, int worker, int shift); +FifoItem *import_fifo_fetch(ImportJob *job, ID id, int worker); void import_free_job(ImportJob *job); void import_log_notice(ImportJob *job, char *format, ...) #ifdef __GNUC__
0
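The core of the fix above is the recycling rule for fifo slots: during a reindex the stored entry ids are not consecutive, so slot reuse has to be gated on the entry's own id against ready_EID rather than on the sequential ready_ID. A simplified Python sketch of that rule (class and method names are invented; this is not the import code):

# Sketch: a slot may be reused only once the foreman has moved past the entry
# stored in it, judged by the entry's own id against ready_EID.
class Fifo:
    def __init__(self, size):
        self.size = size
        self.slots = [None] * size          # each slot holds (entry_id, entry)
        self.ready_eid = 0                  # entry ids below this are done

    def can_recycle(self, idx):
        item = self.slots[idx]
        return item is None or item[0] < self.ready_eid

    def put(self, entry_id, entry):
        idx = entry_id % self.size
        assert self.can_recycle(idx), "producer must wait: foreman not done yet"
        self.slots[idx] = (entry_id, entry)

fifo = Fifo(4)
fifo.put(10, "entry-10")                    # reindexed ids can be sparse: 10, 25, ...
fifo.ready_eid = 11                         # foreman has moved past entry id 10
fifo.put(25, "entry-25")                    # 25 % 4 == 1, a different slot
assert fifo.can_recycle(10 % 4)             # slot holding id 10 may now be reused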
47c59c424a1246e1d1dd2fb1605991081db685c9
389ds/389-ds-base
Bug 611790 - fix Coverity Defect Type: Null pointer dereferences issues 11940 - 12166 https://bugzilla.redhat.com/show_bug.cgi?id=611790 Resolves: bug 611790 Bug description: Fix Coverity Defect Type: Null pointer dereferences issues 11940 - 12166 Fix description: Catch possible NULL pointer in map_dn_values(), map_entry_dn_outbound(), and windows_generate_update_mods().
commit 47c59c424a1246e1d1dd2fb1605991081db685c9 Author: Endi S. Dewata <[email protected]> Date: Tue Jul 6 12:19:49 2010 -0500 Bug 611790 - fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166 https://bugzilla.redhat.com/show_bug.cgi?id=611790 Resolves: bug 611790 Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166 Fix description: Catch possible NULL pointer in map_dn_values(), map_entry_dn_outbound(), and windows_generate_update_mods(). diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c index ba7559cdb..2d71a55eb 100644 --- a/ldap/servers/plugins/replication/windows_protocol_util.c +++ b/ldap/servers/plugins/replication/windows_protocol_util.c @@ -428,6 +428,11 @@ map_dn_values(Private_Repl_Protocol *prp,Slapi_ValueSet *original_values, Slapi_ } /* Make a sdn from the string */ original_dn = slapi_sdn_new_dn_byref(original_dn_string); + if (!original_dn) { + slapi_log_error(SLAPI_LOG_REPL, NULL, "map_dn_values: unable to create Slapi_DN from %s.\n", original_dn_string); + return; + } + if (to_windows) { Slapi_Entry *local_entry = NULL; @@ -526,10 +531,7 @@ map_dn_values(Private_Repl_Protocol *prp,Slapi_ValueSet *original_values, Slapi_ } /* If not then we skip it */ i = slapi_valueset_next_value(original_values,i,&original_value); - if (original_dn) - { - slapi_sdn_free(&original_dn); - } + slapi_sdn_free(&original_dn); }/* while */ if (new_vs) { @@ -3289,7 +3291,13 @@ map_entry_dn_outbound(Slapi_Entry *e, Slapi_DN **dn, Private_Repl_Protocol *prp, int rc = 0; Slapi_Entry *remote_entry = NULL; new_dn = make_dn_from_guid(guid, is_nt4, suffix); - slapi_ch_free_string(&guid); + if (!new_dn) { + slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, + "%s: map_entry_dn_outbound: unable to make dn from guid %s.\n", + agmt_get_long_name(prp->agmt), guid); + retval = -1; + goto done; + } /* There are certain cases where we will have a GUID, but the entry does not exist in * AD. This happens when you delete an entry, then add it back elsewhere in the tree * without removing the ntUniqueID attribute. 
We should verify that the entry really @@ -3337,9 +3345,7 @@ map_entry_dn_outbound(Slapi_Entry *e, Slapi_DN **dn, Private_Repl_Protocol *prp, new_dn_string = PR_smprintf("cn=%s,%s%s", cn_string, container_str, suffix); if (new_dn_string) { - if (new_dn) { - slapi_sdn_free(&new_dn); - } + slapi_sdn_free(&new_dn); new_dn = slapi_sdn_new_dn_byval(new_dn_string); PR_smprintf_free(new_dn_string); } @@ -3433,6 +3439,7 @@ map_entry_dn_outbound(Slapi_Entry *e, Slapi_DN **dn, Private_Repl_Protocol *prp, slapi_entry_free(remote_entry); } } +done: if (new_dn) { *dn = new_dn; @@ -4125,20 +4132,20 @@ windows_generate_update_mods(Private_Repl_Protocol *prp,Slapi_Entry *remote_entr /* Now do a compare on the values, generating mods to bring them into consistency (if any) */ /* We ignore any DNs that are outside the scope of the agreement (on both sides) */ slapi_attr_get_valueset(local_attr,&local_values); - map_dn_values(prp,local_values,&restricted_local_values,!to_windows,1); - if (restricted_local_values) - { - windows_generate_dn_value_mods(local_type,local_attr,smods,mapped_remote_values,restricted_local_values,do_modify); - slapi_valueset_free(restricted_local_values); - restricted_local_values = NULL; - } - slapi_valueset_free(mapped_remote_values); - mapped_remote_values = NULL; if (local_values) { + map_dn_values(prp,local_values,&restricted_local_values,!to_windows,1); + if (restricted_local_values) + { + windows_generate_dn_value_mods(local_type,local_attr,smods,mapped_remote_values,restricted_local_values,do_modify); + slapi_valueset_free(restricted_local_values); + restricted_local_values = NULL; + } slapi_valueset_free(local_values); local_values = NULL; } + slapi_valueset_free(mapped_remote_values); + mapped_remote_values = NULL; } } } else
0
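The commit above guards newly built DN objects before they are dereferenced and then frees them through a NULL-tolerant helper. Below is a minimal, self-contained C sketch of that defensive pattern; `dn_t`, `dn_new`, `dn_free`, and `map_one_value` are hypothetical stand-ins for the plugin's real Slapi_DN helpers, not the project's API.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for a parsed DN object. */
typedef struct { char *ndn; } dn_t;

/* May legitimately return NULL, e.g. on a malformed or empty input string. */
static dn_t *dn_new(const char *raw)
{
    if (raw == NULL || *raw == '\0') {
        return NULL;
    }
    dn_t *dn = calloc(1, sizeof(*dn));
    if (dn == NULL) {
        return NULL;
    }
    dn->ndn = strdup(raw);
    if (dn->ndn == NULL) {
        free(dn);
        return NULL;
    }
    return dn;
}

/* NULL-tolerant free, so callers can release unconditionally. */
static void dn_free(dn_t **dn)
{
    if (dn && *dn) {
        free((*dn)->ndn);
        free(*dn);
        *dn = NULL;
    }
}

/* Map one value: bail out early instead of dereferencing a NULL result. */
static int map_one_value(const char *raw)
{
    dn_t *dn = dn_new(raw);
    if (!dn) {
        fprintf(stderr, "map_one_value: unable to create DN from \"%s\"\n",
                raw ? raw : "(null)");
        return -1;                 /* caller skips this value */
    }
    printf("mapped: %s\n", dn->ndn);
    dn_free(&dn);                  /* unconditional free; helper tolerates NULL */
    return 0;
}

int main(void)
{
    map_one_value("uid=test,dc=example,dc=com");
    map_one_value("");             /* exercises the NULL path */
    return 0;
}
```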
54df38eeba00b97009e978c0030110f67825215a
389ds/389-ds-base
Ticket 50622 - ds_selinux_enabled may crash on suse Bug Description: SUSE doesn't have the python-selinux module by default, so this tool crashes as it can't find the python module for import. Fix Description: Attempt to import the library, and on failure return false for enabled. https://pagure.io/389-ds-base/issue/50622 Author: William Brown <[email protected]> Review by: mhonek (Thanks!)
commit 54df38eeba00b97009e978c0030110f67825215a Author: William Brown <[email protected]> Date: Wed Sep 25 12:19:37 2019 +1000 Ticket 50622 - ds_selinux_enabled may crash on suse Bug Description: SUSE doesn't have the python-selinux module by default, so this tool crashes as it can't find the python module for import. Fix Description: ATtempt to import the library, and on failure return false for enabled. https://pagure.io/389-ds-base/issue/50622 Author: William Brown <[email protected]> Review by: mhonek (Thanks!) diff --git a/ldap/admin/src/scripts/ds_selinux_enabled.in b/ldap/admin/src/scripts/ds_selinux_enabled.in index 54a79b054..67206608d 100755 --- a/ldap/admin/src/scripts/ds_selinux_enabled.in +++ b/ldap/admin/src/scripts/ds_selinux_enabled.in @@ -13,11 +13,13 @@ import sys -import selinux -import semanage # Returns 1 for true, 0 for false. -sys.exit(selinux.is_selinux_enabled()) +try: + import selinux + sys.exit(selinux.is_selinux_enabled()) +except ImportError: + sys.exit(0)
0
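The fix above degrades gracefully when an optional module is missing instead of crashing; the actual fix is the Python try/except shown in the diff. As a rough C analogue of the same idea, an optional shared library can be probed at runtime and its absence treated as "disabled". The soname and symbol name below are assumptions for illustration only.

```c
/* Build with: cc probe.c -ldl */
#include <dlfcn.h>
#include <stdio.h>

/* Returns 1 if SELinux support is available and enabled, 0 otherwise.
 * Probing at runtime means a missing package reports "disabled"
 * rather than causing a hard failure. */
static int selinux_enabled(void)
{
    void *lib = dlopen("libselinux.so.1", RTLD_NOW);   /* assumed soname */
    if (!lib) {
        return 0;                   /* library not installed: report disabled */
    }
    int (*is_enabled)(void) = (int (*)(void))dlsym(lib, "is_selinux_enabled");
    int rc = is_enabled ? (is_enabled() > 0) : 0;
    dlclose(lib);
    return rc;
}

int main(void)
{
    printf("selinux enabled: %d\n", selinux_enabled());
    return 0;
}
```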
f5fd00f684729b8fc0eeee4c2f80362e3f0e75d6
389ds/389-ds-base
Issue 4859 - Don't version libns-dshttpd Description: On every build libns-dshttpd has a version corresponding to the package version, e.g. libns-dshttpd-2.0.7.so. It's unnecessary, as we are the only consumers of this library and we don't change its ABI on every build. It also triggers rpmdiff test failures that have to be waived on each build. Fixes: https://github.com/389ds/389-ds-base/issues/4859 Reviewed by: @mreynolds389 (Thanks!)
commit f5fd00f684729b8fc0eeee4c2f80362e3f0e75d6 Author: Viktor Ashirov <[email protected]> Date: Thu Aug 5 18:23:11 2021 +0200 Issue 4859 - Don't version libns-dshttpd Description: On every build libns-dshttpd has a version corresponding to the package version, e.g. libns-dshttpd-2.0.7.so. It's unnecessary, as we are the only consumers of this library and we don't change its ABI on every build. It also triggers rpmdiff test failures that have to be waived on each build. Fixes: https://github.com/389ds/389-ds-base/issues/4859 Reviewed by: @mreynolds389 (Thanks!) diff --git a/Makefile.am b/Makefile.am index e2dfd751c..4e8e746ac 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1077,8 +1077,7 @@ libns_dshttpd_la_SOURCES = lib/libaccess/access_plhash.cpp \ libns_dshttpd_la_CPPFLAGS = -I$(srcdir)/include/base $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) -I$(srcdir)/lib/ldaputil libns_dshttpd_la_LIBADD = libslapd.la libldaputil.la $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) -# Mark that this is a per version library. -libns_dshttpd_la_LDFLAGS = -release @PACKAGE_VERSION@ +libns_dshttpd_la_LDFLAGS = $(AM_LDFLAGS) #------------------------ # libslapd diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index 3bba6e045..aad05e3eb 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -638,7 +638,7 @@ fi %dir %{_libdir}/%{pkgname} %{_libdir}/libsvrcore.so.* %{_libdir}/%{pkgname}/libslapd.so.* -%{_libdir}/%{pkgname}/libns-dshttpd-*.so +%{_libdir}/%{pkgname}/libns-dshttpd.so.* %{_libdir}/%{pkgname}/libldaputil.so.* %{_libdir}/%{pkgname}/librewriters.so* %if %{bundle_jemalloc}
0
be34548e9d9f61ef98086a1ab67ad15f63eb0c05
389ds/389-ds-base
Bug 697027 - 9 - minor memory leaks found by Valgrind + TET https://bugzilla.redhat.com/show_bug.cgi?id=697027 [Case 9] Description: 1) Adding more checks to snmp_collator_stop and snmp_collator_sem_wait before proceeding the operation. 2) Fixing a return value for an error case in plugin_enabled -- in case a plugin entry did not exist, plugin_enabled used to return "enabled".
commit be34548e9d9f61ef98086a1ab67ad15f63eb0c05 Author: Noriko Hosoi <[email protected]> Date: Fri Apr 15 13:42:42 2011 -0700 Bug 697027 - 9 - minor memory leaks found by Valgrind + TET https://bugzilla.redhat.com/show_bug.cgi?id=697027 [Case 9] Description: 1) Adding more checks to snmp_collator_stop and snmp_collator_sem_wait before proceeding the operation. 2) Fixing a return value for an error case in plugin_enabled -- in case a plugin entry did not exist, plugin_enabled used to return "enabled". diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c index d60d1913c..cc88dc4c2 100644 --- a/ldap/servers/slapd/plugin.c +++ b/ldap/servers/slapd/plugin.c @@ -2990,6 +2990,7 @@ plugin_enabled(const char *plugin_name, void *identity) slapi_search_internal_pb(search_pb); slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); if (LDAP_SUCCESS != rc) { /* plugin is not available */ + rc = 0; /* disabled, by default */ goto bail; } diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c index 9c485de41..6a059b965 100644 --- a/ldap/servers/slapd/snmp_collator.c +++ b/ldap/servers/slapd/snmp_collator.c @@ -481,6 +481,10 @@ int snmp_collator_stop() { int err; + if (snmp_collator_stopped) { + return 0; + } + /* Abort any pending events */ slapi_eq_cancel(snmp_eq_ctx); snmp_collator_stopped = 1; @@ -506,7 +510,9 @@ int snmp_collator_stop() sem_unlink(stats_sem_name); /* delete lock */ - PR_DestroyLock(interaction_table_mutex); + if (interaction_table_mutex) { + PR_DestroyLock(interaction_table_mutex); + } #ifdef _WIN32 /* send the event so server down trap gets set on NT */ @@ -570,6 +576,12 @@ snmp_collator_sem_wait() int i = 0; int got_sem = 0; + if (SEM_FAILED == stats_sem) { + LDAPDebug1Arg(LDAP_DEBUG_ANY, + "semaphore for stats file (%s) is not available.\n", szStatsFile); + return; + } + for (i=0; i < SNMP_NUM_SEM_WAITS; i++) { if (sem_trywait(stats_sem) == 0) { got_sem = 1;
0
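Two ideas appear in the commit above: make the stop routine idempotent, and release only the resources that were actually created. A small sketch of that shape is below, assuming hypothetical module state (`collator_stopped`, `interaction_lock`) rather than the server's real lock and semaphore handles.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical module-level state; the real code guards a lock and a semaphore. */
static int collator_stopped = 0;
static void *interaction_lock = NULL;     /* may never have been created */

static int collator_stop(void)
{
    if (collator_stopped) {
        return 0;                         /* repeat calls become a harmless no-op */
    }
    collator_stopped = 1;

    /* Only release resources that were actually allocated. */
    if (interaction_lock != NULL) {
        free(interaction_lock);
        interaction_lock = NULL;
    }
    return 0;
}

int main(void)
{
    collator_stop();
    collator_stop();                      /* second call must not double-free */
    puts("stopped cleanly twice");
    return 0;
}
```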
b969287dda45bbd511a85cb2227e2ad2779e82b5
389ds/389-ds-base
Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053 https://bugzilla.redhat.com/show_bug.cgi?id=619122 Resolves: bug 619122 Bug description: fix coverify Defect Type: Resource leaks issues CID 12008. description: The dna_is_replica_bind_dn() has been modified to release resources before it returns.
commit b969287dda45bbd511a85cb2227e2ad2779e82b5 Author: Endi S. Dewata <[email protected]> Date: Thu Jul 29 17:55:19 2010 -0500 Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053 https://bugzilla.redhat.com/show_bug.cgi?id=619122 Resolves: bug 619122 Bug description: fix coverify Defect Type: Resource leaks issues CID 12008. description: The dna_is_replica_bind_dn() has been modified to release resources before it returns. diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c index 6fc222a5f..5838cd747 100644 --- a/ldap/servers/plugins/dna/dna.c +++ b/ldap/servers/plugins/dna/dna.c @@ -2322,7 +2322,8 @@ static int dna_is_replica_bind_dn(char *range_dn, char *bind_dn) slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM, "dna_is_replica_bind_dn: failed to create " "replica dn for %s\n", be_suffix); - return 1; + ret = 1; + goto done; } replica_sdn = slapi_sdn_new_dn_passin(replica_dn); @@ -2350,6 +2351,7 @@ static int dna_is_replica_bind_dn(char *range_dn, char *bind_dn) } } +done: slapi_entry_free(e); slapi_sdn_free(&range_sdn); slapi_sdn_free(&replica_sdn);
0
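The leak fix above replaces an early `return` with a jump to a shared cleanup label so temporaries allocated earlier are always released. Below is a generic single-exit sketch of that pattern; the names are hypothetical and a string comparison stands in for the real replica-DN logic.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Compute a DN, compare it against bind_dn, and release temporaries on
 * every path. Allocation failures return 1, mirroring the original's
 * conservative error handling. */
static int is_replica_bind_dn(const char *suffix, const char *bind_dn)
{
    int ret = 0;
    char *range_dn = NULL;
    char *replica_dn = NULL;
    size_t len;

    range_dn = strdup(suffix);                /* temporary #1 */
    if (range_dn == NULL) {
        ret = 1;
        goto done;
    }

    len = strlen(suffix) + sizeof("cn=replica,cn=,cn=mapping tree,cn=config");
    replica_dn = malloc(len);                 /* temporary #2 */
    if (replica_dn == NULL) {
        ret = 1;
        goto done;                            /* range_dn is still freed below */
    }
    snprintf(replica_dn, len, "cn=replica,cn=%s,cn=mapping tree,cn=config", suffix);

    ret = (strcmp(replica_dn, bind_dn) == 0);

done:
    /* Single exit point: everything allocated above is released exactly once. */
    free(range_dn);
    free(replica_dn);
    return ret;
}

int main(void)
{
    printf("%d\n", is_replica_bind_dn("dc=example,dc=com",
            "cn=replica,cn=dc=example,dc=com,cn=mapping tree,cn=config"));
    return 0;
}
```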
f2ada9482258f475b193218f1e122e4f0bd48853
389ds/389-ds-base
Issue 5176 - CI rewriter fails when libslapd.so.0 does not exist (#5177) Bug description: rewriter test assumes that the library libslapd.so.0 exists (/usr/lib64/dirsrv/libslapd.so.0). On 389-ds-base-2.0, only libslapd.so exists. Fix description: The test case should test if libslapd.so exists. If not, then fall back to libslapd.so.0 relates: https://github.com/389ds/389-ds-base/issues/5176 Reviewed by: Mark Reynolds Platforms tested: F35
commit f2ada9482258f475b193218f1e122e4f0bd48853 Author: tbordaz <[email protected]> Date: Thu Feb 24 18:13:42 2022 +0100 Issue 5176 - CI rewriter fails when libslapd.so.0 does not exist (#5177) Bug description: rewriter test assumes that the library libslapd.so.0 exists (/usr/lib64/dirsrv/libslapd.so.0). On 389-ds-base-2.0, only libslapd.so exists. Fix description: The test case should test if libslapd.so exists If not then fall back to libslapd.so.0 relates: https://github.com/389ds/389-ds-base/issues/5176 Reviewed by: Mark Reynolds Platforms tested: F35 diff --git a/dirsrvtests/tests/suites/rewriters/basic_test.py b/dirsrvtests/tests/suites/rewriters/basic_test.py index 61bbade24..ce89ab26e 100644 --- a/dirsrvtests/tests/suites/rewriters/basic_test.py +++ b/dirsrvtests/tests/suites/rewriters/basic_test.py @@ -30,7 +30,9 @@ def test_foo_filter_rewriter(topology_st): Test that example filter rewriter 'foo' is register and search use it """ - libslapd = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/libslapd.so.0') + libslapd = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/libslapd.so') + if not os.path.exists(libslapd): + libslapd = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/libslapd.so.0') # register foo filter rewriters topology_st.standalone.add_s(Entry(( "cn=foo_filter,cn=rewriters,cn=config", {
0
0f4979332c73b48eefa0ad36e87437c37bee668d
389ds/389-ds-base
Bug 658312 - Allow mapped attribute types to be quoted This patch enhances the way mapped attributes can be defined in a managed entry template. There are three scenarios that this patch adds support for. The first is to allow a mapping in the template entry that maps the DN like this: attr: $dn The second thing is to stop parsing an attribute variable at the first character that is not legal in an attribute name (see RFC 4512 for what is legal). This allows a mapping like this to work: attr: cn=$cn,dc=example,dc=com The third thing is to allow quoting of an attribute variable. This allows one to append anything to the end of a mapped attribute value, even if it begins with a character that is valid for use in an attribute name. An example of this sort of mapping is: attr: ${cn}test I also fixed a crash that could occur when one created an invalid managed entry template. The test managed entry that is created from the pending template ends up being NULL, but we still try to check if that entry violates the schema. If the test entry is not able to be created, we should not try to check it against the schema as that causes a NULL dereference.
commit 0f4979332c73b48eefa0ad36e87437c37bee668d Author: Nathan Kinder <[email protected]> Date: Tue Nov 30 14:59:48 2010 -0800 Bug 658312 - Allow mapped attribute types to be quoted This patch enhances the way mapped attributes can be defined in a managed entry template. There are three scenarios that this patch adds support for. The first is to allow a mapping in the template entry that maps the DN like this: attr: $dn The second thing is to stop parsing an attribute variable at the first character that is not legal in an attribute name (see RFC 4512 for what is legal). This allows a mapping like this to work: attr: cn=$cn,dc=example,dc=com The third thing is to allow quoting of an attribute variable. This allows one to append anything to the end of a mapped attribute value, even if it begins with a character that is valid for use in an attribute name. An example of this sort of mapping is: attr: ${cn}test I also fixed a crash that could occur when one created an invalid managed entry template. The test managed entry that is created from the pending template ends up being NULL, but we still try to check if that entry violates the schema. If the test entry is not able to be created, we should not try to check it against the schema as that causes a NULL dereference. diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c index cb329e44b..943633212 100644 --- a/ldap/servers/plugins/mep/mep.c +++ b/ldap/servers/plugins/mep/mep.c @@ -1325,19 +1325,48 @@ mep_parse_mapped_attr(char *mapping, Slapi_Entry *origin, /* This is an escaped $, so just skip it. */ p++; } else { + int quoted = 0; + /* We found a variable. Terminate the pre * string and process the variable. */ *p = '\0'; p++; + /* Check if the variable name is quoted. If it is, we skip past + * the quoting brace to avoid putting it in the mapped value. */ + if (*p == '{') { + quoted = 1; + if (p < end) { + p++; + } else { + slapi_log_error( SLAPI_LOG_FATAL, MEP_PLUGIN_SUBSYSTEM, + "mep_parse_mapped_attr: Invalid mapped " + "attribute value for type \"%s\".\n", mapping); + ret = 1; + goto bail; + } + } + /* We should be pointing at the variable name now. */ var_start = p; - /* Move the pointer to the end of the variable name. */ - while ((p < end) && !isspace(*p)) { + /* Move the pointer to the end of the variable name. We + * stop at the first character that is not legal for use + * in an attribute description. */ + while ((p < end) && IS_ATTRDESC_CHAR(*p)) { p++; } + /* If the variable is quoted and this is not a closing + * brace, there is a syntax error in the mapping rule. */ + if (quoted && (*p != '}')) { + slapi_log_error( SLAPI_LOG_FATAL, MEP_PLUGIN_SUBSYSTEM, + "mep_parse_mapped_attr: Invalid mapped " + "attribute value for type \"%s\".\n", mapping); + ret = 1; + goto bail; + } + /* Check for a missing variable name. */ if (p == var_start) { break; @@ -1352,7 +1381,13 @@ mep_parse_mapped_attr(char *mapping, Slapi_Entry *origin, if (p == end) { post_str = ""; } else { - post_str = p; + /* If the variable is quoted, don't include + * the closing brace in the post string. */ + if (quoted) { + post_str = p+1; + } else { + post_str = p; + } } /* We only support a single variable, so we're done. */ @@ -1363,7 +1398,14 @@ mep_parse_mapped_attr(char *mapping, Slapi_Entry *origin, if (map_type) { if (origin) { - char *map_val = slapi_entry_attr_get_charptr(origin, map_type); + char *map_val = NULL; + + /* If the map type is dn, fetch the origin dn. 
*/ + if (slapi_attr_type_cmp(map_type, "dn", SLAPI_TYPE_CMP_EXACT) == 0) { + map_val = slapi_entry_get_ndn(origin); + } else { + map_val = slapi_entry_attr_get_charptr(origin, map_type); + } if (map_val) { /* Create the new mapped value. */ @@ -1625,9 +1667,7 @@ mep_pre_op(Slapi_PBlock * pb, int modop) errstr = slapi_ch_smprintf("Changes result in an invalid " "managed entries template."); ret = LDAP_UNWILLING_TO_PERFORM; - } - - if (slapi_entry_schema_check(NULL, test_entry) != 0) { + } else if (slapi_entry_schema_check(NULL, test_entry) != 0) { errstr = slapi_ch_smprintf("Changes result in an invalid " "managed entries template due " "to a schema violation."); diff --git a/ldap/servers/plugins/mep/mep.h b/ldap/servers/plugins/mep/mep.h index 9cbd2044d..e2e680c71 100644 --- a/ldap/servers/plugins/mep/mep.h +++ b/ldap/servers/plugins/mep/mep.h @@ -89,6 +89,11 @@ #define MEP_TEMPLATE_OC "mepTemplateEntry" #define MEP_ORIGIN_OC "mepOriginEntry" +/* + * Helper defines + */ +#define IS_ATTRDESC_CHAR(c) ( isalnum(c) || (c == '.') || (c == ';') || (c == '-') ) + /* * Linked list of config entries. */
0
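The commit above defines a small substitution grammar: a variable name runs only over attribute-description characters, and a brace-quoted form (`${cn}test`) lets arbitrary text follow the variable. A standalone sketch of just the scanning step is below; `scan_variable` and its calling convention are invented for illustration and are not the plugin's parser.

```c
#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* RFC 4512-ish: characters legal inside an attribute description. */
static int is_attrdesc_char(int c)
{
    return isalnum(c) || c == '.' || c == ';' || c == '-';
}

/* Extract the variable name from a "$name" or "${name}" reference.
 * Returns the number of characters consumed (including braces), 0 on error. */
static size_t scan_variable(const char *p, char *name, size_t name_sz)
{
    const char *start = p;
    int quoted = 0;
    size_t n = 0;

    if (*p != '$') {
        return 0;
    }
    p++;
    if (*p == '{') {                       /* quoted form: ${cn}test */
        quoted = 1;
        p++;
    }
    while (is_attrdesc_char((unsigned char)*p) && n + 1 < name_sz) {
        name[n++] = *p++;                  /* stop at first non-attrdesc char */
    }
    name[n] = '\0';
    if (n == 0) {
        return 0;                          /* missing variable name */
    }
    if (quoted) {
        if (*p != '}') {
            return 0;                      /* unterminated ${...} */
        }
        p++;                               /* skip the closing brace */
    }
    return (size_t)(p - start);
}

int main(void)
{
    char name[64];
    const char *samples[] = { "$cn,dc=example,dc=com", "${cn}test", "$dn" };
    for (size_t i = 0; i < 3; i++) {
        size_t used = scan_variable(samples[i], name, sizeof(name));
        printf("%-24s -> var=\"%s\" (consumed %zu chars)\n",
               samples[i], used ? name : "<error>", used);
    }
    return 0;
}
```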
fa06ccebb7ae5b85403187a0362d2ea221708975
389ds/389-ds-base
Issue 5785 - CLI - arg completion is broken Bug Description: Files installed by 389-ds-base under /usr/share/bash-completion/completions are not owned by 389-ds-base rpm package. Fix Description: * Move the snippet for registering completions to %install section and install them under builddir. * Register bash completions in %files section so that they are owned by the package. Fixes: https://github.com/389ds/389-ds-base/issues/5785 Reviewed-by: @droideck (Thanks!)
commit fa06ccebb7ae5b85403187a0362d2ea221708975 Author: Viktor Ashirov <[email protected]> Date: Tue Jul 25 18:45:03 2023 +0200 Issue 5785 - CLI - arg completion is broken Bug Description: Files installed by 389-ds-base under /usr/share/bash-completion/completions are not owned by 389-ds-base rpm package. Fix Description: * Move the snippet for registering completions to %install section and install them under builddir. * Register bash completions in %files section so that they are owned by the package. Fixes: https://github.com/389ds/389-ds-base/issues/5785 Reviewed-by: @droideck (Thanks!) diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in index fa37515fc..6017a34e0 100644 --- a/rpm/389-ds-base.spec.in +++ b/rpm/389-ds-base.spec.in @@ -539,6 +539,13 @@ popd %endif +# Register CLI tools for bash completion +for clitool in dsconf dsctl dsidm dscreate ds-replcheck +do + register-python-argcomplete "${clitool}" > "${clitool}" + install -p -m 0644 -D -t '%{buildroot}%{bash_completions_dir}' "${clitool}" +done + mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname} mkdir -p $RPM_BUILD_ROOT/var/lib/%{pkgname} mkdir -p $RPM_BUILD_ROOT/var/lock/%{pkgname} \ @@ -584,12 +591,6 @@ else output2=/dev/null fi -# Register CLI tools for bash completion -for clitool in dsconf dsctl dsidm dscreate ds-replcheck -do - register-python-argcomplete "${clitool}" > "/usr/share/bash-completion/completions/${clitool}" -done - # reload to pick up any changes to systemd files /bin/systemctl daemon-reload >$output 2>&1 || : @@ -694,6 +695,7 @@ fi %{_mandir}/man1/dbscan.1.gz %{_bindir}/ds-replcheck %{_mandir}/man1/ds-replcheck.1.gz +%{bash_completions_dir}/ds-replcheck %{_bindir}/ds-logpipe.py %{_mandir}/man1/ds-logpipe.py.1.gz %{_bindir}/ldclt @@ -781,6 +783,10 @@ fi %{_sbindir}/dsidm %{_mandir}/man8/dsidm.8.gz %{_libexecdir}/%{pkgname}/dscontainer +%{bash_completions_dir}/dsctl +%{bash_completions_dir}/dsconf +%{bash_completions_dir}/dscreate +%{bash_completions_dir}/dsidm %if %{use_cockpit} %files -n cockpit-389-ds -f cockpit.list
0
4d60b688b286673ac4613c03b07e56deee3e0fa4
389ds/389-ds-base
Ticket #48048 - Fix coverity issues - 2015/2/24 Coverity defect 13060 - Explicit null dereferenced (FORWARD_NULL) Description: Added NULL check for creds. modified: chainingdb_bind in cb_bind.c
commit 4d60b688b286673ac4613c03b07e56deee3e0fa4 Author: Noriko Hosoi <[email protected]> Date: Tue Feb 24 15:26:55 2015 -0800 Ticket #48048 - Fix coverity issues - 2015/2/24 Coverity defect 13060 - Explicit null dereferenced (FORWARD_NULL) Description: Added NULL check for creds. modified: chainingdb_bind in cb_bind.c diff --git a/ldap/servers/plugins/chainingdb/cb_bind.c b/ldap/servers/plugins/chainingdb/cb_bind.c index 4c6a12bc3..4dc1a7271 100644 --- a/ldap/servers/plugins/chainingdb/cb_bind.c +++ b/ldap/servers/plugins/chainingdb/cb_bind.c @@ -257,7 +257,7 @@ chainingdb_bind( Slapi_PBlock *pb ) cb_send_ldap_result( pb, rc, NULL, NULL, 0, NULL ); if (ctrls) ldap_controls_free(ctrls); - return SLAPI_BIND_FAIL; + return SLAPI_BIND_FAIL; } if (ctrls) ldap_controls_free(ctrls); @@ -267,6 +267,10 @@ chainingdb_bind( Slapi_PBlock *pb ) slapi_pblock_get( pb, SLAPI_BIND_METHOD, &method ); slapi_pblock_get( pb, SLAPI_BIND_SASLMECHANISM, &mechanism); slapi_pblock_get( pb, SLAPI_BIND_CREDENTIALS, &creds ); + if (NULL == creds) { + cb_send_ldap_result( pb, rc, NULL, "No credentials", 0, NULL ); + return SLAPI_BIND_FAIL; + } slapi_pblock_get( pb, SLAPI_REQCONTROLS, &reqctrls ); cb = cb_get_instance(be);
0
e46749b77d95ad8fedf07d38890573b2862badf7
389ds/389-ds-base
Ticket 48681 - Use of uninitialized value in string ne at /usr/bin/logconv.pl line 2565, <$LOGFH> line 4 Bug description: The original fix for 48681 added a regression in regards to a perl warning every time you ran the script. That was due to a new hash for sasl binds that was not initialized. Fix Description: Check if the saslbind hash "exists" before checking its value. https://pagure.io/389-ds-base/issue/48681 Reviewed by: mreynolds (one line fix)
commit e46749b77d95ad8fedf07d38890573b2862badf7 Author: Mark Reynolds <[email protected]> Date: Thu Oct 19 14:44:38 2017 -0400 Ticket 48681 - Use of uninitialized value in string ne at /usr/bin/logconv.pl line 2565, <$LOGFH> line 4 Bug description: The original fix for 48681 added a regression in regards to perl warning everytime you ran the script. That was due to a new hash for sasl binds that was not initialized. Fix Description: Check is the saslbind hash "exists" before checking its value. https://pagure.io/389-ds-base/issue/48681 Reviewed by: mreynolds (one line fix) diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl index 4932db42e..473c71f21 100755 --- a/ldap/admin/src/logconv.pl +++ b/ldap/admin/src/logconv.pl @@ -2562,7 +2562,7 @@ sub parseLineNormal if ($_ =~ /conn= *([0-9A-Z]+) +op= *([0-9\-]+)/i){ $conn = $1; $op = $2; - if ($hashes->{saslconnop}->{$conn-$op} ne ""){ + if (exists $hashes->{saslconnop}->{$conn-$op} && $hashes->{saslconnop}->{$conn-$op} ne ""){ # This was a SASL BIND - record the dn if ($binddn ne ""){ if($binddn eq $rootDN){ $rootDNBindCount++; }
0
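The one-line Perl fix above checks that a hash entry exists before comparing its value. The equivalent guard in C terms is checking a lookup result for NULL before handing it to `strcmp`; the lookup table and names below are hypothetical.

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical lookup: returns the bind DN recorded for a conn-op key,
 * or NULL if no SASL bind was ever seen for that key. */
static const char *sasl_dn_lookup(const char *key)
{
    if (strcmp(key, "1-2") == 0) {
        return "cn=directory manager";
    }
    return NULL;
}

static void count_bind(const char *key, const char *root_dn, int *root_binds)
{
    const char *dn = sasl_dn_lookup(key);

    /* Guard first: comparing a missing entry would dereference NULL. */
    if (dn != NULL && strcmp(dn, root_dn) == 0) {
        (*root_binds)++;
    }
}

int main(void)
{
    int root_binds = 0;
    count_bind("1-2", "cn=directory manager", &root_binds);
    count_bind("7-9", "cn=directory manager", &root_binds);   /* no entry: ignored */
    printf("root DN binds: %d\n", root_binds);
    return 0;
}
```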
40ea59e4c2e81ce665d8c09f0104be1378de901d
389ds/389-ds-base
Resolves: 212671 Summary: Handle syncing multi-valued street attribute to AD.
commit 40ea59e4c2e81ce665d8c09f0104be1378de901d Author: Nathan Kinder <[email protected]> Date: Thu Aug 30 15:56:36 2007 +0000 Resolves: 212671 Summary: Handle syncing multi-valued street attribute to AD. diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c index e1b3d669d..573ee587d 100644 --- a/ldap/servers/plugins/replication/windows_protocol_util.c +++ b/ldap/servers/plugins/replication/windows_protocol_util.c @@ -1363,7 +1363,41 @@ windows_create_remote_entry(Private_Repl_Protocol *prp,Slapi_Entry *original_ent } } else { - slapi_entry_add_valueset(new_entry,new_type,vs); + Slapi_Attr *new_attr = NULL; + + /* AD treats streetAddress as a single-valued attribute, while we define it + * as a multi-valued attribute as it's defined in rfc 4519. We only + * sync the first value to AD to avoid a constraint violation. + */ + if (0 == slapi_attr_type_cmp(new_type, "streetAddress", SLAPI_TYPE_CMP_SUBTYPE)) { + if (slapi_valueset_count(vs) > 1) { + int i = 0; + const char *street_value = NULL; + Slapi_Value *value = NULL; + Slapi_Value *new_value = NULL; + + i = slapi_valueset_first_value(vs,&value); + if (i >= 0) { + /* Dup the first value, trash the valueset, then copy + * in the dup'd value. */ + new_value = slapi_value_dup(value); + slapi_valueset_done(vs); + /* The below hands off the memory to the valueset */ + slapi_valueset_add_value_ext(vs, new_value, SLAPI_VALUE_FLAG_PASSIN); + } + } + } + + slapi_entry_add_valueset(new_entry,type,vs); + + /* Reset the type to new_type here. This is needed since + * slapi_entry_add_valueset will create the Slapi_Attrs using + * the schema definition, which can reset the type to something + * other than the type you pass into it. To be safe, we just + * create the attributes with the old type, then reset them. */ + if (slapi_entry_attr_find(new_entry, type, &new_attr) == 0) { + slapi_attr_set_type(new_attr, new_type); + } } slapi_ch_free((void**)&new_type); } @@ -1574,6 +1608,29 @@ windows_map_mods_for_replay(Private_Repl_Protocol *prp,LDAPMod **original_mods, slapi_valueset_free(vs); } else { + /* AD treats streetAddress as a single-valued attribute, while we define it + * as a multi-valued attribute as it's defined in rfc 4519. We only + * sync the first value to AD to avoid a constraint violation. + */ + if (0 == slapi_attr_type_cmp(mapped_type, "streetAddress", SLAPI_TYPE_CMP_SUBTYPE)) { + Slapi_Mod smod; + struct berval *new_bval = NULL; + + slapi_mod_init_byref(&smod,mod); + + /* Check if there is more than one value */ + if (slapi_mod_get_num_values(&smod) > 1) { + new_bval = slapi_mod_get_first_value(&smod); + /* Remove all values except for the first */ + while (slapi_mod_get_next_value(&smod)) { + /* This modifies the bvalues in the mod itself */ + slapi_mod_remove_value(&smod); + } + } + + slapi_mod_done(&smod); + } + slapi_mods_add_modbvps(&mapped_smods,mod->mod_op,mapped_type,mod->mod_bvalues); } slapi_ch_free((void**)&mapped_type); @@ -1668,6 +1725,28 @@ attr_compare_equal(Slapi_Attr *a, Slapi_Attr *b, int n) return match; } +/* Returns non-zero if all of the values of attribute a are contained in attribute b. 
*/ +static int +attr_compare_present(Slapi_Attr *a, Slapi_Attr *b) +{ + int match = 1; + int i = 0; + Slapi_Value *va = NULL; + + /* Iterate through values in attr a and search for each in attr b */ + for (i = slapi_attr_first_value(a, &va); va && (i != -1); + i = slapi_attr_next_value(a, i, &va)) { + if (slapi_attr_value_find(b, slapi_value_get_berval(va)) != 0) { + /* This value wasn't found, so stop checking for values */ + match = 0; + break; + } + } + + return match; +} + + /* Helper functions for dirsync result processing */ /* Is this entry a tombstone ? */ @@ -2669,8 +2748,11 @@ windows_generate_update_mods(Private_Repl_Protocol *prp,Slapi_Entry *remote_entr /* AD has a legth contraint on the initials attribute, * so treat is as a special case. */ - if (0 == slapi_attr_type_cmp(type,"initials",SLAPI_TYPE_CMP_SUBTYPE) && !to_windows) { + if (0 == slapi_attr_type_cmp(type, "initials", SLAPI_TYPE_CMP_SUBTYPE) && !to_windows) { values_equal = attr_compare_equal(attr, local_attr, AD_INITIALS_LENGTH); + } else if (0 == slapi_attr_type_cmp(type, FAKE_STREET_ATTR_NAME, SLAPI_TYPE_CMP_SUBTYPE) && !to_windows) { + /* Need to check if attr is present in local_attr */ + values_equal = attr_compare_present(attr, local_attr); } else { /* Compare the entire attribute values */ values_equal = attr_compare_equal(attr, local_attr, 0); diff --git a/ldap/servers/slapd/attr.c b/ldap/servers/slapd/attr.c index 7cd5a2f58..e6b32d42e 100644 --- a/ldap/servers/slapd/attr.c +++ b/ldap/servers/slapd/attr.c @@ -699,7 +699,20 @@ slapi_attr_add_value(Slapi_Attr *a, const Slapi_Value *v) return 0; } -/* Make the valuset in SLapi_Attr be *vs--not a copy */ +int +slapi_attr_set_type(Slapi_Attr *a, const char *type) +{ + int rc = 0; + + if((NULL == a) || (NULL == type)) { + rc = -1; + } else { + a->a_type = slapi_ch_strdup(type); + } + return rc; +} + +/* Make the valuset in Slapi_Attr be *vs--not a copy */ int slapi_attr_set_valueset(Slapi_Attr *a, const Slapi_ValueSet *vs) { diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index c624b38c7..30f581b6c 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -418,6 +418,7 @@ int slapi_attr_get_numvalues( const Slapi_Attr *a, int *numValues); int slapi_attr_get_valueset(const Slapi_Attr *a, Slapi_ValueSet **vs); /* Make the valuset in Slapi_Attr be *vs--not a copy */ int slapi_attr_set_valueset(Slapi_Attr *a, const Slapi_ValueSet *vs); +int slapi_attr_set_type(Slapi_Attr *a, const char *type); int slapi_attr_get_bervals_copy( Slapi_Attr *a, struct berval ***vals ); char * slapi_attr_syntax_normalize( const char *s ); void slapi_valueset_set_valueset(Slapi_ValueSet *vs1, const Slapi_ValueSet *vs2);
0
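Two helpers are described above: trimming a multi-valued attribute to its first value before syncing to a peer that treats it as single-valued, and testing whether every value of one attribute is present in another. Below is a plain-C sketch over NULL-terminated string arrays, with invented names and no dependency on the server's value-set API.

```c
#include <stdio.h>
#include <string.h>

/* Keep only the first value of a NULL-terminated value list. */
static void keep_first_value(const char **vals)
{
    if (vals && vals[0]) {
        vals[1] = NULL;               /* peer treats the attribute as single-valued */
    }
}

/* Return 1 if every value in a[] also appears in b[]. */
static int values_subset(const char **a, const char **b)
{
    for (size_t i = 0; a[i]; i++) {
        int found = 0;
        for (size_t j = 0; b[j]; j++) {
            if (strcmp(a[i], b[j]) == 0) {
                found = 1;
                break;
            }
        }
        if (!found) {
            return 0;
        }
    }
    return 1;
}

int main(void)
{
    const char *street[] = { "1 Main St", "Suite 200", NULL };
    const char *local[]  = { "Suite 200", "1 Main St", NULL };

    printf("subset before trim: %d\n", values_subset(street, local));
    keep_first_value(street);
    printf("first value kept:   %s%s\n", street[0], street[1] ? " (more)" : "");
    return 0;
}
```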
2472169b3e077ff379812f9e91f439ce1e4edffb
389ds/389-ds-base
Bug 693962 - Full replica push loses some entries with multi-valued RDNs https://bugzilla.redhat.com/show_bug.cgi?id=693962 Resolves: bug 693962 Bug Description: Full replica push loses some entries with multi-valued RDNs Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: The code in _entryrdn_insert_key was assuming the srdn passed in was already normalized. This is not true in some cases where the data is coming from a source of old data such as replication with an older server. The solution is to make sure the rdn code always normalizes the code using slapi_dn_normalize_case_ext() instead of slapi_dn_normalize_case() which now doesn't do any normalization, it just converts the given string to lower case. I added a function normalize_case_helper() to return a normalized dn as a copy or in place depending on the arguments. Tested with gdb - stepped through and verified the char arrays are correctly replaced, and copy values are correctly assigned. Used valgrind with online import to verify no leaks or errors. Exported a netscaperoot ldif from an older 1.2.8 server and imported with the new code. Verified that the dbscan -f entryrdn.db4 output was identical between the two. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit 2472169b3e077ff379812f9e91f439ce1e4edffb Author: Rich Megginson <[email protected]> Date: Fri Apr 8 11:55:27 2011 -0600 Bug 693962 - Full replica push loses some entries with multi-valued RDNs https://bugzilla.redhat.com/show_bug.cgi?id=693962 Resolves: bug 693962 Bug Description: Full replica push loses some entries with multi-valued RDNs Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: The code in _entryrdn_insert_key was assuming the srdn passed in was already normalized. This is not true in some cases where the data is coming from a source of old data such as replication with an older server. The solution is to make sure the rdn code always normalizes the code using slapi_dn_normalize_case_ext() instead of slapi_dn_normalize_case() which now doesn't do any normalization, it just converts the given string to lower case. I added a function normalize_case_helper() to return a normalized dn as a copy or in place depending on the arguments. Tested with gdb - stepped through and verified the char arrays are correctly replaced, and copy values are correctly assigned. Used valgrind with online import to verify no leaks or errors. Exported a netscaperoot ldif from an older 1.2.8 server and imported with the new code. Verified that the dbscan -f entryrdn.db4 output was identical between the two. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/rdn.c b/ldap/servers/slapd/rdn.c index e8b79152d..758c16b72 100644 --- a/ldap/servers/slapd/rdn.c +++ b/ldap/servers/slapd/rdn.c @@ -614,6 +614,52 @@ slapi_rdn_get_rdn(const Slapi_RDN *srdn) return srdn->rdn; } +/* + * if src is set, make a copy and return in inplace + * if *inplace is set, try to use that in place, or + * free it and set to a new value + */ +static void +normalize_case_helper(const char *copy, char **inplace) +{ + int rc; + char **newdnaddr = NULL; + char *newdn = NULL; + char *dest = NULL; + size_t dest_len = 0; + + if (!inplace) { /* no place to put result */ + return; + } + + if (!copy && !*inplace) { /* no string to operate on */ + return; + } + + if (copy) { + newdn = slapi_ch_strdup(copy); + newdnaddr = &newdn; + } else { + newdnaddr = inplace; + } + + rc = slapi_dn_normalize_case_ext(*newdnaddr, 0, &dest, &dest_len); + if (rc < 0) { + /* we give up, just case normalize in place */ + slapi_dn_ignore_case(*newdnaddr); /* ignore case */ + } else if (rc == 0) { + /* dest points to *newdnaddr - normalized in place */ + *(dest + dest_len) = '\0'; + } else { + /* dest is a new string */ + slapi_ch_free_string(newdnaddr); + *newdnaddr = dest; + } + + *inplace = *newdnaddr; + return; +} + /* srdn is updated in the function, it cannot be const */ const char * slapi_rdn_get_nrdn(Slapi_RDN *srdn) @@ -624,8 +670,7 @@ slapi_rdn_get_nrdn(Slapi_RDN *srdn) } if (NULL == srdn->nrdn) { - srdn->nrdn = slapi_ch_strdup(srdn->rdn); - slapi_dn_normalize_case(srdn->nrdn); + normalize_case_helper(srdn->rdn, &srdn->nrdn); } return (const char *)srdn->nrdn; } @@ -668,7 +713,7 @@ slapi_rdn_get_first_ext(Slapi_RDN *srdn, const char **firstrdn, int flag) srdn->all_nrdns = charray_dup(srdn->all_rdns); for (ptr = srdn->all_nrdns; ptr && *ptr; ptr++) { - slapi_dn_normalize_case(*ptr); + normalize_case_helper(NULL, ptr); } } ptr = srdn->all_nrdns; @@ -720,7 +765,7 @@ slapi_rdn_get_last_ext(Slapi_RDN *srdn, const char **lastrdn, int flag) srdn->all_nrdns = charray_dup(srdn->all_rdns); for (ptr = srdn->all_nrdns; ptr && *ptr; ptr++) { - slapi_dn_normalize_case(*ptr); + normalize_case_helper(NULL, ptr); } } 
ptr = srdn->all_nrdns; @@ -946,7 +991,6 @@ slapi_rdn_replace_rdn(Slapi_RDN *srdn, char *new_rdn) slapi_ch_free_string(&(srdn->nrdn)); srdn->rdn = slapi_ch_strdup(new_rdn); srdn->nrdn = slapi_ch_strdup(srdn->rdn); - slapi_dn_normalize_case(srdn->nrdn); if (srdn->all_rdns) { @@ -985,7 +1029,7 @@ slapi_rdn_partial_dup(Slapi_RDN *from, Slapi_RDN **to, int rdnidx) from->all_nrdns = charray_dup(from->all_rdns); for (ptr = from->all_nrdns; ptr && *ptr; ptr++) { - slapi_dn_normalize_case(*ptr); + normalize_case_helper(NULL, ptr); } }
0
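The helper introduced above normalizes a DN either into a fresh copy of a caller-supplied string or in place over a string the caller already owns. The sketch below mimics that calling convention, with lowercasing standing in for real DN normalization; the function name and behavior details are assumptions.

```c
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* If copy != NULL: duplicate it, normalize the duplicate, store it in *inplace.
 * If copy == NULL: normalize the string already held in *inplace.
 * Lowercasing stands in for the real normalization step. */
static void normalize_helper(const char *copy, char **inplace)
{
    char *s;

    if (!inplace || (!copy && !*inplace)) {
        return;                               /* nothing to operate on */
    }
    if (copy) {
        s = strdup(copy);
        if (!s) {
            return;
        }
        free(*inplace);                       /* drop any previous value */
        *inplace = s;
    } else {
        s = *inplace;
    }
    for (; *s; s++) {
        *s = (char)tolower((unsigned char)*s);
    }
}

int main(void)
{
    char *ndn = NULL;
    normalize_helper("CN=Example,DC=Test", &ndn);       /* copy mode */
    printf("copy mode:     %s\n", ndn);

    char *owned = strdup("UID=Admin,DC=Test");
    normalize_helper(NULL, &owned);                     /* in-place mode */
    printf("in-place mode: %s\n", owned);

    free(ndn);
    free(owned);
    return 0;
}
```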
b28d8b4b5bc4496b25ad11121d30dfcf01b1ce63
389ds/389-ds-base
Ticket 47671 - CI lib389: allow to open a DirSrv without having to create the instance Bug Description: DSAdmin allowed to bind to an already existing instance without creation of the instance. Allocate() and open() require that a server-id is provided during Allocate. This prevents allocating a DirSrv to bind to an existing instance. Fix Description: Change Allocate so that SER_SERVERID_PROP is not mandatory. Change Open so that if self.serverid is not defined it retrieves it from the instance (self.inst) https://fedorahosted.org/389/ticket/47671 Reviewed by: Rich Megginson Platforms tested: F17/F19(jenkins) Flag Day: no Doc impact: no
commit b28d8b4b5bc4496b25ad11121d30dfcf01b1ce63 Author: Thierry bordaz (tbordaz) <[email protected]> Date: Mon Jan 13 17:10:44 2014 +0100 Ticket 47671 - CI lib389: allow to open a DirSrv without having to create the instance Bug Description: DSAdmin allowed to bind to an already existing instance without creation of the instance. Allocate() and open() require that a server-id is provided during Allocate. This prevents to allocate a DirSrv? to bind to an existing instance. Fix Description: Change Allocate so that SER_SERVERID_PROP is not mandatory. Change Open so that if self.serverid is not defined it retrieves it from the instance (self.inst) https://fedorahosted.org/389/ticket/47671 Reviewed by: Rich Megginson Platforms tested: F17/F19(jenkins) Flag Day: no Doc impact: no diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 6e2b35786..8db6aab9e 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -234,6 +234,13 @@ class DirSrv(SimpleLDAPObject): self.sroot, self.inst = match.groups() else: self.sroot = self.inst = '' + # In case DirSrv was allocated without creating the instance + # serverid is not set. Set it now from the config + if hasattr(self, 'serverid') and self.serverid: + assert self.serverid == self.inst + else: + self.serverid = self.inst + ent = self.getEntry('cn=config,' + DN_LDBM, attrlist=['nsslapd-directory']) self.dbdir = os.path.dirname(ent.getValue('nsslapd-directory')) @@ -355,7 +362,7 @@ class DirSrv(SimpleLDAPObject): The final state -> DIRSRV_STATE_ALLOCATED @param args - dictionary that contains the DirSrv properties properties are - SER_SERVERID_PROP: mandatory server id of the instance -> slapd-<serverid> + SER_SERVERID_PROP: used for offline op (create/delete/backup/start/stop..) 
-> slapd-<serverid> SER_HOST: hostname [LOCALHOST] SER_PORT: normal ldap port [DEFAULT_PORT] SER_SECURE_PORT: secure ldap port @@ -374,7 +381,7 @@ class DirSrv(SimpleLDAPObject): raise ValueError("invalid state for calling allocate: %s" % self.state) if SER_SERVERID_PROP not in args: - raise ValueError("%s is a mandatory parameter" % SER_SERVERID_PROP) + self.log.info('SER_SERVERID_PROP not provided') # Settings from args of server attributes self.host = args.get(SER_HOST, LOCALHOST) @@ -393,7 +400,7 @@ class DirSrv(SimpleLDAPObject): # Settings from args of server attributes - self.serverid = args.get(SER_SERVERID_PROP) + self.serverid = args.get(SER_SERVERID_PROP, None) self.groupid = args.get(SER_GROUP_ID, self.userid) self.backupdir = args.get(SER_BACKUP_INST_DIR, DEFAULT_BACKUPDIR) self.prefix = args.get(SER_DEPLOYED_DIR, None) @@ -657,7 +664,8 @@ class DirSrv(SimpleLDAPObject): @return - None - @raise ValueError - if it exist an instance with the same 'serverid' + @raise ValueError - if 'serverid' is missing or if it exist an + instance with the same 'serverid' """ # check that DirSrv was in DIRSRV_STATE_ALLOCATED state if self.state != DIRSRV_STATE_ALLOCATED: @@ -668,6 +676,9 @@ class DirSrv(SimpleLDAPObject): if len(props) != 0: raise ValueError("Error it already exists the instance (%s)" % props[0][CONF_INST_DIR]) + if not self.serverid: + raise ValueError("SER_SERVERID_PROP is missing, it is required to create an instance") + # Time to create the instance and retrieve the effective sroot self._createDirsrv(verbose=self.verbose) @@ -721,11 +732,8 @@ class DirSrv(SimpleLDAPObject): @return None - @raise ValueError - if the instance has not the right state or can not find the binddn to bind + @raise ValueError - if can not find the binddn to bind ''' - # check that DirSrv was in DIRSRV_STATE_OFFLINE or DIRSRV_STATE_ONLINE state - if self.state != DIRSRV_STATE_OFFLINE and self.state != DIRSRV_STATE_ONLINE: - raise ValueError("invalid state for calling open: %s" % self.state) uri = self.toLDAPURL() diff --git a/src/lib389/tests/dirsrv_test.py b/src/lib389/tests/dirsrv_test.py index 4650a4667..5a2fbb79d 100644 --- a/src/lib389/tests/dirsrv_test.py +++ b/src/lib389/tests/dirsrv_test.py @@ -62,24 +62,12 @@ class Test_dirsrv(): instance.log.debug("Instance allocated") assert instance.state == DIRSRV_STATE_INIT - # Check that SER_SERVERID_PROP is a mandatory parameter + # Allocate the instance args = {SER_HOST: LOCALHOST, SER_PORT: INSTANCE_PORT, - SER_DEPLOYED_DIR: INSTANCE_PREFIX + SER_DEPLOYED_DIR: INSTANCE_PREFIX, + SER_SERVERID_PROP: INSTANCE_SERVERID } - try: - instance.allocate(args) - except Exception as e: - instance.log.info('Allocate fails (normal): %s' % e.args) - assert type(e) == ValueError - assert e.args[0].find("%s is a mandatory parameter" % SER_SERVERID_PROP) >= 0 - pass - - # Check the state - assert instance.state == DIRSRV_STATE_INIT - - # Now do a successful allocate - args[SER_SERVERID_PROP] = INSTANCE_SERVERID instance.allocate(args) userid = pwd.getpwuid( os.getuid() )[ 0 ] @@ -162,8 +150,45 @@ class Test_dirsrv(): def test_offline_to_allocated(self): self.instance.delete() + + def test_allocated_to_online(self, verbose): + # Here the instance was already create, check we can connect to it + # without creating it (especially without serverid value) + # Allocate the instance + args = {SER_HOST: LOCALHOST, + SER_PORT: INSTANCE_PORT, + SER_DEPLOYED_DIR: INSTANCE_PREFIX, + SER_SERVERID_PROP: INSTANCE_SERVERID + } + self.instance.log.info("test_allocated_to_online: 
Create an instance") + self.instance = DirSrv(verbose=verbose) + assert not hasattr(self, 'serverid') + self.instance.allocate(args) + self.instance.create() + self.instance.open() + assert self.instance.serverid != None + + # The instance is create, allocate a new DirSrv + self.instance.log.info("test_allocated_to_online: instance New") + self.instance = DirSrv(verbose=verbose) + assert not hasattr(self, 'serverid') + assert self.instance.state == DIRSRV_STATE_INIT + + args = {SER_HOST: LOCALHOST, + SER_PORT: INSTANCE_PORT, + SER_DEPLOYED_DIR: INSTANCE_PREFIX, + } + self.instance.allocate(args) + self.instance.log.info("test_allocated_to_online: instance Allocated") + assert self.instance.serverid == None + assert self.instance.state == DIRSRV_STATE_ALLOCATED + + self.instance.open() + self.instance.log.info("test_allocated_to_online: instance online") + assert self.instance.serverid != None + assert self.instance.serverid == self.instance.inst + assert self.instance.state == DIRSRV_STATE_ONLINE - if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] test = Test_dirsrv() @@ -201,6 +226,8 @@ if __name__ == "__main__": test.test_offline_to_allocated() + test.test_allocated_to_online(verbose=False) + test.tearDown()
0
d594e010b125b427bf7afc666f620abafdbaa461
389ds/389-ds-base
Fix logic errors: del_mod should be latched (might not be last mod), and avoid skipping add-mods (int value 0) Reviewed by: rmeggins
commit d594e010b125b427bf7afc666f620abafdbaa461 Author: Ken Rossato <[email protected]> Date: Mon Aug 27 17:29:45 2012 -0400 Fix logic errors: del_mod should be latched (might not be last mod), and avoid skipping add-mods (int value 0) Reviewed by: rmeggins diff --git a/ldap/servers/plugins/posix-winsync/posix-group-func.c b/ldap/servers/plugins/posix-winsync/posix-group-func.c index 6a819e610..1403a899b 100644 --- a/ldap/servers/plugins/posix-winsync/posix-group-func.c +++ b/ldap/servers/plugins/posix-winsync/posix-group-func.c @@ -230,7 +230,7 @@ modGroupMembership(Slapi_Entry *entry, Slapi_Mods *smods, int *do_modify) Slapi_Mod *smod = NULL; Slapi_Mod *nextMod = slapi_mod_new(); - int del_mod = 0; + int del_mod = 0; /* Bool: was there a delete mod? */ char **smod_adduids = NULL; char **smod_deluids = NULL; @@ -243,13 +243,13 @@ modGroupMembership(Slapi_Entry *entry, Slapi_Mods *smods, int *do_modify) if (slapi_attr_types_equivalent(slapi_mod_get_type(smod), "uniqueMember")) { struct berval *bv; - del_mod = slapi_mod_get_operation(smod); for (bv = slapi_mod_get_first_value(smod); bv; bv = slapi_mod_get_next_value(smod)) { Slapi_Value *sv = slapi_value_new(); slapi_value_init_berval(sv, bv); /* copies bv_val */ if (SLAPI_IS_MOD_DELETE(slapi_mod_get_operation(smod))) { + del_mod = 1; slapi_ch_array_add(&smod_deluids, slapi_ch_strdup(slapi_value_get_string(sv))); slapi_log_error(SLAPI_LOG_PLUGIN, POSIX_WINSYNC_PLUGIN_NAME, @@ -267,11 +267,6 @@ modGroupMembership(Slapi_Entry *entry, Slapi_Mods *smods, int *do_modify) } } slapi_mod_free(&nextMod); - if (!del_mod) { - slapi_log_error(SLAPI_LOG_PLUGIN, POSIX_WINSYNC_PLUGIN_NAME, - "modGroupMembership: no uniquemember mod, nothing to do<==\n"); - return 0; - } slapi_log_error(SLAPI_LOG_PLUGIN, POSIX_WINSYNC_PLUGIN_NAME, "modGroupMembership: entry is posixGroup\n"); @@ -285,7 +280,7 @@ modGroupMembership(Slapi_Entry *entry, Slapi_Mods *smods, int *do_modify) int doModify = false; int j = 0; - if (SLAPI_IS_MOD_DELETE(del_mod) || smod_deluids != NULL) { + if (del_mod || smod_deluids != NULL) { do { /* Create a context to "break" from */ Slapi_Attr * mu_attr = NULL; /* Entry attributes */ rc = slapi_entry_attr_find(entry, "memberUid", &mu_attr);
0
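The logic fix above latches a boolean the moment any delete mod is seen, instead of remembering only the last operation code, which both misses earlier deletes and misreads an add (numeric value 0) as "nothing to do". A compact sketch of that distinction with hypothetical mod types:

```c
#include <stdio.h>

enum mod_op { MOD_ADD = 0, MOD_DELETE = 1, MOD_REPLACE = 2 };  /* add is 0 on purpose */

struct mod { enum mod_op op; const char *value; };

static void scan_mods(const struct mod *mods, size_t n)
{
    int saw_delete = 0;                      /* latched: once set, stays set */

    for (size_t i = 0; i < n; i++) {
        if (mods[i].op == MOD_DELETE) {
            saw_delete = 1;                  /* latch instead of overwriting */
            printf("delete: %s\n", mods[i].value);
        } else {
            printf("add/replace: %s\n", mods[i].value);
        }
    }
    /* Using the last op code as a truth value here would miss earlier deletes
     * and would also misread MOD_ADD (0) as "nothing to do". */
    printf("any delete seen: %s\n", saw_delete ? "yes" : "no");
}

int main(void)
{
    const struct mod mods[] = {
        { MOD_DELETE, "uid=alice" },
        { MOD_ADD,    "uid=bob"   },         /* last mod is an add (op == 0) */
    };
    scan_mods(mods, sizeof(mods) / sizeof(mods[0]));
    return 0;
}
```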
ac4309050fed643e962b21faa2cd76f6acd4fcf8
389ds/389-ds-base
Ticket 47878 - Improve setup-ds update logging Bug Description: If updating several instances using the --force option, and a particular instance fails to update, the script reports total failure even though the other instances were successfully updated. Fix Description: Improved the logging to say that the update was successful, and also clearly log the instances that were not updated. Also removed an unnecessary warning from 52updateAESplugin.pl https://fedorahosted.org/389/ticket/47878 Reviewed by: nhosoi(Thanks!)
commit ac4309050fed643e962b21faa2cd76f6acd4fcf8 Author: Mark Reynolds <[email protected]> Date: Fri Jun 19 16:22:46 2015 -0400 Ticket 47878 - Improve setup-ds update logging Bug Description: If updating several instances using the --force option, and a particular instance fails to update, the script reports total failure even though the other instances were successfully updated. Fix Description: Improved the logging to say that the update was successful, and also clearly log the instances that were not updated. Also removed an unnecessary warning from 52updateAESplugin.pl https://fedorahosted.org/389/ticket/47878 Reviewed by: nhosoi(Thanks!) diff --git a/ldap/admin/src/scripts/52updateAESplugin.pl b/ldap/admin/src/scripts/52updateAESplugin.pl index 6a8a885d5..ae258b637 100644 --- a/ldap/admin/src/scripts/52updateAESplugin.pl +++ b/ldap/admin/src/scripts/52updateAESplugin.pl @@ -6,6 +6,8 @@ use File::Basename; use File::Copy; use DSUtil qw(debug serverIsRunning); +no warnings 'experimental::smartmatch'; + # # Check if there is a DES plugin and make sure the AES plugin contains the same attributes # @@ -53,7 +55,7 @@ sub runinst { if($val eq ""){ last; } - if(!($val ~~ @attrs) ){ + if(!($val ~~ @attrs) ){ # smartmatch $attrs_to_add[$des_count] = $val; $des_count++; } diff --git a/ldap/admin/src/scripts/DSUpdate.pm.in b/ldap/admin/src/scripts/DSUpdate.pm.in index 208edfec7..6660f5cd9 100644 --- a/ldap/admin/src/scripts/DSUpdate.pm.in +++ b/ldap/admin/src/scripts/DSUpdate.pm.in @@ -270,17 +270,36 @@ sub updateDS { } # update each instance - for my $inst ($setup->getDirServers()) { + my @instances = $setup->getDirServers(); + my $inst_count = @instances; + my @failed_instances = (); + my $failed_count = 0; + for my $inst (@instances) { + debug(0, "Updating instance ($inst)...\n"); my @localerrs = updateDSInstance($inst, $inf, $setup->{configdir}, \@updates, $force); if (@localerrs) { # push array here because localerrs will likely be an array of # array refs already - push @errs, @localerrs; - if (!$force) { + $failed_count++; + if (!$force || $inst_count == 1) { + push @errs, @localerrs; return @errs; } + push @failed_instances, $inst; + debug(0, "Failed to update instance ($inst):\n---> @localerrs\n"); + } else { + debug(0, "Successfully updated instance ($inst).\n"); } } + if($failed_count && $failed_count == $inst_count){ + push @errs, ('error_update_all'); + return @errs; + } + if (@failed_instances){ + # list all the instances that were not updated + debug(0, "The following instances were not updated: (@failed_instances). "); + debug(0, "After fixing the problems you will need to rerun the setup script\n"); + } # run post-update hooks for my $upd (@updates) { @@ -344,7 +363,7 @@ sub updateDSInstance { $conn = new Mozilla::LDAP::Conn({ host => $host, port => $port, bind => $binddn, pswd => $bindpw, cert => $certdir, starttls => 1 }); if (!$conn) { - debug(0, "Could not open TLS connection to $host:$port - trying regular connection\n"); + debug(1, "Could not open TLS connection to $host:$port - trying regular connection\n"); $conn = new Mozilla::LDAP::Conn({ host => $host, port => $port, bind => $binddn, pswd => $bindpw }); } diff --git a/ldap/admin/src/scripts/setup-ds.res.in b/ldap/admin/src/scripts/setup-ds.res.in index f8157af2a..011bf3678 100644 --- a/ldap/admin/src/scripts/setup-ds.res.in +++ b/ldap/admin/src/scripts/setup-ds.res.in @@ -206,3 +206,4 @@ error_running_command = Error: command '%s' failed - output [%s] error [%s] error_opening_file = Opening file '%s' failed. 
Error: %s\n error_format_error = '%s' has invalid format.\n error_update_not_offline = Error: offline mode selected but the server [%s] is still running.\n +error_update_all = Failed to update all the Directory Server instances.\n
0
ee25b881225f788fc35d8ebdfa126e797960760c
389ds/389-ds-base
Ticket 49387 - pbkdf2 settings were too aggressive Bug Description: Our initial settings were too aggressive and caused some cpu latency issues. We should tone these down a bit, and then step them up slower. Fix Description: Decrease the test rounds at start up, lower the minimum to 2048, and decrease the time factor to 4 ms rather than 40. Cleanup to int types. https://pagure.io/389-ds-base/issue/49387 Author: wibrown Review by: mreynolds (Thanks mate!)
commit ee25b881225f788fc35d8ebdfa126e797960760c Author: William Brown <[email protected]> Date: Fri Sep 29 12:34:49 2017 +1000 Ticket 49387 - pbkdf2 settings were too aggressive Bug Description: Our initial settings were too aggresive and caused some cpu latency issues. We should tone these down a bit, and then step them up slower. Fix Description: Decrease the test rounds at start up, lower the minimum to 2048, and decrease the time factor to 4 ms rather than 40. Cleanup to int types. https://pagure.io/389-ds-base/issue/49387 Author: wibrown Review by: mreynolds (Thanks mate!) diff --git a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c index e9b584e4b..d310dc792 100644 --- a/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c +++ b/ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c @@ -43,24 +43,35 @@ * * At the same time we MUST increase this with each version of Directory Server * This value is written into the hash, so it's safe to change. + * + * So let's assume that we have 72 threads, and we want to process say ... 10,000 binds per + * second. At 72 threads, that's 138 ops per second per thread. This means each op have to take + * 7.2 milliseconds to complete. We know binds really are quicker, but for now, lets say this can + * be 2 milliseconds to time for. */ -#define PBKDF2_MILLISECONDS 40 +#define PBKDF2_MILLISECONDS 2 + +/* + * We would like to raise this, but today due to NSS issues we have to be conservative. Regardless + * it's still better than ssha512. + */ +#define PBKDF2_MINIMUM 2048 -static PRUint32 PBKDF2_ITERATIONS = 30000; +static uint32_t PBKDF2_ITERATIONS = 8192; static const char *schemeName = PBKDF2_SHA256_SCHEME_NAME; -static const PRUint32 schemeNameLength = PBKDF2_SHA256_NAME_LEN; +static const uint32_t schemeNameLength = PBKDF2_SHA256_NAME_LEN; /* For requesting the slot which supports these types */ static CK_MECHANISM_TYPE mechanism_array[] = {CKM_SHA256_HMAC, CKM_PKCS5_PBKD2}; /* Used in our startup benching code */ -#define PBKDF2_BENCH_ROUNDS 50000 -#define PBKDF2_BENCH_LOOP 10 +#define PBKDF2_BENCH_ROUNDS 25000 +#define PBKDF2_BENCH_LOOP 8 void -pbkdf2_sha256_extract(char *hash_in, SECItem *salt, PRUint32 *iterations) +pbkdf2_sha256_extract(char *hash_in, SECItem *salt, uint32_t *iterations) { /* * This will take the input of hash_in (generated from pbkdf2_sha256_hash) and @@ -78,7 +89,7 @@ pbkdf2_sha256_extract(char *hash_in, SECItem *salt, PRUint32 *iterations) } SECStatus -pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *salt, PRUint32 iterations) +pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *salt, uint32_t iterations) { SECItem *result = NULL; SECAlgorithmID *algid = NULL; @@ -96,7 +107,7 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s PK11_FreeSlot(slot); if (symkey == NULL) { /* We try to get the Error here but NSS has two or more error interfaces, and sometimes it uses none of them. */ - PRInt32 status = PORT_GetError(); + int32_t status = PORT_GetError(); slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "Unable to retrieve symkey from NSS. Error code might be %d ???\n", status); slapi_log_err(SLAPI_LOG_ERR, (char *)schemeName, "The most likely cause is your system has nss 3.21 or lower. 
PBKDF2 requires nss 3.22 or higher.\n"); return SECFailure; @@ -131,7 +142,7 @@ pbkdf2_sha256_hash(char *hash_out, size_t hash_out_len, SECItem *pwd, SECItem *s } char * -pbkdf2_sha256_pw_enc_rounds(const char *pwd, PRUint32 iterations) +pbkdf2_sha256_pw_enc_rounds(const char *pwd, uint32_t iterations) { char hash[PBKDF2_TOTAL_LENGTH]; size_t encsize = 3 + schemeNameLength + LDIF_BASE64_LEN(PBKDF2_TOTAL_LENGTH); @@ -186,16 +197,16 @@ pbkdf2_sha256_pw_enc(const char *pwd) return pbkdf2_sha256_pw_enc_rounds(pwd, PBKDF2_ITERATIONS); } -PRInt32 +int32_t pbkdf2_sha256_pw_cmp(const char *userpwd, const char *dbpwd) { - PRInt32 result = 1; /* Default to fail. */ + int32_t result = 1; /* Default to fail. */ char dbhash[PBKDF2_TOTAL_LENGTH] = {0}; char userhash[PBKDF2_HASH_LENGTH] = {0}; - PRUint32 dbpwd_len = strlen(dbpwd); + int32_t dbpwd_len = strlen(dbpwd); SECItem saltItem; SECItem passItem; - PRUint32 iterations = 0; + uint32_t iterations = 0; slapi_log_err(SLAPI_LOG_PLUGIN, (char *)schemeName, "Comparing password\n"); @@ -259,7 +270,7 @@ pbkdf2_sha256_benchmark_iterations() return time_nsec; } -PRUint32 +uint32_t pbkdf2_sha256_calculate_iterations(uint64_t time_nsec) { /* @@ -286,10 +297,10 @@ pbkdf2_sha256_calculate_iterations(uint64_t time_nsec) /* * Finally, we make the rounds in terms of thousands, and cast it. */ - PRUint32 final_rounds = thou_rounds * 1000; + uint32_t final_rounds = thou_rounds * 1000; - if (final_rounds < 10000) { - final_rounds = 10000; + if (final_rounds < PBKDF2_MINIMUM) { + final_rounds = PBKDF2_MINIMUM; } return final_rounds; @@ -305,7 +316,7 @@ pbkdf2_sha256_start(Slapi_PBlock *pb __attribute__((unused))) /* set it globally */ PBKDF2_ITERATIONS = pbkdf2_sha256_calculate_iterations(time_nsec); /* Make a note of it. */ - slapi_log_err(SLAPI_LOG_PLUGIN, (char *)schemeName, "Based on CPU performance, chose %" PRIu32 " rounds\n", PBKDF2_ITERATIONS); + slapi_log_err(SLAPI_LOG_INFO, (char *)schemeName, "Based on CPU performance, chose %" PRIu32 " rounds\n", PBKDF2_ITERATIONS); return 0; } diff --git a/test/plugins/pwdstorage/pbkdf2.c b/test/plugins/pwdstorage/pbkdf2.c index b94406c3a..16fc680e0 100644 --- a/test/plugins/pwdstorage/pbkdf2.c +++ b/test/plugins/pwdstorage/pbkdf2.c @@ -69,12 +69,12 @@ test_plugin_pwdstorage_pbkdf2_rounds(void **state __attribute__((unused))) /* * On a very slow system, we get the default min rounds out. */ - assert_true(pbkdf2_sha256_calculate_iterations(1000000000) == 10000); + assert_true(pbkdf2_sha256_calculate_iterations(10000000000) == 2048); /* * On a "fast" system, we should see more rounds. */ - assert_true(pbkdf2_sha256_calculate_iterations(200000000) == 10000); - assert_true(pbkdf2_sha256_calculate_iterations(100000000) == 20000); - assert_true(pbkdf2_sha256_calculate_iterations(50000000) == 40000); + assert_true(pbkdf2_sha256_calculate_iterations(800000000) == 2048); + assert_true(pbkdf2_sha256_calculate_iterations(5000000) == 10000); + assert_true(pbkdf2_sha256_calculate_iterations(2500000) == 20000); #endif }
0
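The tuning described above derives an iteration count from a measured per-hash time and clamps it to a floor. The arithmetic can be sketched as follows; the constants are illustrative placeholders and do not claim to be the server's actual values.

```c
#include <inttypes.h>
#include <stdio.h>

#define TARGET_NSEC    2000000u    /* aim for ~2 ms per hash (illustrative) */
#define MINIMUM_ROUNDS 2048u       /* never drop below this floor */
#define BENCH_ROUNDS   25000u      /* rounds used while timing */

/* bench_nsec: measured time to run BENCH_ROUNDS iterations once. */
static uint32_t calculate_rounds(uint64_t bench_nsec)
{
    if (bench_nsec == 0) {
        return MINIMUM_ROUNDS;
    }
    /* Scale the benchmarked round count to hit the time budget. */
    uint64_t rounds = ((uint64_t)BENCH_ROUNDS * TARGET_NSEC) / bench_nsec;
    if (rounds < MINIMUM_ROUNDS) {
        rounds = MINIMUM_ROUNDS;   /* slow hardware still gets the minimum */
    }
    return (uint32_t)rounds;
}

int main(void)
{
    /* Slow box: the floor kicks in; fast box: more rounds are affordable. */
    printf("slow: %" PRIu32 "\n", calculate_rounds(10000000000ull));
    printf("fast: %" PRIu32 "\n", calculate_rounds(2500000ull));
    return 0;
}
```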
56441c31d8709fdd5c9e45a0cfcaa1e9ec88532a
389ds/389-ds-base
Ticket 47394 - remove-ds.pl should remove /var/lock/dirsrv Bug Description: Once a DS instance has been created, /var/lock/dirsrv, /var/lib/dirsrv, and /var/run/dirsrv are owned by the user that was specified when the instance was created. Then remove this single instance, and create a new instance that runs as a different user. The install will fail. Fix Description: When removing the last instance on the machine, remove the /var/lock|run|lib/dirsrv/ directories. https://fedorahosted.org/389/ticket/47394 Reviewed by: richm(Thanks!)
commit 56441c31d8709fdd5c9e45a0cfcaa1e9ec88532a Author: Mark Reynolds <[email protected]> Date: Fri Aug 23 12:17:57 2013 -0400 Ticket 47394 - remove-ds.pl should remove /var/lock/dirsrv Bug Description: Once a DS instance has been created, /var/lock/dirsrv, /var/lib/dirsrv, and /var/run/dirsrv are owned by the user that was specified when the instance was created. Then remove this single instance, and create a new instance that runs as a different user. The install will fail. Fix Description: When removing the last instance on the machine, remove the /var/lock|run|lib/dirsrv/ directories. https://fedorahosted.org/389/ticket/47394 Reviewed by: richm(Thanks!) diff --git a/ldap/admin/src/scripts/DSCreate.pm.in b/ldap/admin/src/scripts/DSCreate.pm.in index cc00bb322..7835c4134 100644 --- a/ldap/admin/src/scripts/DSCreate.pm.in +++ b/ldap/admin/src/scripts/DSCreate.pm.in @@ -1274,7 +1274,10 @@ sub removeDSInstance { my @errs; my $initconfig = "$initconfig_dir/@package_name@-$inst"; - + my $pkglockdir = "@localstatedir@/lock/@package_name@"; + my $pkgrundir = "@localstatedir@/run/@package_name@"; + my $pkglibdir = "@localstatedir@/lib/@package_name@"; + # Get the configdir, rundir and product_name from the instance initconfig script. unless(open(INFILE, $initconfig)) { return ( [ 'error_no_such_instance', $instname, $! ] ); @@ -1454,7 +1457,14 @@ sub removeDSInstance { # update systemd files push @errs, updateSystemD(); - + + # remove /var/lock/dirsrv & /var/run/dirsrv if this was the last instance + if(!<$pkglockdir/*>){ + rmdir $pkglockdir; + rmdir $pkgrundir; + rmdir $pkglibdir; + } + # if we got here, report success if (@errs) { debug(1, "Could not successfully remove $instname\n");
0
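The remove-ds.pl fix above is Perl (DSCreate.pm): it removes /var/lock/dirsrv, /var/run/dirsrv, and /var/lib/dirsrv only when the glob of the lock directory comes back empty, i.e. when the last instance is gone. Purely as an illustration of the same logic, here is a minimal Python sketch; the paths and helper name are assumptions, not part of the commit.

import glob
import os

def remove_shared_dirs_if_last_instance(localstatedir="/var", package="dirsrv"):
    """Remove the shared package directories once no instance remains."""
    lockdir = os.path.join(localstatedir, "lock", package)
    rundir = os.path.join(localstatedir, "run", package)
    libdir = os.path.join(localstatedir, "lib", package)
    # Same test as the Perl glob check: any per-instance entries left?
    if not glob.glob(os.path.join(lockdir, "*")):
        for d in (lockdir, rundir, libdir):
            try:
                os.rmdir(d)  # like Perl's rmdir, only removes empty directories
            except OSError:
                pass         # directory missing or not empty; leave it alone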
904416f4631d842a105851b4a9931ae17822a107
389ds/389-ds-base
fix compiler warning
commit 904416f4631d842a105851b4a9931ae17822a107 Author: Ludwig Krispenz <[email protected]> Date: Tue Jul 30 11:29:47 2013 +0200 fix compiler warning diff --git a/ldap/servers/plugins/acl/aclutil.c b/ldap/servers/plugins/acl/aclutil.c index 2002276f0..4f81bc0c8 100644 --- a/ldap/servers/plugins/acl/aclutil.c +++ b/ldap/servers/plugins/acl/aclutil.c @@ -1400,7 +1400,6 @@ void acl_ht_add_and_freeOld(acl_ht_t * acl_ht, void acl_ht_remove_and_free(acl_ht_t * acl_ht, PLHashNumber key){ char *old_value = NULL; - uintptr_t pkey = (uintptr_t)key; if ( (old_value = (char *)acl_ht_lookup( acl_ht, key)) != NULL ) { acl_ht_remove( acl_ht, key);
0
99e636a0e44c874473e8e0f07e3dcf0dbeaa88f0
389ds/389-ds-base
use prefix instead of hardcoded ldapserver
commit 99e636a0e44c874473e8e0f07e3dcf0dbeaa88f0 Author: Rich Megginson <[email protected]> Date: Fri Mar 11 03:55:32 2005 +0000 use prefix instead of hardcoded ldapserver diff --git a/ldapserver.spec b/ldapserver.spec index b33412f36..b148dea41 100644 --- a/ldapserver.spec +++ b/ldapserver.spec @@ -1,7 +1,7 @@ Summary: Directory Server Name: ldapserver Version: 7.1 -Release: 1 +Release: 0 License: GPL Group: System Environment/Daemons URL: http://www.redhat.com @@ -13,7 +13,7 @@ BuildPreReq: perl, fileutils, make Autoreq: 0 # Without Requires: something, rpmbuild will abort! Requires: perl -Prefix: /opt/ldapserver +Prefix: /opt/%{name} %description ldapserver is an LDAPv3 compliant server. @@ -51,11 +51,11 @@ cp -r $NSJRE/lib bin/base/jre zip -q -r ../base/nsjre.zip bin cd .. rm -rf tmp -echo yes | ./setup -b $RPM_BUILD_ROOT/opt/ldapserver +echo yes | ./setup -b $RPM_BUILD_ROOT/%{prefix} # this is our setup script that sets up the initial # server instances after installation cd .. -cp ldap/cm/newinst/setup $RPM_BUILD_ROOT/opt/ldapserver/setup +cp ldap/cm/newinst/setup $RPM_BUILD_ROOT/%{prefix}/setup %clean rm -rf $RPM_BUILD_ROOT @@ -63,15 +63,18 @@ rm -rf $RPM_BUILD_ROOT %files # rather than listing individual files, we just package (and own) # the entire ldapserver directory - if we change this to put -# files in different places, we don't be able to do this anymore +# files in different places, we won't be able to do this anymore %defattr(-,root,root,-) -/opt/ldapserver +%{prefix} %post echo "" -echo "Please cd /opt/ldapserver and run ./setup/setup" +echo "Please cd " %{prefix} " and run ./setup/setup" %changelog +* Tue Mar 8 2005 Richard Megginson <[email protected]> 7.1-0 +- use ${prefix} instead of /opt/ldapserver - prefix is defined as /opt/%{name} + * Thu Jan 20 2005 Richard Megginson <[email protected]> - Initial build.
0
8e32e5f4fa9d5c533ddd6372f1ca99cf1b784074
389ds/389-ds-base
Issue 5521 - BUG - Pam PTA multiple issues Bug Description: Pam PTA and the lib389 cli had numerous issues that were affecting administration and configuration. Fix Description: This fixes many issues: * add pam-[enable,disable,show] separate from pta enable. We can't combine these into one because they are separate plugins. They also still need ways to enable them outside of the direct config attribute manipulation. * Make pamMissingSuffix return a default of IGNORE on NONE. This is because many of the current tools don't actually set this value and it can block server restarts. * pamSecure would not warn properly on lack of TLS connections, which can trick users into thinking the plugin is not working. fixes: https://github.com/389ds/389-ds-base/issues/5521 Author: William Brown <[email protected]> Review by: @mreynolds389 @droideck (Thanks!)
commit 8e32e5f4fa9d5c533ddd6372f1ca99cf1b784074 Author: William Brown <[email protected]> Date: Mon Nov 14 13:04:34 2022 +1000 Issue 5521 - BUG - Pam PTA multiple issues Bug Description: Pam PTA and the lib389 cli had numerous issues that were affecting administration and configuration. Fix Description: This fixes many issues: * add pam-[enable,disable,show] seperate to pta enable. We can't combine these into one because they are seperate plugins. They also still needs ways to enable them outside of the direct config attribute manipulation. * Make pamMissingSuffix return a default of IGNORE on NONE. This is because many of the current tools don't actually set this value and it can block server restarts. * pamSecure would not warn properly on lack of TLS connections which can trick users into thinking the plugin is not working. fixes: https://github.com/389ds/389-ds-base/issues/5521 Author: William Brown <[email protected]> Review by: @mreynolds389 @droideck (Thanks!) diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c index be94e1d87..5858b9732 100644 --- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c +++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c @@ -195,12 +195,12 @@ static int missing_suffix_to_int(char *missing_suffix) { int retval = -1; /* -1 is error */ - if (!PL_strcasecmp(missing_suffix, PAMPT_MISSING_SUFFIX_ERROR_STRING)) { - retval = PAMPT_MISSING_SUFFIX_ERROR; + if (missing_suffix == NULL || !PL_strcasecmp(missing_suffix, PAMPT_MISSING_SUFFIX_IGNORE_STRING)) { + retval = PAMPT_MISSING_SUFFIX_IGNORE; } else if (!PL_strcasecmp(missing_suffix, PAMPT_MISSING_SUFFIX_ALLOW_STRING)) { retval = PAMPT_MISSING_SUFFIX_ALLOW; - } else if (!PL_strcasecmp(missing_suffix, PAMPT_MISSING_SUFFIX_IGNORE_STRING)) { - retval = PAMPT_MISSING_SUFFIX_IGNORE; + } else if (!PL_strcasecmp(missing_suffix, PAMPT_MISSING_SUFFIX_ERROR_STRING)) { + retval = PAMPT_MISSING_SUFFIX_ERROR; } return retval; diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c index 5bca823ff..0ceeea024 100644 --- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c +++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c @@ -462,8 +462,8 @@ pam_passthru_bindpreop(Slapi_PBlock *pb) int is_ssl = 0; slapi_pblock_get(pb, SLAPI_CONN_IS_SSL_SESSION, &is_ssl); if (!is_ssl) { - slapi_log_err(SLAPI_LOG_PLUGIN, PAM_PASSTHRU_PLUGIN_SUBSYSTEM, - "pam_passthru_bindpreop - Connection not secure (secure connection required; check config)\n"); + slapi_log_err(SLAPI_LOG_WARNING, PAM_PASSTHRU_PLUGIN_SUBSYSTEM, + "pam_passthru_bindpreop - Client connection not secure and pamSecure is true (missing LDAPS)\n"); goto done; } } diff --git a/src/lib389/lib389/cli_conf/plugins/passthroughauth.py b/src/lib389/lib389/cli_conf/plugins/passthroughauth.py index 5c7dbc82e..e0e2ba2ac 100644 --- a/src/lib389/lib389/cli_conf/plugins/passthroughauth.py +++ b/src/lib389/lib389/cli_conf/plugins/passthroughauth.py @@ -11,7 +11,7 @@ import ldap from lib389.plugins import (PassThroughAuthenticationPlugin, PAMPassThroughAuthPlugin, PAMPassThroughAuthConfigs, PAMPassThroughAuthConfig) -from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add +from lib389.cli_conf import add_generic_plugin_parsers, generic_object_edit, generic_object_add, generic_show, generic_enable, generic_disable, generic_status arg_to_attr_pam = { 'exclude_suffix': 'pamExcludeSuffix', @@ -234,18 +234,19 @@ def _add_parser_args_pam(parser): 
def create_parser(subparsers): passthroughauth_parser = subparsers.add_parser('pass-through-auth', help='Manage and configure Pass-Through Authentication plugins ' - '(URLs and PAM)') + '(LDAP URLs and PAM)') subcommands = passthroughauth_parser.add_subparsers(help='action') + add_generic_plugin_parsers(subcommands, PassThroughAuthenticationPlugin) - list = subcommands.add_parser('list', help='List pass-though plugin URLs or PAM configurations') + list = subcommands.add_parser('list', help='List pass-though plugin LDAP URLs or PAM configurations') subcommands_list = list.add_subparsers(help='action') - list_urls = subcommands_list.add_parser('urls', help='Lists URLs') + list_urls = subcommands_list.add_parser('urls', help='Lists LDAP URLs') list_urls.set_defaults(func=pta_list) list_pam = subcommands_list.add_parser('pam-configs', help='Lists PAM configurations') list_pam.set_defaults(func=pam_pta_list) - url = subcommands.add_parser('url', help='Manage PTA URL configurations') + url = subcommands.add_parser('url', help='Manage PTA LDAP URL configurations') subcommands_url = url.add_subparsers(help='action') add_url = subcommands_url.add_parser('add', help='Add the config entry') @@ -267,6 +268,19 @@ def create_parser(subparsers): delete_url.add_argument('URL', help='The full LDAP URL you get from the "list" command') delete_url.set_defaults(func=pta_del) + # Pam PTA and PTA are not the same plugin! We need to enable and control them seperately! + show_parser = subcommands.add_parser('pam-show', help='Displays the plugin configuration') + show_parser.set_defaults(func=generic_show, plugin_cls=PAMPassThroughAuthPlugin) + + enable_parser = subcommands.add_parser('pam-enable', help='Enables the plugin') + enable_parser.set_defaults(func=generic_enable, plugin_cls=PAMPassThroughAuthPlugin) + + disable_parser = subcommands.add_parser('pam-disable', help='Disables the plugin') + disable_parser.set_defaults(func=generic_disable, plugin_cls=PAMPassThroughAuthPlugin) + + status_parser = subcommands.add_parser('pam-status', help='Displays the plugin status') + status_parser.set_defaults(func=generic_status, plugin_cls=PAMPassThroughAuthPlugin) + pam = subcommands.add_parser('pam-config', help='Manage PAM PTA configurations.') pam.add_argument('NAME', help='The PAM PTA configuration name') subcommands_pam = pam.add_subparsers(help='action') diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py index 1b59bc2b5..27bc57717 100644 --- a/src/lib389/lib389/plugins.py +++ b/src/lib389/lib389/plugins.py @@ -1491,7 +1491,7 @@ class PAMPassThroughAuthPlugin(Plugin): super(PAMPassThroughAuthPlugin, self).__init__(instance, dn) -class PAMPassThroughAuthConfig(Plugin): +class PAMPassThroughAuthConfig(DSLdapObject): """A single instance of PAM Pass Through Auth config entry :param instance: An instance @@ -1500,24 +1500,11 @@ class PAMPassThroughAuthConfig(Plugin): :type dn: str """ - _plugin_properties = { - 'cn' : 'USN', - 'nsslapd-pluginEnabled': 'off', - 'nsslapd-pluginPath': 'libpam-passthru-plugin', - 'nsslapd-pluginInitfunc': 'pam_passthruauth_init', - 'nsslapd-pluginType': 'betxnpreoperation', - 'nsslapd-plugin-depends-on-type': 'database', - 'nsslapd-pluginId': 'PAM', - 'nsslapd-pluginVendor': '389 Project', - 'nsslapd-pluginVersion': '1.3.7.0', - 'nsslapd-pluginDescription': 'PAM Pass Through Auth plugin' - } - def __init__(self, instance, dn=None): super(PAMPassThroughAuthConfig, self).__init__(instance, dn) self._rdn_attribute = 'cn' self._must_attributes = ['cn'] - 
self._create_objectclasses = ['top', 'extensibleObject', 'nsslapdplugin', 'pamConfig'] + self._create_objectclasses = ['top', 'extensibleObject', 'pamConfig'] self._protected = False
0
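The key behavioural change in the pam_ptconfig.c hunk above is that an unset pamMissingSuffix now falls through to IGNORE instead of producing an error, so instances that never set the attribute can still restart. A minimal Python sketch of that mapping, assuming illustrative numeric constants rather than the plugin's real values:

PAMPT_MISSING_SUFFIX_IGNORE = 0   # illustrative values only
PAMPT_MISSING_SUFFIX_ALLOW = 1
PAMPT_MISSING_SUFFIX_ERROR = 2

def missing_suffix_to_int(value):
    """Map the pamMissingSuffix setting to its internal code; None -> IGNORE."""
    if value is None or value.lower() == "ignore":
        return PAMPT_MISSING_SUFFIX_IGNORE
    if value.lower() == "allow":
        return PAMPT_MISSING_SUFFIX_ALLOW
    if value.lower() == "error":
        return PAMPT_MISSING_SUFFIX_ERROR
    return -1  # invalid setting

assert missing_suffix_to_int(None) == PAMPT_MISSING_SUFFIX_IGNORE
assert missing_suffix_to_int("ERROR") == PAMPT_MISSING_SUFFIX_ERROR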
a71633d56951dd6c4d0368c790b85628f1598968
389ds/389-ds-base
Ticket #47313 - Indexed search with filter containing '&' and "!" with attribute subtypes gives wrong result Description: commit fae006821bd6e524c0f7f8d5f023f4fe5e160ef0 introduced a bug, which occurs when a filter includes NOT and one of the results from the subfilters returns NONE. This patch backs out the last section of commit fae006821bd6e524c0f7f8d5f023f4fe5e160ef0 with an improvement -- avoiding unnecessary idl duplication. It also adds (NULL == idl) checks to idl_common.c. https://fedorahosted.org/389/ticket/47313 Reviewed by [email protected] (Thank you, Rich!)
commit a71633d56951dd6c4d0368c790b85628f1598968 Author: Noriko Hosoi <[email protected]> Date: Fri Dec 13 10:35:08 2013 -0800 Ticket #47313 - Indexed search with filter containing '&' and "!" with attribute subtypes gives wrong result Description: commit fae006821bd6e524c0f7f8d5f023f4fe5e160ef0 introduced a bug, which occurs when a filter includes NOT and one of the results from the subfilters returns NONE. This patch backoffs the last section of the commit fae006821bd6e524c0f7f8d5f023f4fe5e160ef0 with an improvement -- avoiding unnecessary idl duplication. Also, adding (NULL == idl) checks to idl_common.c. https://fedorahosted.org/389/ticket/47313 Reviewed by [email protected] (Thank you, Rich!) diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c index 971e10b6c..9ad2ab4b9 100644 --- a/ldap/servers/slapd/back-ldbm/filterindex.c +++ b/ldap/servers/slapd/back-ldbm/filterindex.c @@ -852,17 +852,19 @@ list_candidates( break; /* We can exit the loop now, since the candidate list is small already */ } } else if ( ftype == LDAP_FILTER_AND ) { - if (isnot && !idl_is_allids(tmp)) { - IDList *new_idl = NULL; - int notin_result = 0; - /* - * If the given tmp is ALLIDs (due to subtype in filter), - * we cannot use idl_notin. + if (isnot) { + /* + * If tmp is NULL or ALLID, idl_notin just duplicates idl. + * We don't have to do it. */ - notin_result = idl_notin( be, idl, tmp, &new_idl ); - if (notin_result) { - idl_free(idl); - idl = new_idl; + if (!tmp && !idl_is_allids(tmp)) { + IDList *new_idl = NULL; + int notin_result = 0; + notin_result = idl_notin( be, idl, tmp, &new_idl ); + if (notin_result) { + idl_free(idl); + idl = new_idl; + } } } else { idl = idl_intersection(be, idl, tmp); diff --git a/ldap/servers/slapd/back-ldbm/idl_common.c b/ldap/servers/slapd/back-ldbm/idl_common.c index 584bba565..e3023d8ed 100644 --- a/ldap/servers/slapd/back-ldbm/idl_common.c +++ b/ldap/servers/slapd/back-ldbm/idl_common.c @@ -46,16 +46,25 @@ size_t idl_sizeof(IDList *idl) { + if (NULL == idl) { + return 0; + } return (2 + idl->b_nmax) * sizeof(ID); } NIDS idl_length(IDList *idl) { + if (NULL == idl) { + return 0; + } return (idl->b_nmax == ALLIDSBLOCK) ? UINT_MAX : idl->b_nids; } int idl_is_allids(IDList *idl) { + if (NULL == idl) { + return 0; + } return (idl->b_nmax == ALLIDSBLOCK); } @@ -110,6 +119,9 @@ idl_free( IDList *idl ) /* JCM - pass in ** */ int idl_append( IDList *idl, ID id) { + if (NULL == idl) { + return 2; + } if ( ALLIDS( idl ) || ( (idl->b_nids) && (idl->b_ids[idl->b_nids - 1] == id)) ) { return( 1 ); /* already there */ } @@ -321,7 +333,7 @@ idl_notin( backend *be, IDList *a, IDList *b, - IDList **new_result + IDList **new_result ) { NIDS ni, ai, bi; @@ -435,6 +447,9 @@ idl_nextid( IDList *idl, ID id ) { NIDS i; + if (NULL == idl) { + return NOID; + } if ( ALLIDS( idl ) ) { return( ++id < idl->b_nids ? id : NOID ); }
0
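The new comment in the filterindex.c hunk above describes the intent: when the NOT branch of an AND filter yields no IDs or an ALLIDS block, idl_notin() would only duplicate the current candidate list, so the subtraction can be skipped. A conceptual sketch of that intent using Python sets (the server operates on IDList blocks, not sets, and ALLIDS below is just a stand-in):

ALLIDS = object()  # stand-in for an ALLIDS block

def and_with_not(idl, not_branch_idl):
    """Combine AND candidates with a NOT subfilter's candidate list."""
    if not_branch_idl is None or not_branch_idl is ALLIDS:
        # Nothing useful to subtract (subtype filters can produce ALLIDS);
        # keep the current candidates, a superset is fine for candidate lists.
        return idl
    return idl - not_branch_idl  # analogous to idl_notin()

assert and_with_not({1, 2, 3}, None) == {1, 2, 3}
assert and_with_not({1, 2, 3}, {2}) == {1, 3}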
eb191f5b146c07088d9fa4b10713f5644eca89a3
389ds/389-ds-base
Issue 51113 - Allow using uid for replication manager entry Bug Description: Currently the code is hardcoded to only allow "cn" as the RDN attribute for the replication manager entry. Fix description: Allow setting the RDN attribute of the replication manager DS ldap object, and include the schema that allows "uid". relates: https://pagure.io/389-ds-base/issue/51113 Reviewed by: spichugi & firstyear(Thanks!!)
commit eb191f5b146c07088d9fa4b10713f5644eca89a3 Author: Mark Reynolds <[email protected]> Date: Tue May 26 17:03:11 2020 -0400 Issue 51113 - Allow using uid for replication manager entry Bug Description: Currently it was hardcoded to only allow "cn" as the rdn attribute for the replication manager entry. Fix description: Allow setting the rdn attribute of the replication manager DS ldap object, and include the schema that allows "uid". relates: https://pagure.io/389-ds-base/issue/51113 Reviewed by: spichugi & firstyear(Thanks!!) diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py index 09cb9b435..b9bc3d291 100644 --- a/src/lib389/lib389/cli_conf/replication.py +++ b/src/lib389/lib389/cli_conf/replication.py @@ -199,19 +199,21 @@ def enable_replication(inst, basedn, log, args): # Create replication manager if password was provided if args.bind_dn and args.bind_passwd: - cn_rdn = args.bind_dn.split(",", 1)[0] - cn_val = cn_rdn.split("=", 1)[1] - manager = BootstrapReplicationManager(inst, dn=args.bind_dn) + rdn = args.bind_dn.split(",", 1)[0] + rdn_attr, rdn_val = rdn.split("=", 1) + manager = BootstrapReplicationManager(inst, dn=args.bind_dn, rdn_attr=rdn_attr) try: manager.create(properties={ - 'cn': cn_val, + 'cn': rdn_val, + 'uid': rdn_val, 'userPassword': args.bind_passwd }) except ldap.ALREADY_EXISTS: # Already there, but could have different password. Delete and recreate manager.delete() manager.create(properties={ - 'cn': cn_val, + 'cn': rdn_val, + 'uid': rdn_val, 'userPassword': args.bind_passwd }) except ldap.NO_SUCH_OBJECT: @@ -511,22 +513,23 @@ def get_cl(inst, basedn, log, args): def create_repl_manager(inst, basedn, log, args): - manager_cn = "replication manager" + manager_name = "replication manager" repl_manager_password = "" repl_manager_password_confirm = "" if args.name: - manager_cn = args.name - - if is_a_dn(manager_cn): - # A full DN was provided, make sure it uses "cn" for the RDN - if manager_cn.split("=", 1)[0].lower() != "cn": - raise ValueError("Replication manager DN must use \"cn\" for the rdn attribute") - manager_dn = manager_cn - manager_rdn = manager_dn.split(",", 1)[0] - manager_cn = manager_rdn.split("=", 1)[1] + manager_name = args.name + + if is_a_dn(manager_name): + # A full DN was provided + manager_dn = manager_name + manager_rdn = manager_name.split(",", 1)[0] + manager_attr, manager_name = manager_rdn.split("=", 1) + if manager_attr.lower() not in ['cn', 'uid']: + raise ValueError(f'The RDN attribute "{manager_attr}" is not allowed, you must use "cn" or "uid"') else: - manager_dn = "cn={},cn=config".format(manager_cn) + manager_dn = "cn={},cn=config".format(manager_name) + manager_attr = "cn" if args.passwd: repl_manager_password = args.passwd @@ -544,10 +547,11 @@ def create_repl_manager(inst, basedn, log, args): repl_manager_password = "" repl_manager_password_confirm = "" - manager = BootstrapReplicationManager(inst, dn=manager_dn) + manager = BootstrapReplicationManager(inst, dn=manager_dn, rdn_attr=manager_attr) try: manager.create(properties={ - 'cn': manager_cn, + 'cn': manager_name, + 'uid': manager_name, 'userPassword': repl_manager_password }) if args.suffix: @@ -564,7 +568,8 @@ def create_repl_manager(inst, basedn, log, args): # Already there, but could have different password. 
Delete and recreate manager.delete() manager.create(properties={ - 'cn': manager_cn, + 'cn': manager_name, + 'uid': manager_name, 'userPassword': repl_manager_password }) if args.suffix: @@ -954,6 +959,7 @@ def get_winsync_agmt_status(inst, basedn, log, args): status = agmt.status(winsync=True, use_json=args.json) log.info(status) + # # Tasks # @@ -1347,8 +1353,7 @@ def create_parser(subparsers): agmt_set_parser.add_argument('--wait-async-results', help="The amount of time in milliseconds the server waits if " "the consumer is not ready before resending data") agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after " - "a consumer sends back a busy response before making another " - "attempt to acquire access.") + "a consumer sends back a busy response before making another attempt to acquire access.") agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.") agmt_set_parser.add_argument('--flow-control-window', help="Sets the maximum number of entries and updates sent by a supplier, which are not acknowledged by the consumer.") agmt_set_parser.add_argument('--flow-control-pause', help="The time in milliseconds to pause after reaching the number of entries and updates set in \"--flow-control-window\"") @@ -1438,8 +1443,7 @@ def create_parser(subparsers): winsync_agmt_add_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>") winsync_agmt_add_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections") winsync_agmt_add_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after " - "a consumer sends back a busy response before making another " - "attempt to acquire access.") + "a consumer sends back a busy response before making another attempt to acquire access.") winsync_agmt_add_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.") winsync_agmt_add_parser.add_argument('--init', action='store_true', default=False, help="Initialize the agreement after creating it.") @@ -1468,8 +1472,7 @@ def create_parser(subparsers): winsync_agmt_set_parser.add_argument('--subtree-pair', help="Set the subtree pair: <DS_SUBTREE>:<WINDOWS_SUBTREE>") winsync_agmt_set_parser.add_argument('--conn-timeout', help="The timeout used for replicaton connections") winsync_agmt_set_parser.add_argument('--busy-wait-time', help="The amount of time in seconds a supplier should wait after " - "a consumer sends back a busy response before making another " - "attempt to acquire access.") + "a consumer sends back a busy response before making another attempt to acquire access.") winsync_agmt_set_parser.add_argument('--session-pause-time', help="The amount of time in seconds a supplier should wait between update sessions.") # Get diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py index e3fc7fe1f..f8adb3ce2 100644 --- a/src/lib389/lib389/replica.py +++ b/src/lib389/lib389/replica.py @@ -1779,15 +1779,18 @@ class BootstrapReplicationManager(DSLdapObject): :type instance: lib389.DirSrv :param dn: The dn to create :type dn: str + :param rdn_attr: The attribute to use for the RDN + :type rdn_attr: str """ - def __init__(self, instance, dn='cn=replication manager,cn=config'): + def __init__(self, instance, dn='cn=replication manager,cn=config', rdn_attr='cn'): 
super(BootstrapReplicationManager, self).__init__(instance, dn) - self._rdn_attribute = 'cn' + self._rdn_attribute = rdn_attr self._must_attributes = ['cn', 'userPassword'] self._create_objectclasses = [ 'top', - 'netscapeServer', - 'nsAccount' + 'inetUser', # for uid + 'netscapeServer', # for cn + 'nsAccount', # for authentication attributes ] if ds_is_older('1.4.0'): self._create_objectclasses.remove('nsAccount')
0
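Based on the new rdn_attr parameter and the cn/uid properties shown in the lib389 diff above, a usage sketch for creating a replication manager entry keyed on uid might look like this; the instance handle, entry name, and password are placeholders:

from lib389.replica import BootstrapReplicationManager

def create_uid_repl_manager(inst, name="replication manager", password="secret12"):
    """Create a bootstrap replication manager whose RDN uses uid instead of cn."""
    manager = BootstrapReplicationManager(
        inst, dn="uid={},cn=config".format(name), rdn_attr='uid')
    manager.create(properties={
        'cn': name,
        'uid': name,
        'userPassword': password,
    })
    return manager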
9506a1d704ce99945e12dc797f932b4a50a0da24
389ds/389-ds-base
Resolves: 464188 Summary: Perform better config validation in the DNA plug-in.
commit 9506a1d704ce99945e12dc797f932b4a50a0da24 Author: Nathan Kinder <[email protected]> Date: Fri Oct 3 04:28:22 2008 +0000 Resolves: 464188 Summary: Perform better config validation in the DNA plug-in. diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c index a106110e2..7b7470bd8 100644 --- a/ldap/servers/plugins/dna/dna.c +++ b/ldap/servers/plugins/dna/dna.c @@ -210,7 +210,7 @@ static int dna_exop_init(Slapi_PBlock * pb); * */ static int dna_load_plugin_config(); -static int dna_parse_config_entry(Slapi_Entry * e); +static int dna_parse_config_entry(Slapi_Entry * e, int apply); static void dna_delete_config(); static void dna_free_config_entry(struct configEntry ** entry); static int dna_load_host_port(); @@ -617,9 +617,10 @@ dna_load_plugin_config() } for (i = 0; (entries[i] != NULL); i++) { - status = dna_parse_config_entry(entries[i]); - if (DNA_SUCCESS != status) - break; + /* We don't care about the status here because we may have + * some invalid config entries, but we just want to continue + * looking for valid ones. */ + dna_parse_config_entry(entries[i], 1); } cleanup: @@ -632,22 +633,43 @@ dna_load_plugin_config() return status; } +/* + * dna_parse_config_entry() + * + * Parses a single config entry. If apply is non-zero, then + * we will load and start using the new config. You can simply + * validate config without making any changes by setting apply + * to 0. + * + * Returns DNA_SUCCESS if the entry is valid and DNA_FAILURE + * if it is invalid. + */ static int -dna_parse_config_entry(Slapi_Entry * e) +dna_parse_config_entry(Slapi_Entry * e, int apply) { char *value; - struct configEntry *entry; + struct configEntry *entry = NULL; struct configEntry *config_entry; PRCList *list; int entry_added = 0; + int ret = DNA_SUCCESS; slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, "--> dna_parse_config_entry\n"); + /* If this is the main DNA plug-in + * config entry, just bail. 
*/ + if (strcasecmp(getPluginDN(), slapi_entry_get_ndn(e)) == 0) { + ret = DNA_FAILURE; + goto bail; + } + entry = (struct configEntry *) slapi_ch_calloc(1, sizeof(struct configEntry)); - if (NULL == entry) + if (NULL == entry) { + ret = DNA_FAILURE; goto bail; + } value = slapi_entry_get_ndn(e); if (value) { @@ -660,8 +682,14 @@ dna_parse_config_entry(Slapi_Entry * e) value = slapi_entry_attr_get_charptr(e, DNA_TYPE); if (value) { entry->type = value; - } else + } else { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry: The %s config " + "setting is required for range %s.\n", + DNA_TYPE, entry->dn); + ret = DNA_FAILURE; goto bail; + } slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, "----------> %s [%s]\n", DNA_TYPE, entry->type, 0, 0); @@ -670,8 +698,14 @@ dna_parse_config_entry(Slapi_Entry * e) if (value) { entry->nextval = strtoull(value, 0, 0); slapi_ch_free_string(&value); - } else + } else { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry: The %s config " + "setting is required for range %s.\n", + DNA_NEXTVAL, entry->dn); + ret = DNA_FAILURE; goto bail; + } slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, "----------> %s [%llu]\n", DNA_NEXTVAL, entry->nextval, 0, @@ -699,8 +733,7 @@ dna_parse_config_entry(Slapi_Entry * e) if (value) { entry->interval = strtoull(value, 0, 0); slapi_ch_free_string(&value); - } else - goto bail; + } slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, "----------> %s [%llu]\n", DNA_INTERVAL, entry->interval, 0, 0); @@ -722,9 +755,15 @@ dna_parse_config_entry(Slapi_Entry * e) slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM , "Error: Invalid search filter in entry [%s]: [%s]\n", entry->dn, value); + ret = DNA_FAILURE; goto bail; } } else { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry: The %s config " + "setting is required for range %s.\n", + DNA_FILTER, entry->dn); + ret = DNA_FAILURE; goto bail; } @@ -733,7 +772,16 @@ dna_parse_config_entry(Slapi_Entry * e) value = slapi_entry_attr_get_charptr(e, DNA_SCOPE); if (value) { + /* TODO - Allow multiple scope settings for a single range. This may + * make ordering the scopes tough when we put them in the clist. 
*/ entry->scope = slapi_dn_normalize(value); + } else { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry: The %s config " + "config setting is required for range %s.\n", + DNA_SCOPE, entry->dn); + ret = DNA_FAILURE; + goto bail; } slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, @@ -744,15 +792,14 @@ dna_parse_config_entry(Slapi_Entry * e) value = slapi_entry_attr_get_charptr(e, DNA_MAXVAL); if (value) { entry->maxval = strtoull(value, 0, 0); - - slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, - "----------> %s [%llu]\n", DNA_MAXVAL, value, 0, 0); - slapi_ch_free_string(&value); } else { entry->maxval = -1; } + slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, + "----------> %s [%llu]\n", DNA_MAXVAL, entry->maxval, 0, 0); + value = slapi_entry_attr_get_charptr(e, DNA_SHARED_CFG_DN); if (value) { Slapi_Entry *shared_e = NULL; @@ -772,6 +819,7 @@ dna_parse_config_entry(Slapi_Entry * e) slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, "dna_parse_config_entry: Unable to locate " "shared configuration entry (%s)\n", value); + ret = DNA_FAILURE; goto bail; } else { slapi_entry_free(shared_e); @@ -803,19 +851,21 @@ dna_parse_config_entry(Slapi_Entry * e) entry->threshold = 1; } + slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, + "----------> %s [%llu]\n", DNA_THRESHOLD, entry->threshold, 0, 0); + value = slapi_entry_attr_get_charptr(e, DNA_RANGE_REQUEST_TIMEOUT); if (value) { entry->timeout = strtoull(value, 0, 0); - - slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, - "----------> %s [%llu]\n", DNA_RANGE_REQUEST_TIMEOUT, - value, 0, 0); - slapi_ch_free_string(&value); } else { entry->timeout = DNA_DEFAULT_TIMEOUT; } + slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, + "----------> %s [%llu]\n", DNA_RANGE_REQUEST_TIMEOUT, + entry->timeout, 0, 0); + value = slapi_entry_attr_get_charptr(e, DNA_NEXT_RANGE); if (value) { char *p = NULL; @@ -831,16 +881,47 @@ dna_parse_config_entry(Slapi_Entry * e) if (entry->next_range_upper <= entry->next_range_lower) { slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, "dna_parse_config_entry: Illegal %s " - "setting specified for range %s.\n", + "setting specified for range %s. Legal " + "format is <lower>-<upper>.\n", DNA_NEXT_RANGE, entry->dn); + ret = DNA_FAILURE; entry->next_range_lower = 0; entry->next_range_upper = 0; } + + /* make sure next range doesn't overlap with + * the active range */ + if (((entry->next_range_upper <= entry->maxval) && + (entry->next_range_upper >= entry->nextval)) || + ((entry->next_range_lower <= entry->maxval) && + (entry->next_range_lower >= entry->nextval))) { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry: Illegal %s " + "setting specified for range %s. %s " + "overlaps with the active range.\n", + DNA_NEXT_RANGE, entry->dn, DNA_NEXT_RANGE); + ret = DNA_FAILURE; + entry->next_range_lower = 0; + entry->next_range_upper = 0; + } + } else { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry: Illegal %s " + "setting specified for range %s. Legal " + "format is <lower>-<upper>.\n", + DNA_NEXT_RANGE, entry->dn); + ret = DNA_FAILURE; } slapi_ch_free_string(&value); } + /* If we were only called to validate config, we can + * just bail out before applying the config changes */ + if (apply == 0) { + goto bail; + } + /* Calculate number of remaining values. 
*/ if (entry->next_range_lower != 0) { entry->remaining = ((entry->next_range_upper - entry->next_range_lower + 1) / @@ -856,6 +937,10 @@ dna_parse_config_entry(Slapi_Entry * e) /* create the new value lock for this range */ entry->lock = slapi_new_mutex(); if (!entry->lock) { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry: Unable to create lock " + "for range %s.\n", entry->dn); + ret = DNA_FAILURE; goto bail; } @@ -912,8 +997,12 @@ dna_parse_config_entry(Slapi_Entry * e) bail: if (0 == entry_added) { - slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, - "config entry [%s] skipped\n", entry->dn, 0, 0); + /* Don't log error if we weren't asked to apply config */ + if ((apply != 0) && (entry != NULL)) { + slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, + "dna_parse_config_entry: Invalid config entry " + "[%s] skipped\n", entry->dn, 0, 0); + } dna_free_config_entry(&entry); } else { time_t now; @@ -925,12 +1014,14 @@ dna_parse_config_entry(Slapi_Entry * e) * performing the operation now would cause the * change to not get changelogged. */ slapi_eq_once(dna_update_config_event, entry, now + 30); + + ret = DNA_SUCCESS; } slapi_log_error(SLAPI_LOG_TRACE, DNA_PLUGIN_SUBSYSTEM, "<-- dna_parse_config_entry\n"); - return DNA_SUCCESS; + return ret; } static void @@ -938,6 +1029,9 @@ dna_free_config_entry(struct configEntry ** entry) { struct configEntry *e = *entry; + if (e == NULL) + return; + if (e->dn) { slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM, "freeing config entry [%s]\n", e->dn, 0, 0); @@ -2396,9 +2490,6 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype) if (0 == (dn = dna_get_dn(pb))) goto bail; - if (dna_dn_is_config(dn)) - goto bail; - if (LDAP_CHANGETYPE_ADD == modtype) { slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e); } else { @@ -2431,6 +2522,34 @@ static int dna_pre_op(Slapi_PBlock * pb, int modtype) if (0 == e) goto bailmod; + if (dna_dn_is_config(dn)) { + /* Validate config changes, but don't apply them. + * This allows us to reject invalid config changes + * here at the pre-op stage. Applying the config + * needs to be done at the post-op stage. */ + if (smods) { + if (slapi_entry_apply_mods(e, mods) != LDAP_SUCCESS) { + /* The mods don't apply cleanly, so we just let this op go + * to let the main server handle it. */ + goto bailmod; + } + } + + if (dna_parse_config_entry(e, 0) != DNA_SUCCESS) { + /* Refuse the operation if config parsing failed. */ + ret = LDAP_UNWILLING_TO_PERFORM; + if (LDAP_CHANGETYPE_ADD == modtype) { + errstr = slapi_ch_smprintf("Not a valid DNA configuration entry."); + } else { + errstr = slapi_ch_smprintf("Changes result in an invalid " + "DNA configuration."); + } + } + + /* We're done, so just bail. 
*/ + goto bailmod; + } + dna_read_lock(); if (!PR_CLIST_IS_EMPTY(dna_global_config)) { diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c index 9172aef20..c7c1a74ee 100644 --- a/ldap/servers/slapd/entry.c +++ b/ldap/servers/slapd/entry.c @@ -2577,6 +2577,12 @@ slapi_entry_has_children(const Slapi_Entry *entry) /* * Apply a set of modifications to an entry */ +int +slapi_entry_apply_mods( Slapi_Entry *e, LDAPMod **mods ) +{ + return entry_apply_mods(e, mods); +} + int entry_apply_mods( Slapi_Entry *e, LDAPMod **mods ) { diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index 359e957ed..8cfc51c0d 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -278,6 +278,7 @@ int slapi_entry_add_value(Slapi_Entry *e, const char *type, const Slapi_Value *v int slapi_entry_add_string(Slapi_Entry *e, const char *type, const char *value); int slapi_entry_delete_string(Slapi_Entry *e, const char *type, const char *value); void slapi_entry_diff(Slapi_Mods *smods, Slapi_Entry *e1, Slapi_Entry *e2, int diff_ctrl); +int slapi_entry_apply_mods(Slapi_Entry *e, LDAPMod **mods); /* diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h index 3a06f4aa8..2a8eff126 100644 --- a/ldap/servers/slapd/slapi-private.h +++ b/ldap/servers/slapd/slapi-private.h @@ -315,7 +315,7 @@ int entry_add_present_attribute_wsi(Slapi_Entry *e, Slapi_Attr *a); int entry_add_deleted_attribute_wsi(Slapi_Entry *e, Slapi_Attr *a); /* - * slapi_entry_apply_mods_wsi is similar to slapi_entry_apply_mods. It also + * entry_apply_mods_wsi is similar to entry_apply_mods. It also * handles the state storage information. "csn" is the CSN associated with * this modify operation. */
0
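The DNA change above turns dna_parse_config_entry() into a dual-purpose routine: called with apply=0 it only validates, which lets the pre-op callback reject a bad config write with LDAP_UNWILLING_TO_PERFORM before it is ever applied, while startup and post-op paths call it with apply=1. A small Python sketch of that validate-then-apply pattern, with illustrative attribute names and config shape:

REQUIRED_KEYS = ("dnaType", "dnaNextValue", "dnaFilter", "dnaScope")

def parse_config_entry(entry: dict, apply: bool, live_config: list) -> bool:
    """Validate a proposed DNA range config; only apply it when asked to."""
    for key in REQUIRED_KEYS:
        if key not in entry:
            return False          # invalid: a required setting is missing
    if not apply:
        return True               # validation-only pass (pre-op)
    live_config.append(entry)     # apply pass (startup / post-op)
    return True

live = []
proposed = {"dnaType": "uidNumber", "dnaNextValue": "1000"}
assert parse_config_entry(proposed, apply=False, live_config=live) is False
assert live == []                 # the invalid change is rejected, nothing applied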
65aebb36662623fd9270161afa487f8529e89d15
389ds/389-ds-base
pick up new adminutil, adminserver, and WinSync for INTERNAL_BUILD
commit 65aebb36662623fd9270161afa487f8529e89d15 Author: Noriko Hosoi <[email protected]> Date: Fri May 12 21:38:26 2006 +0000 pick up new adminutil, adminserver, and WinSync for INTERNAL_BUILD diff --git a/component_versions.mk b/component_versions.mk index dd6790b9f..67da0afb9 100644 --- a/component_versions.mk +++ b/component_versions.mk @@ -109,7 +109,7 @@ endif # admin server ifndef ADM_RELDATE - ADM_RELDATE = 20060405 + ADM_RELDATE = 20060512 endif ifndef ADM_VERSDIR ADM_VERSDIR = adminserver/1.0 @@ -144,7 +144,7 @@ ifndef ADMINUTIL_VER ADMINUTIL_DOT_VER=1.0 endif ifndef ADMINUTIL_RELDATE - ADMINUTIL_RELDATE=20060323 + ADMINUTIL_RELDATE=20060511 endif ifndef ADMINUTIL_VERSDIR @@ -229,9 +229,9 @@ ifndef MAVEN_VERSION endif ifndef ADSYNC_VERSION - ADSYNC_VERSION=20051017 + ADSYNC_VERSION=20060330 endif ifndef NT4SYNC_VERSION - NT4SYNC_VERSION=20051017 + NT4SYNC_VERSION=20060330 endif
0
b35e8c772eb2264f44046146037bc467e990906b
389ds/389-ds-base
Ticket 48860 - Add replication tools Description: Created new replication classes that use the new get/set mapping object class. Also added a variety of replication tools. https://fedorahosted.org/389/ticket/48860 Reviewed by: firstyear(Thanks!)
commit b35e8c772eb2264f44046146037bc467e990906b Author: Mark Reynolds <[email protected]> Date: Tue Jun 28 20:46:39 2016 -0400 Ticket 48860 - Add replication tools Description: Created new replciation classes that use the new get/set mapping object class. Also added a variey of replication tools https://fedorahosted.org/389/ticket/48860 Reviewed by: firstyear(Thanks!) diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py index 2031daa6f..972f75aab 100644 --- a/src/lib389/lib389/__init__.py +++ b/src/lib389/lib389/__init__.py @@ -333,11 +333,11 @@ class DirSrv(SimpleLDAPObject): from lib389.dirsrv_log import DirsrvAccessLog, DirsrvErrorLog from lib389.ldclt import Ldclt from lib389.backend import Backends - from lib389.mappingTree import MappingTree from lib389.backend import BackendLegacy as Backend from lib389.suffix import Suffix - from lib389.replica import Replica + from lib389.replica import ReplicaLegacy as Replica + from lib389.replica import Replicas from lib389.changelog import Changelog from lib389.agreement import Agreement from lib389.schema import Schema @@ -362,6 +362,7 @@ class DirSrv(SimpleLDAPObject): # Do we have a certdb path? # if MAJOR < 3: self.backends = Backends(self) + self.replicas = Replicas(self) self.aci = Aci(self) self.nss_ssl = NssSsl(self) self.rsa = RSA(self) diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py index fdbb65321..9af2876ba 100644 --- a/src/lib389/lib389/_mapped_object.py +++ b/src/lib389/lib389/_mapped_object.py @@ -60,8 +60,8 @@ def _gen_filter(attrtypes, values, extra=None): class DSLogging(object): """ - The benefit of this is automatic name detection, and correct application of level - and verbosity to the object. + The benefit of this is automatic name detection, and correct application + of level and verbosity to the object. """ def __init__(self, verbose=False): # Maybe we can think of a way to make this display the instance name or __unicode__? @@ -130,23 +130,67 @@ class DSLdapObject(DSLogging): else: return self._instance.modify_s(self._dn, [(action, key, value)]) - def get(self, key): - """Get an attribute under dn""" + def apply_mods(self, mods): + """Perform modification operation using several mods at once + + @param mods - list of tuples: [(action, key, value),] + @raise ValueError - if a provided mod op is invalid + @raise LDAPError + """ + mod_list = [] + for mod in mods: + if len(mod) < 2: + # Error + raise ValueError('Not enough arguments in the mod op') + elif len(mod) == 2: # no action + action = ldap.MOD_REPLACE + key, value = mod + elif len(mod) == 3: + action, key, value = mod + if action != ldap.MOD_REPLACE or \ + action != ldap.MOD_ADD or \ + action != ldap.MOD_DELETE: + raise ValueError('Invalid mod action(%s)' % str(action)) + else: + # Error too many items + raise ValueError('Too many arguments in the mod op') + + if isinstance(value, list): + value = ensure_list_bytes(value) + else: + value = [ensure_bytes(value)] + + mod_list.append((action, key, value)) + return self._instance.modify_s(self._dn, mod_list) + + def get_attr_vals(self, key): + """Get an attribute's values from the dn""" self._log.debug("%s get(%r)" % (self._dn, key)) # We might need to add a state check for NONE dn. if self._instance.state != DIRSRV_STATE_ONLINE: - ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") - # In the future, I plan to add a mode where if local == true, we can use - # get on dse.ldif to get values offline. 
+ raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") + # In the future, I plan to add a mode where if local == true, we + # can use get on dse.ldif to get values offline. else: return self._instance.getEntry(self._dn).getValues(key) + def get_attr_val(self, key): + """Get a single attribute value from the dn""" + self._log.debug("%s getVal(%r)" % (self._dn, key)) + # We might need to add a state check for NONE dn. + if self._instance.state != DIRSRV_STATE_ONLINE: + raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE") + # In the future, I plan to add a mode where if local == true, we + # can use get on dse.ldif to get values offline. + else: + return self._instance.getEntry(self._dn).getValue(key) + # This needs to work on key + val, and key def remove(self, key): """Remove a value defined by key""" self._log.debug("%s get(%r, %r)" % (self._dn, key, value)) if self._instance.state != DIRSRV_STATE_ONLINE: - ValueError("Invalid state. Cannot remove properties on instance that is not ONLINE") + raise ValueError("Invalid state. Cannot remove properties on instance that is not ONLINE") else: # Do a mod_delete on the value. pass diff --git a/src/lib389/lib389/backend.py b/src/lib389/lib389/backend.py index fbebe3a00..ccf157072 100644 --- a/src/lib389/lib389/backend.py +++ b/src/lib389/lib389/backend.py @@ -11,6 +11,7 @@ from lib389._constants import * from lib389.properties import * from lib389.utils import normalizeDN from lib389 import Entry + # Need to fix this .... from lib389._mapped_object import DSLdapObjects, DSLdapObject @@ -289,7 +290,6 @@ class BackendLegacy(object): % ents[0].dn) # All checks are done, Time to create the backend - import time try: entry = Entry(dn) entry.update({ diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py index 17d609048..bf7f3013c 100644 --- a/src/lib389/lib389/dirsrv_log.py +++ b/src/lib389/lib389/dirsrv_log.py @@ -18,6 +18,7 @@ from lib389._constants import DN_CONFIG from lib389.properties import LOG_ACCESS_PATH, LOG_ERROR_PATH from lib389.utils import ensure_bytes, ensure_str + # Because many of these settings can change live, we need to check for certain # attributes all the time. @@ -38,18 +39,29 @@ MONTH_LOOKUP = { class DirsrvLog(object): + """Class of functions to working with the various DIrectory Server logs + """ def __init__(self, dirsrv): + """Initial class + @param dirsrv - DirSrv object + """ self.dirsrv = dirsrv self.log = self.dirsrv.log - self.prog_timestamp = re.compile('\[(?P<day>\d*)\/(?P<month>\w*)\/(?P<year>\d*):(?P<hour>\d*):(?P<minute>\d*):(?P<second>\d*)(.(?P<nanosecond>\d*))+\s(?P<tz>[\+\-]\d*)') # noqa + self.prog_timestamp = re.compile('\[(?P<day>\d*)\/(?P<month>\w*)\/(?P<year>\d*):(?P<hour>\d*):(?P<minute>\d*):(?P<second>\d*)(.(?P<nanosecond>\d*))+\s(?P<tz>[\+\-]\d*)') # noqa + self.prog_datetime = re.compile('^(?P<timestamp>\[.*\])') def _get_log_attr(self, attr): + """Get a logging configfurable attribute value + @param attr - a logging configuration attribute + """ return self.dirsrv.getEntry(DN_CONFIG).__getattr__(attr) def _get_log_path(self): + """Return the current log file location""" return self._get_log_attr(self.log_path_attr) def _get_all_log_paths(self): + """Return all the log paths""" return glob("%s.*-*" % self._get_log_path()) + [self._get_log_path()] def readlines_archive(self): @@ -57,6 +69,8 @@ class DirsrvLog(object): Returns an array of all the lines in all logs, included rotated logs and compressed logs. 
(gzip) Will likely be very slow. Try using match instead. + + @return - an array of all the lines in all logs """ lines = [] for log in self._get_all_log_paths(): @@ -70,9 +84,10 @@ class DirsrvLog(object): return lines def readlines(self): - """ - Returns an array of all the lines in the log. + """Returns an array of all the lines in the log. Will likely be very slow. Try using match instead. + + @return - an array of all the lines in the log. """ lines = [] self.lpath = self._get_log_path() @@ -83,6 +98,10 @@ class DirsrvLog(object): return lines def match_archive(self, pattern): + """Search all the log files, including "zipped" logs + @param pattern - a regex pattern + @return - results of the pattern matching + """ results = [] prog = re.compile(pattern) for log in self._get_all_log_paths(): @@ -101,6 +120,10 @@ class DirsrvLog(object): return results def match(self, pattern): + """Search the current log file for the pattern + @param pattern - a regex pattern + @return - results of the pattern matching + """ results = [] prog = re.compile(pattern) self.lpath = self._get_log_path() @@ -113,6 +136,10 @@ class DirsrvLog(object): return results def parse_timestamp(self, ts): + """Parse a logs timestamps and break it down into its individual parts + @param ts - The timestamp string from a log + @return - a "datetime" object + """ timedata = self.prog_timestamp.match(ts).groupdict() # Now, have to convert month to an int. dt_str = '{YEAR}-{MONTH}-{DAY} {HOUR}-{MINUTE}-{SECOND} {TZ}'.format( @@ -128,19 +155,55 @@ class DirsrvLog(object): dt = dt.replace(microsecond=int(int(timedata['nanosecond']) / 1000)) return dt + def get_time_in_secs(self, log_line): + """Take the timestamp (not the date) from a DS log and convert it + to seconds: + + [25/May/2016:15:24:27.289341875 -0400]... + + @param log_line - A line of txt from a DS error/access log + @return - time in seconds + """ + + total = 0 + index = log_line.index(':') + 1 + hms = log_line[index: index + 8] + parts = hms.split(':') + if int(parts[0]): + total += int(parts[0]) * 3600 + if int(parts[1]): + total += int(parts[1]) * 60 + total += int(parts[2]) + + return total + class DirsrvAccessLog(DirsrvLog): + """Class for process access logs""" def __init__(self, dirsrv): + """Init the class + @param dirsrv - A DirSrv object + """ super(DirsrvAccessLog, self).__init__(dirsrv) self.log_path_attr = LOG_ACCESS_PATH - # We precompile our regex for parse_line to make it faster. - self.prog_m1 = re.compile('^(?P<timestamp>\[.*\])\sconn=(?P<conn>\d*)\sop=(?P<op>\d*)\s(?P<action>\w*)\s(?P<rem>.*)') # noqa - self.prog_con = re.compile('^(?P<timestamp>\[.*\])\sconn=(?P<conn>\d*)\sfd=(?P<fd>\d*)\sslot=(?P<slot>\d*)\sconnection\sfrom\s(?P<remote>[^\s]*)\sto\s(?P<local>[^\s]*)') # noqa - self.prog_discon = re.compile('^(?P<timestamp>\[.*\])\sconn=(?P<conn>\d*)\sop=(?P<op>\d*)\sfd=(?P<fd>\d*)\s(?P<action>closed)\s-\s(?P<status>\w*)') # noqa + ## We precompile our regex for parse_line to make it faster. 
+ self.prog_m1 = re.compile('^(?P<timestamp>\[.*\])\sconn=(?P<conn>\d*)\sop=(?P<op>\d*)\s(?P<action>\w*)\s(?P<rem>.*)') + self.prog_con = re.compile('^(?P<timestamp>\[.*\])\sconn=(?P<conn>\d*)\sfd=(?P<fd>\d*)\sslot=(?P<slot>\d*)\sconnection\sfrom\s(?P<remote>[^\s]*)\sto\s(?P<local>[^\s]*)') + self.prog_discon = re.compile('^(?P<timestamp>\[.*\])\sconn=(?P<conn>\d*)\sop=(?P<op>\d*)\sfd=(?P<fd>\d*)\s(?P<action>closed)\s-\s(?P<status>\w*)') + # RESULT regex's (based off action.rem) + self.prog_notes = re.compile('err=(?P<err>\d*)\stag=(?P<tag>\d*)\snentries=(?P<nentries>\d*)\setime=(?P<etime>\d*)\snotes=(?P<notes>\w*)') + self.prog_repl = re.compile('err=(?P<err>\d*)\stag=(?P<tag>\d*)\snentries=(?P<nentries>\d*)\setime=(?P<etime>\d*)\scsn=(?P<csn>\w*)') + self.prog_result = re.compile('err=(?P<err>\d*)\stag=(?P<tag>\d*)\snentries=(?P<nentries>\d*)\setime=(?P<etime>\d*)\s(?P<rem>.*)') + # Lists for each regex type + self.full_regexs = [self.prog_m1, self.prog_con, self.prog_discon] + self.result_regexs = [self.prog_notes, self.prog_repl, + self.prog_result] def parse_line(self, line): """ This knows how to break up an access log line into the specific fields. + @param line - A text line from an access log + @return - A dictionary of the log parts """ line = line.strip() action = { @@ -149,20 +212,21 @@ class DirsrvAccessLog(DirsrvLog): # First, pull some well known info out. if self.dirsrv.verbose: self.log.info("--> %s " % line) - m1 = self.prog_m1.match(line) - if m1: - action.update(m1.groupdict()) - # Do more parsing. - # Specifically, we need to break up action.rem based on action.action. - con = self.prog_con.match(line) - if con: - action.update(con.groupdict()) + for regex in self.full_regexs: + result = regex.match(line) + if result: + action.update(result.groupdict()) + if regex == self.prog_discon: + action['action'] = 'DISCONNECT' + break - discon = self.prog_discon.match(line) - if discon: - action.update(discon.groupdict()) - action['action'] = 'DISCONNECT' + if action['action'] == 'RESULT': + for regex in self.result_regexs: + result = regex.match(action['rem']) + if result: + action.update(result.groupdict()) + break action['datetime'] = self.parse_timestamp(action['timestamp']) @@ -171,16 +235,28 @@ class DirsrvAccessLog(DirsrvLog): return action def parse_lines(self, lines): + """Parse multiple log lines + @param lines - a list of log lines + @return - A dictionary of the log parts for each line + """ return map(self.parse_line, lines) class DirsrvErrorLog(DirsrvLog): + """Directory Server Error log class""" def __init__(self, dirsrv): + """Init the Error log class + @param diursrv - A DirSrv object + """ super(DirsrvErrorLog, self).__init__(dirsrv) self.log_path_attr = LOG_ERROR_PATH self.prog_m1 = re.compile('^(?P<timestamp>\[.*\])\s(?P<message>.*)') def parse_line(self, line): + """Parse an errors log line + @line - a text string from an errors log + @return - A dictionary of the log parts + """ line = line.strip() action = self.prog_m1.match(line).groupdict() @@ -188,4 +264,8 @@ class DirsrvErrorLog(DirsrvLog): return action def parse_lines(self, lines): + """Parse multiple lines from an errors log + @param lines - a lits of strings/lines from an errors log + @return - A dictionary of the log parts for each line + """ return map(self.parse_line, lines) diff --git a/src/lib389/lib389/properties.py b/src/lib389/lib389/properties.py index 2293b2b7c..ca9d5cd6f 100644 --- a/src/lib389/lib389/properties.py +++ b/src/lib389/lib389/properties.py @@ -165,6 +165,7 @@ REPL_CLEAN_RUV 
= 'nsds5ReplicaCleanRUV' REPL_ABORT_RUV = 'nsds5ReplicaAbortCleanRUV' REPL_COUNT_COUNT = 'nsds5ReplicaChangeCount' REPL_PRECISE_PURGE = 'nsds5ReplicaPreciseTombstonePurging' +REPL_RELEASE_TIMEOUT = 'nsds5replicaReleaseTimeout' # The values are from the REST API REPLICA_SUFFIX = 'suffix' @@ -190,6 +191,7 @@ REPLICA_CLEAN_RUV = 'ReplicaCleanRUV' REPLICA_ABORT_RUV = 'ReplicaAbortCleanRUV' REPLICA_COUNT_COUNT = 'ReplicaChangeCount' REPLICA_PRECISE_PURGING = 'ReplicaPreciseTombstonePurging' +REPLICA_RELEASE_TIMEOUT = 'ReplicaReleaseTimeout' REPLICA_PROPNAME_TO_ATTRNAME = {REPLICA_SUFFIX: REPL_ROOT, REPLICA_ROOT: REPL_ROOT, @@ -217,7 +219,8 @@ REPLICA_PROPNAME_TO_ATTRNAME = {REPLICA_SUFFIX: REPL_ROOT, REPLICA_CLEAN_RUV: REPL_CLEAN_RUV, REPLICA_ABORT_RUV: REPL_ABORT_RUV, REPLICA_COUNT_COUNT: REPL_COUNT_COUNT, - REPLICA_PRECISE_PURGING: REPL_PRECISE_PURGE} + REPLICA_PRECISE_PURGING: REPL_PRECISE_PURGE, + REPLICA_RELEASE_TIMEOUT: REPL_RELEASE_TIMEOUT} #################################### # diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py index 682acfff5..8ba527ca3 100644 --- a/src/lib389/lib389/replica.py +++ b/src/lib389/lib389/replica.py @@ -10,17 +10,19 @@ import ldap import os import decimal import time -from lib389 import DirSrv, Entry, NoSuchEntryError, InvalidArgumentError from lib389._constants import * -from lib389._replication import RUV from lib389.properties import * from lib389.utils import normalizeDN, escapeDNValue +from lib389._replication import RUV +from lib389.repltools import ReplTools +from lib389 import DirSrv, Entry, NoSuchEntryError, InvalidArgumentError +from lib389._mapped_object import DSLdapObjects, DSLdapObject ROLE_ORDER = {'master': 3, 'hub': 2, 'consumer': 1} ROLE_TO_NAME = {3: 'master', 2: 'hub', 1: 'consumer'} -class Replica(object): +class ReplicaLegacy(object): proxied_methods = 'search_s getEntry'.split() def __init__(self, conn): @@ -29,7 +31,7 @@ class Replica(object): self.log = conn.log def __getattr__(self, name): - if name in Replica.proxied_methods: + if name in ReplicaLegacy.proxied_methods: return DirSrv.__getattr__(self.conn, name) def _get_mt_entry(self, suffix): @@ -265,8 +267,8 @@ class Replica(object): @param rid - number that identify the supplier replica (role=REPLICAROLE_MASTER) in the topology. For hub/consumer (role=REPLICAROLE_HUB or - REPLICAROLE_CONSUMER), rid value is not used. This - parameter is mandatory for supplier. + REPLICAROLE_CONSUMER), rid value is not used. + This parameter is mandatory for supplier. 
@param args - dictionary of initial replica's properties Supported properties are: @@ -293,13 +295,13 @@ class Replica(object): " (REPLICAROLE_*)") raise InvalidArgumentError("role missing") - if not Replica._valid_role(role): + if not ReplicaLegacy._valid_role(role): self.log.fatal("enableReplication: replica role invalid (%s) " % role) raise ValueError("invalid role: %s" % role) # check the validity of 'rid' - if not Replica._valid_rid(role, rid=rid): + if not ReplicaLegacy._valid_rid(role, rid=rid): self.log.fatal("Replica.create: replica role is master but 'rid'" + " is missing or invalid value") raise InvalidArgumentError("rid missing or invalid value") @@ -334,8 +336,8 @@ class Replica(object): properties[prop] = args[prop] # Now set default values of unset properties - Replica._set_or_default(REPLICA_LEGACY_CONS, properties, 'off') - Replica._set_or_default(REPLICA_BINDDN, properties, + ReplicaLegacy._set_or_default(REPLICA_LEGACY_CONS, properties, 'off') + ReplicaLegacy._set_or_default(REPLICA_BINDDN, properties, [defaultProperties[REPLICATION_BIND_DN]]) if role != REPLICAROLE_CONSUMER: @@ -488,7 +490,7 @@ class Replica(object): # Now check we have a suffix entries_backend = self.conn.backend.list(suffix=suffix) if not entries_backend: - self.log.fatal("enableReplication: enable to retrieve the " + + self.log.fatal("enableReplication: unable to retrieve the " + "backend for %s" % suffix) raise ValueError("no backend for suffix %s" % suffix) @@ -529,9 +531,10 @@ class Replica(object): return ret def check_init(self, agmtdn): - """returns tuple - first element is done/not done, 2nd is no error/has - error - @param agmtdn - the agreement dn + """Check that a total update has completed + @returns tuple - first element is done/not done, 2nd is no error/has + error + @param agmtdn - the agreement dn """ done, hasError = False, 0 attrlist = ['cn', @@ -794,3 +797,646 @@ class Replica(object): str(CONSUMER_REPLICAID))]) except ldap.LDAPError as e: raise ValueError('Failed to update replica: ' + str(e)) + + +class Replica(DSLdapObject): + """Replica object. 
There is one "replica" per backend + """ + def __init__(self, instance, dn=None, batch=False): + """Init the Replica object + @param instance - a DirSrv object + @param dn - A DN of the replica entry + @param batch - NOT IMPLELMENTED + """ + super(Replica, self).__init__(instance, dn, batch) + self._rdn_attribute = 'cn' + self._must_attributes = ['cn', REPL_LEGACY_CONS, REPL_TYPE, + REPL_ROOT, REPL_BINDDN, REPL_ID] + + self._create_objectclasses = ['top', 'extensibleObject', + REPLICA_OBJECTCLASS_VALUE] + self._protected = False + self._suffix = None + + @staticmethod + def _valid_role(role): + """Return True if role is valid + + @param role - A string containing the "role" name + @return - True if the role is a valid role name, otherwise return False + """ + if role != REPLICAROLE_MASTER and \ + role != REPLICAROLE_HUB and \ + role != REPLICAROLE_CONSUMER: + return False + else: + return True + + @staticmethod + def _valid_rid(role, rid=None): + """ Return True if rid is valid for the replica role + @param role - A string containing the role name + @param rid - Only needed if the role is a "master" + @return - True is rid is valid, otherwise return False + """ + if role == REPLICAROLE_MASTER: + if not decimal.Decimal(rid) or \ + (rid <= 0) or \ + (rid >= CONSUMER_REPLICAID): + return False + else: + if rid and (rid != CONSUMER_REPLICAID): + return False + return True + + def delete(self): + ''' + Delete a replica related to the provided suffix. + If this replica role was REPLICAROLE_HUB or REPLICAROLE_MASTER, it + also deletes the changelog associated to that replica. If it + exists some replication agreement below that replica, they are + deleted. + + @return None + @raise InvalidArgumentError - if suffix is missing + ldap.LDAPError - for all other update failures + + ''' + + # Get the suffix + suffix = self.get_attr_val(REPL_ROOT) + if not suffix: + self.log.fatal("disableReplication: suffix is not defined") + raise InvalidArgumentError("suffix missing") + + # Delete the agreements + try: + self.deleteAgreements(suffix) + except ldap.LDAPError as e: + self.log.fatal('Failed to delete replica agreements!') + raise e + + # Delete the replica + try: + super(Replica, self).delete() + except ldap.LDAPError as e: + self.log.fatal('Failed to delete replica configuration ' + + '(%s), error: %s' % (dn_replica, str(e))) + raise e + + def deleteAgreements(self): + ''' + Delete all the agreements for the suffix + @raise LDAPError - If failing to delete or search for agreements + ''' + + # Delete the agreements + try: + suffix = self.get_attr_val(REPL_ROOT) + agmts = self._instance.agreement.list(suffix=suffix) + for agmt in agmts: + try: + self._instance.delete_s(agmt.dn) + except ldap.LDAPError as e: + self.log.fatal('Failed to delete replica agreement (%s),' + + ' error: %s' % + (admt.dn, str(e))) + raise e + except ldap.LDAPError as e: + self.log.fatal('Failed to search for replication agreements ' + + 'under (%s), error: %s' % (self._dn, str(e))) + raise e + + def promote(self, newrole, binddn=None, rid=None): + """ + Promote the replica + + @param newrole - The new replication role for the replica: + REPLICAROLE_MASTER + REPLICAROLE_HUB + REPLICAROLE_CONSUMER + @param binddn - The replication bind dn - only applied to master + @param rid - The replication ID, applies only to promotions to "master" + + @raise ldap.NO_SUCH_OBJECT - If suffix is not replicated + + @raise ValueError + """ + + if newrole != REPLICAROLE_MASTER and newrole != REPLICAROLE_HUB: + raise ValueError('Can only prompt 
replica to "master" or "hub"') + + if not binddn: + raise ValueError('"binddn" required for promotion') + + if newrole == REPLICAROLE_MASTER: + if not rid: + raise ValueError('"rid" required for promotion') + else: + # Must be a hub - set the rid + rid = CONSUMER_REPLICAID + + # + # Check the replica role and flags + # + repltype = self.get_attr_val(REPL_TYPE) + replflags = self.get_attr_val(REPL_FLAGS) + + if repltype == REPLICA_TYPE_MASTER and \ + replflags == REPLICA_FLAGS_WRITE: + replicarole = 3 + elif (repltype == REPLICA_TYPE_HUBCON and + replflags == REPLICA_TYPE_MASTER): + replicarole = 2 + else: + replicarole = 1 + + if ROLE_ORDER[newrole] < replicarole: + raise ValueError('Can not promote replica to lower role:' + + ' %s -> %s' % (ROLE_TO_NAME[replicarole], + newrole)) + + # + # Create the changelog + # + try: + self._instance.changelog.create() + except ldap.LDAPError as e: + raise ValueError('Failed to create changelog: %s' % str(e)) + + # + # Check that a RID was provided, and its a valid number + # + if newrole == REPLICAROLE_MASTER: + try: + rid = int(rid) + except: + # Not a number + raise ValueError('"rid" value (%s) is not a number' % str(rid)) + + if rid < 1 and rid > 65534: + raise ValueError('"rid" value (%d) is not in range ' + + ' 1 - 65534' % rid) + + # + # Set bind dn + # + try: + self.set(REPL_BINDDN, binddn) + except ldap.LDAPError as e: + raise ValueError('Failed to update replica: ' + str(e)) + + # + # Set the replica type and flags + # + if newrole == REPLICAROLE_HUB: + try: + self.apply_mods([(REPL_TYPE, '2'), (REPL_FLAGS, '1')]) + except ldap.LDAPError as e: + raise ValueError('Failed to update replica: ' + str(e)) + else: # master + try: + self.apply_mods([(REPL_TYPE, '3'), (REPL_FLAGS, '1'), + (REPL_ID, str(rid))]) + except ldap.LDAPError as e: + raise ValueError('Failed to update replica: ' + str(e)) + + def demote(self, newrole): + """ + Demote a replica to a hub or consumer + @param suffix - The replication suffix + @param newrole - The new replication role of this replica + REPLICAROLE_HUB + REPLICAROLE_CONSUMER + @raise ValueError + """ + if newrole != REPLICAROLE_CONSUMER and newrole != REPLICAROLE_HUB: + raise ValueError('Can only demote replica to "hub" or "consumer"') + + # + # Check the role type + # + repltype = self.get_attr_val(REPL_TYPE) + replflags = self.get_attr_val(REPL_FLAGS) + + if repltype == REPLICA_TYPE_MASTER and \ + replflags == REPLICA_FLAGS_WRITE: + replicarole = 3 + elif (repltype == REPLICA_TYPE_HUBCON and + replflags == REPLICA_FLAGS_WRITE): + replicarole = 2 + else: + replicarole = 1 + + if ROLE_ORDER[newrole] > replicarole: + raise ValueError('Can not demote replica to lower role:' + + ' %s -> %s' % (ROLE_TO_NAME[replicarole], + newrole)) + + # + # Demote it - set the replica type and flags + # + if newrole == 'hub': + flag = '1' + else: + flag = '0' + try: + self.apply_mods([(REPL_TYPE, '2'), (REPL_FLAGS, flag), + (REPL_ID, str(CONSUMER_REPLICAID))]) + except ldap.LDAPError as e: + raise ValueError('Failed to update replica: ' + str(e)) + + def get_role(self): + """Return the replica role: + + @return: "master", "hub", or "consumer" + """ + repltype = self.get_attr_val(REPL_TYPE) + replflags = self.get_attr_val(REPL_FLAGS) + + if repltype == REPLICA_TYPE_MASTER and \ + replflags == REPLICA_FLAGS_WRITE: + replicarole = 3 + elif (repltype == REPLICA_TYPE_HUBCON and + replflags == REPLICA_TYPE_MASTER): + replicarole = 2 + else: + replicarole = 1 + + return ROLE_TO_NAME[replicarole] + + def check_init(self, agmtdn): + """Check 
that a total update has completed + @returns tuple - first element is done/not done, 2nd is no error/has + error + @param agmtdn - the agreement dn + + THIS SHOULD BE IN THE NEW AGREEMENT CLASS + """ + done, hasError = False, 0 + attrlist = ['cn', + 'nsds5BeginReplicaRefresh', + 'nsds5replicaUpdateInProgress', + 'nsds5ReplicaLastInitStatus', + 'nsds5ReplicaLastInitStart', + 'nsds5ReplicaLastInitEnd'] + try: + entry = self._instance.getEntry( + agmtdn, ldap.SCOPE_BASE, "(objectclass=*)", attrlist) + except NoSuchEntryError: + self.log.exception("Error reading status from agreement %r" % + agmtdn) + hasError = 1 + else: + refresh = entry.nsds5BeginReplicaRefresh + inprogress = entry.nsds5replicaUpdateInProgress + status = entry.nsds5ReplicaLastInitStatus + if not refresh: # done - check status + if not status: + print("No status yet") + elif status.find("replica busy") > -1: + print("Update failed - replica busy - status", status) + done = True + hasError = 2 + elif status.find("Total update succeeded") > -1: + print("Update succeeded: status ", status) + done = True + elif inprogress.lower() == 'true': + print("Update in progress yet not in progress: status ", + status) + else: + print("Update failed: status", status) + hasError = 1 + done = True + elif self.verbose: + print("Update in progress: status", status) + + return done, hasError + + def wait_init(self, agmtdn): + """Initialize replication and wait for completion. + @oaram agmtdn - agreement dn + @return - 0 if the initialization is complete + + THIS SHOULD BE IN THE NEW AGREEMENT CLASS + """ + done = False + haserror = 0 + while not done and not haserror: + time.sleep(1) # give it a few seconds to get going + done, haserror = self.check_init(agmtdn) + return haserror + + def start_and_wait(self, agmtdn): + """Initialize an agreement and wait for it to complete + + @param agmtdn - agreement dn + @return - 0 if successful + THIS SHOULD BE IN THE NEW AGREEMENT CLASS + """ + rc = self.start_async(agmtdn) + if not rc: + rc = self.wait_init(agmtdn) + if rc == 2: # replica busy - retry + rc = self.start_and_wait(agmtdn) + return rc + + def start_async(self, agmtdn): + """Initialize replication without waiting. + @param agmtdn - agreement dn + + THIS SHOULD BE IN THE NEW AGREEMENT CLASS + """ + self.log.info("Starting async replication %s" % agmtdn) + mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', 'start')] + self._instance.modify_s(agmtdn, mod) + + def get_ruv_entry(self): + """Return the database RUV entry + @return - The database RUV entry + @raise ValeuError - If suffix is not setup for replication + LDAPError - If there is a problem trying to search for the RUV + """ + try: + entry = self._instance.search_s(self._suffix, + ldap.SCOPE_SUBTREE, + REPLICA_RUV_FILTER) + if entry: + return entry[0] + else: + raise ValueError('Suffix (%s) is not setup for replication' % + suffix) + except ldap.LDAPError as e: + raise e + + def test(self, *replica_dirsrvs): + '''Make a "dummy" update on the the replicated suffix, and check + all the provided replicas to see if they received the update. + + @param *replica_dirsrvs - DirSrv instance, DirSrv instance, ... 
+ @return True - if all servers have recevioed the update by this + replica, otherwise return False + @raise LDAPError - when failing to update/search database + ''' + + # Generate a unique test value + test_value = ('test replication from ' + self._instance.serverid + + ' to ' + replica_dirsrvs[0].serverid + ': ' + + str(int(time.time()))) + self._instance.modify_s(self._suffix, + [(ldap.MOD_REPLACE, 'description', test_value)]) + + for replica in replica_dirsrvs: + loop = 0 + replicated = False + while loop <= 30: + # Wait 60 seconds before giving up + try: + + entry = replica.getEntry(self._suffix, + ldap.SCOPE_BASE, + '(objectclass=*)') + if entry.hasValue('description', test_value): + replicated = True + break + except ldap.LDAPError as e: + raise e + loop += 1 + time.sleep(2) # Check the replica every 2 seconds + if not replicated: + self._log.error('Replication is not in sync with replica ' + + 'server (%s)' % replica.serverid) + return False + + # All is good, remove the test mod from the suffix entry + self._instance.modify_s(self._suffix, + [(ldap.MOD_DELETE, 'description', test_value)]) + + return True + + +class Replicas(DSLdapObjects): + """Class of all the Replicas""" + def __init__(self, instance, batch=False): + """Init Replicas + @param instance - a DirSrv objectc + @param batch - NOT IMPLELMENTED + """ + super(Replicas, self).__init__(instance=instance, batch=False) + self._objectclasses = [REPLICA_OBJECTCLASS_VALUE] + self._filterattrs = [REPL_ROOT] + self._childobject = Replica + self._basedn = 'cn=mapping tree,cn=config' + + def get(self, selector=[], dn=None): + """Wrap Replicas' "get", and set the suffix after we get the replica + """ + replica = super(Replicas, self).get(selector, dn) + if replica: + # Get and set the replica's suffix + replica._suffix = replica.get_attr_val(REPL_ROOT) + return replica + + def enable(self, suffix, role, replicaID=None, args=None): + """Enable replication for this suffix + + @param suffix - The suffix to enable replication for + @param role - REPLICAROLE_MASTER, REPLICAROLE_HUB or + REPLICAROLE_CONSUMER + @param rid - number that identify the supplier replica + (role=REPLICAROLE_MASTER) in the topology. For + hub/consumer (role=REPLICAROLE_HUB or + REPLICAROLE_CONSUMER), rid value is not used. This + parameter is mandatory for supplier. 
+ + @param args - dictionary of additional replica properties + + @return replica DN + + @raise InvalidArgumentError - if missing mandatory arguments + ValueError - argument with invalid value + LDAPError - failed to add replica entry + + """ + # Normalize the suffix + suffix = normalizeDN(suffix) + + # Check validity of role + if not role: + self._log.fatal("Replica.create: replica role not specify" + + " (REPLICAROLE_*)") + raise InvalidArgumentError("role missing") + + if not Replica._valid_role(role): + self._log.fatal("enableReplication: replica role invalid (%s) " % + role) + raise ValueError("invalid role: %s" % role) + + # role is fine, set the replica type + if role == REPLICAROLE_MASTER: + rtype = REPLICA_RDWR_TYPE + # check the validity of 'rid' + if not Replica._valid_rid(role, rid=replicaID): + self._log.fatal("Replica.create: replica role is master but " + + "'rid' is missing or invalid value") + raise InvalidArgumentError("rid missing or invalid value") + else: + rtype = REPLICA_RDONLY_TYPE + + # Set the properties provided as mandatory parameter + properties = {'cn': RDN_REPLICA, + REPL_ROOT: suffix, + REPL_ID: str(replicaID), + REPL_TYPE: str(rtype)} + + # If the properties in args are valid add them to 'properties' + if args: + for prop in args: + if not inProperties(prop, REPLICA_PROPNAME_TO_ATTRNAME): + raise ValueError("unknown property: %s" % prop) + properties[prop] = args[prop] + + # Now set default values of unset properties + if REPLICA_LEGACY_CONS not in properties: + properties[REPL_LEGACY_CONS] = 'off' + if role != REPLICAROLE_CONSUMER: + properties[REPL_FLAGS] = "1" + + # + # Check if replica entry is already in the mapping-tree + # + try: + replica = self.get(suffix) + # Should we return an error, or just return the existing relica? 
+ self._log.warn("Already setup replica for suffix %s" % suffix) + return replica + except: + pass + + # + # Create changelog + # + if (role == REPLICAROLE_MASTER) or (role == REPLICAROLE_HUB): + self._instance.changelog.create() + + # Create the default replica manager entry if it does not exist + if REPL_BINDDN not in properties: + properties[REPL_BINDDN] = defaultProperties[REPLICATION_BIND_DN] + if REPLICATION_BIND_PW not in properties: + repl_pw = defaultProperties[REPLICATION_BIND_PW] + else: + repl_pw = properties[REPLICATION_BIND_PW] + # Remove this property so we don't add it to the replica entry + del properties[REPLICATION_BIND_PW] + + ReplTools.createReplManager(self._instance, + repl_manager_dn=properties[REPL_BINDDN], + repl_manager_pw=repl_pw) + + # + # Now create the replica entry + # + mtents = self._instance.mappingtree.list(suffix=suffix) + suffix_dn = mtents[0].dn + replica = self.create(RDN_REPLICA, properties) + replica._suffix = suffix + + return replica + + def delete(self, suffix): + """Disable replication on the suffix specified + + @param suffix - Replicated suffix to disable + @raise ValueError is suffix is not being replicated + """ + try: + replica = self.get(suffix) + except ldap.NO_SUCH_OBJECT: + raise ValueError('Suffix (%s) is not setup for replication' % + suffix) + try: + replica.delete() + except ldap.LDAPError as e: + raise ValueError('Failed to disable replication for suffix ' + + '(%s) LDAP error (%s)' % (suffix, str(e))) + + def promote(self, suffix, newrole, binddn=None, rid=None): + """Promote the replica speficied by the suffix to the new role + + @param suffix - The replication suffix + @param newrole - The new replication role for the replica: + REPLICAROLE_MASTER + REPLICAROLE_HUB + + @param binddn - The replication bind dn - only applied to master + @param rid - The replication ID - applies only promotions to "master" + + @raise ldap.NO_SUCH_OBJECT - If suffix is not replicated + """ + replica = self.get(suffix) + try: + replica = self.get(suffix) + except ldap.NO_SUCH_OBJECT: + raise ValueError('Suffix (%s) is not setup for replication' % + suffix) + replica.promote(newrole, binddn, rid) + + def demote(self, suffix, newrole): + """Promote the replica speficied by the suffix to the new role + @param suffix - The replication suffix + @param newrole - The new replication role of this replica + REPLICAROLE_HUB + REPLICAROLE_CONSUMER + @raise ldap.NO_SUCH_OBJECT - If suffix is not replicated + """ + replica = self.get(suffix) + try: + replica = self.get(suffix) + except ldap.NO_SUCH_OBJECT: + raise ValueError('Suffix (%s) is not setup for replication' % + suffix) + replica.demote(newrole) + + def get_dn(self, suffix): + """Return the DN of the replica from cn=config, this is also + known as the mapping tree entry + + @param suffix - the replication suffix to get the mapping tree DN + @return - The DN of the replication entry from cn=config + """ + try: + replica = self.get(suffix) + except ldap.NO_SUCH_OBJECT: + raise ValueError('Suffix (%s) is not setup for replication' % + suffix) + return replica._dn + + def get_ruv_entry(self, suffix): + """Return the database RUV entry for the provided suffix + @return - The database RUV entry + @raise ValeuError - If suffix is not setup for replication + LDAPError - If there is a problem trying to search for the RUV + """ + try: + replica = self.get(suffix) + except ldap.NO_SUCH_OBJECT: + raise ValueError('Suffix (%s) is not setup for replication' % + suffix) + return replica.get_ruv_entry() + + def 
test(self, suffix, *replica_dirsrvs): + """Make a "dummy" update on the the replicated suffix, and check + all the provided replicas to see if they received the update. + + @param suffix - the replicated suffix we want to check + @param *replica_dirsrvs - DirSrv instance, DirSrv instance, ... + @return True - if all servers have recevioed the update by this + replica, otherwise return False + @raise LDAPError - when failing to update/search database + """ + try: + replica = self.get(suffix) + except ldap.NO_SUCH_OBJECT: + raise ValueError('Suffix (%s) is not setup for replication' % + suffix) + return replica.test(*replica_dirsrvs) diff --git a/src/lib389/lib389/repltools.py b/src/lib389/lib389/repltools.py new file mode 100644 index 000000000..b9d1e99b4 --- /dev/null +++ b/src/lib389/lib389/repltools.py @@ -0,0 +1,256 @@ +import os +import os.path +import re +import subprocess +import ldap +from lib389._constants import * +from lib389.properties import * + + +# Helper functions +def _alphanum_key(s): + """Turn the string into a list of string and number parts. + """ + return [int(c) if c.isdigit() else c for c in + re.split('([0-9]+)', s)] + + +def smart_sort(str_list): + """Sort the given list in the way that humans expect. + @param str_list - a list of strings to sort + """ + str_list.sort(key=_alphanum_key) + + +def _getCSNTime(inst, csn): + """Take a CSN and get the access log timestamp in seconds + + @param inst - A DirSrv object to check access log + @param csn - A "csn" string that is used to find when the csn was logged in + the access log, and what time in seconds it was logged. + @return - The time is seconds that the operation was logged. + """ + + op_line = inst.ds_access_log.match('.*csn=%s' % csn) + if op_line: + #vals = inst.ds_access_log.parse_line(op_line[0]) + return inst.ds_access_log.get_time_in_secs(op_line[0]) + else: + return None + + +def _getCSNandTime(inst, line): + """Take the line and find the CSN from the inst's access logs + + @param inst - A DirSrv object to check access log + @param line - A "RESULT" line from the access log that contains a "csn" + @return - a tuple containing the "csn" value and the time in seconds when + it was logged. + """ + + op_line = inst.ds_access_log.match('.*%s.*' % line) + if op_line: + vals = inst.ds_access_log.parse_line(op_line[0]) + op = vals['op'] + conn = vals['conn'] + + # Now find the result line and CSN + result_line = inst.ds_access_log.match_archive( + '.*conn=%s op=%s RESULT.*' % (conn, op)) + + if result_line: + vals = inst.ds_access_log.parse_line(result_line[0]) + if 'csn' in vals: + ts = inst.ds_access_log.get_time_in_secs(result_line[0]) + return (vals['csn'], ts) + + return (None, None) + + +class ReplTools(object): + """Replication tools + """ + + @staticmethod + def checkCSNs(dirsrv_replicas): + """Gather all the CSN strings from the access and verify all of those + CSNs exist on all the other replicas. + + @param dirsrv_replicas - a list of DirSrv objects. 
The list must begin + with master replicas + + @return - True if all the CSNs are present, otherwise False + """ + + csn_logs = [] + csn_log_count = 0 + + for replica in dirsrv_replicas: + logdir = '%s*' % replica.ds_access_log._get_log_path() + outfile = '/tmp/csn' + str(csn_log_count) + csn_logs.append(outfile) + csn_log_count += 1 + cmd = ("grep csn= " + logdir + + " | awk '{print $10}' | sort -u > " + outfile) + os.system(cmd) + + # Set a side the first master log - we use this for our "diffing" + main_log = csn_logs[0] + csn_logs.pop(0) + + # Now process the remaining csn logs + for csnlog in csn_logs: + cmd = 'diff %s %s' % (main_log, csnlog) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) + line = proc.stdout.readline() + if line != "" and line != "\n": + if not line.startswith("\\"): + log.fatal("We have a CSN mismatch between (%s vs %s): %s" % + (main_log, csn_log, line)) + return False + + return True + + @staticmethod + def replConvReport(suffix, ops, replica, all_replicas): + """Find and measure the convergence of entries from a replica, and + print a report on how fast all the "ops" replicated to the other + replicas. + + @param suffix - Replicated suffix + @param ops - a list of "operations" to search for in the access logs + @param replica - Dirsrv object where the entries originated + @param all_replicas - A list of Dirsrv replicas: + (suppliers, hubs, consumers) + @return - None + """ + + print('Convergence Report for replica: %s (%s)' % + (replica.serverid, suffix)) + print('-' * 80) + + # Loop through each operation checking all the access logs + for op in ops: + csnstr, csntime = _getCSNandTime(replica, op) + if csnstr is None and csntime is None: + # Didn't find a csn, move on + continue + + conv_time = [] + longest_time = 0 + for inst in all_replicas: + replObj = inst.replicas.get(suffix) + if replObj is None: + inst.log.warn('(%s) not setup for replication of (%s)' % + (inst.serverid, suffix)) + continue + ctime = _getCSNTime(inst, csnstr) + if ctime: + role = replObj.get_role() + if role == REPLICAROLE_MASTER: + txt = ' Master (%s)' % (inst.serverid) + elif role == REPLICAROLE_HUB: + txt = ' Hub (%s)' % (inst.serverid) + elif role == REPLICAROLE_CONSUMER: + txt = ' Consumer (%s)' % (inst.serverid) + else: + txt = '?' 
+ ctime = ctime - csntime + conv_time.append(str(ctime) + txt) + if ctime > longest_time: + longest_time = ctime + + smart_sort(conv_time) + print('\n Operation: %s\n %s' % (op, '-' * 40)) + print('\n Convergence times:') + for line in conv_time: + parts = line.split(' ', 1) + print(' %8s secs - %s' % (parts[0], parts[1])) + print('\n Longest Convergence Time: ' + + str(longest_time)) + + print('\nEnd of Convergence Report for: %s\n' % (replica.serverid)) + + @staticmethod + def replIdle(replicas, suffix=DEFAULT_SUFFIX): + """Take a list of DirSrv Objects and check to see if all of the present + replication agreements are idle for a particular backend + + @param replicas - List of DirSrv objects that are using replication + @param suffix - The replicated suffix + @raise LDAPError - if unable to search for the replication agreements + @return - True if all the agreements are idle, otherwise False + """ + + IDLE_MSG = ('0 Replica acquired successfully: Incremental ' + + 'update succeeded') + STATUS_ATTR = 'nsds5replicaLastUpdateStatus' + FILTER = ('(&(nsDS5ReplicaRoot=' + suffix + + ')(objectclass=nsds5replicationAgreement))') + repl_idle = True + + for inst in replicas: + try: + entries = inst.search_s("cn=config", + ldap.SCOPE_SUBTREE, FILTER, [STATUS_ATTR]) + if entries: + for entry in entries: + if entry.getValue(STATUS_ATTR) != IDLE_MSG: + repl_idle = False + break + + if not repl_idle: + break + + except ldap.LDAPError as e: + log.fatal('Failed to search the repl agmts on ' + + '%s - Error: %s' % (inst.serverid, str(e))) + assert False + return repl_idle + + @staticmethod + def createReplManager(server, repl_manager_dn=None, repl_manager_pw=None): + '''Create an entry that will be used to bind as replication manager. + + @param server - A DirSrv object to connect to + @param repl_manager_dn - DN of the bind entry. If not provided use + the default one + @param repl_manager_pw - Password of the entry. 
If not provide use + the default one + @return None + @raise KeyError - if can not find valid values of Bind DN and Pwd + LDAPError - if we fail to add the replication manager + ''' + + # check the DN and PW + try: + repl_manager_dn = repl_manager_dn or \ + defaultProperties[REPLICATION_BIND_DN] + repl_manager_pw = repl_manager_pw or \ + defaultProperties[REPLICATION_BIND_PW] + if not repl_manager_dn or not repl_manager_pw: + raise KeyError + except KeyError: + if not repl_manager_pw: + server.log.warning("replica_createReplMgr: bind DN password " + + "not specified") + if not repl_manager_dn: + server.log.warning("replica_createReplMgr: bind DN not " + + "specified") + raise + + # If the replication manager entry already exists, just return + try: + entries = server.search_s(repl_manager_dn, ldap.SCOPE_BASE, + "objectclass=*") + if entries: + # it already exist, fine + return + except ldap.NO_SUCH_OBJECT: + pass + + # ok it does not exist, create it + attrs = {'nsIdleTimeout': '0', + 'passwordExpirationTime': '20381010000000Z'} + server.setupBindDN(repl_manager_dn, repl_manager_pw, attrs) + diff --git a/src/lib389/lib389/tests/backend_test.py b/src/lib389/lib389/tests/backend_test.py index 841f2268f..570a15234 100644 --- a/src/lib389/lib389/tests/backend_test.py +++ b/src/lib389/lib389/tests/backend_test.py @@ -81,6 +81,7 @@ def test_list(topology): BACKEND_NAME: NEW_BACKEND_1, 'suffix': NEW_SUFFIX_1, }) + ents = topology.standalone.backends.list() for ent in ents: topology.standalone.log.info("List(%d): backend %s" % @@ -93,6 +94,7 @@ def test_list(topology): BACKEND_NAME: NEW_BACKEND_2, 'suffix': NEW_SUFFIX_2, }) + ents = topology.standalone.backends.list() for ent in ents: topology.standalone.log.info("List(%d): backend %s" % diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py index c8d29ca6f..34c01877e 100644 --- a/src/lib389/lib389/tools.py +++ b/src/lib389/lib389/tools.py @@ -1518,13 +1518,16 @@ class SetupDs(object): # Change the root password finally # Complete. - ds_instance.config.set('nsslapd-rootpw', ensure_str(slapd['root_password'])) + ds_instance.config.set('nsslapd-rootpw', + ensure_str(slapd['root_password'])) def _remove_ds(self): """ The opposite of install: Removes an instance from the system. This takes a backup of all relevant data, and removes the paths. """ - # This probably actually would need to be able to read the ldif, to know what to remove ... - for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): + # This probably actually would need to be able to read the ldif, to + # know what to remove ... + for path in ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', + 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'): print(path)
0
9f835473fe0893bc202d49a1069c0b7be5c0cab0
389ds/389-ds-base
Resolves: bug 288291 Bug Description: adding a view object inside a view object that has an improper nsviewfilter crashes the server Reviewed by: nhosoi (Thanks!) Fix Description: I could not reproduce the problem by simply adding the bogus nsviewfilter. The server seemed to run fine, but I didn't stress it. However, if I restarted the server, the server would core during startup. The last message in the error log would say something about recovering the database, which is probably why the bug reporter said that it will not recover the database. The problem doesn't appear to be with views specifically, but with any internal search which uses the search_internal_callback_pb() (as opposed to the non-callback internal search) when there are search base rewriters (such as the views code). The aci code uses this type of search at startup to find the acis, and that's where I saw the crash. I could crash the server at startup regardless of whether the view filter was bogus or not. The problem is that we are not passing in the address of new_base to slapi_ch_free. The fix is to use slapi_ch_free_string and pass in the address of the string. That fixes the crash. I also cleaned up a few places in the views code which were not checking to see if slapi_str2filter returned NULL, which would happen in the case of the bogus search filter. I also added an error message which will tell the user that filter X in entry Y is bogus. Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no
commit 9f835473fe0893bc202d49a1069c0b7be5c0cab0 Author: Rich Megginson <[email protected]> Date: Fri Oct 12 16:53:03 2007 +0000 Resolves: bug 288291 Bug Description: add an view object inside a view object that has an improper nsviewfilter crashes the server Reviewed by: nhosoi (Thanks!) Fix Description: I could not reproduce the problem by simply adding the bogus nsviewfilter. The server seemed to run fine, but I didn't stress it. However, if I restarted the server, the server would core during startup. The last message in the error log would say something about recovering the database, which is probably why the bug reporter said that it will not recover the database. The problem doesn't appear to be with views specifically, but with any internal search which uses the search_internal_callback_pb() (as opposed to the non callback internal search) and there are search base rewriters (such as the views code). The aci code uses this type of search at startup to find the acis, and that's where I saw the crash. I could crash the server at startup regardless of whether the view filter was bogus or not. The problem is that we are not passing in the address of new_base to slapi_ch_free. The fix is to use slapi_ch_free_string and pass in the address of the string. That fixes the crash. I also cleaned up a few places in the views code which was not checking to see if slapi_str2filter returned NULL, which would happen in the case of the bogus search filter. I also added an error message which will tell the user that filter X in entry Y is bogus. Platforms tested: RHEL5 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/plugins/views/views.c b/ldap/servers/plugins/views/views.c index 3a6d83bde..243ac9625 100644 --- a/ldap/servers/plugins/views/views.c +++ b/ldap/servers/plugins/views/views.c @@ -764,6 +764,12 @@ static void views_cache_create_applied_filter(viewEntry *pView) buf = slapi_ch_strdup(current->viewfilter); pCurrentFilter = slapi_str2filter( buf ); + if (!pCurrentFilter) { + char ebuf[BUFSIZ]; + slapi_log_error(SLAPI_LOG_FATAL, VIEWS_PLUGIN_SUBSYSTEM, + "Error: the view filter [%s] in entry [%s] is not valid\n", + buf, escape_string(current->pDn, ebuf)); + } if(pBuiltFilter && pCurrentFilter) pBuiltFilter = slapi_filter_join_ex( LDAP_FILTER_AND, pBuiltFilter, pCurrentFilter, 0 ); else @@ -935,7 +941,13 @@ Slapi_Filter *views_cache_create_descendent_filter(viewEntry *ancestor, PRBool u if(buf) { pCurrentFilter = slapi_str2filter( buf ); - if(pOrSubFilter) + if (!pCurrentFilter) { + char ebuf[BUFSIZ]; + slapi_log_error(SLAPI_LOG_FATAL, VIEWS_PLUGIN_SUBSYSTEM, + "Error: the view filter [%s] in entry [%s] is not valid\n", + buf, escape_string(currentChild->pDn, ebuf)); + } + if(pOrSubFilter && pCurrentFilter) pOrSubFilter = slapi_filter_join_ex( LDAP_FILTER_OR, pOrSubFilter, pCurrentFilter, 0 ); else pOrSubFilter = pCurrentFilter; @@ -994,8 +1006,14 @@ static void views_cache_create_inclusion_filter(viewEntry *pView) buf = slapi_ch_calloc(1, strlen(viewRDNstr) + 11 ); /* 3 for filter */ sprintf(buf, "(%s)", viewRDNstr ); viewSubFilter = slapi_str2filter( buf ); - - if(pView->includeChildViewsFilter) + if (!viewSubFilter) { + char ebuf[BUFSIZ]; + slapi_log_error(SLAPI_LOG_FATAL, VIEWS_PLUGIN_SUBSYSTEM, + "Error: the view filter [%s] in entry [%s] is not valid\n", + buf, escape_string(current->pDn, ebuf)); + } + + if(pView->includeChildViewsFilter && viewSubFilter) pView->includeChildViewsFilter = slapi_filter_join_ex( LDAP_FILTER_OR, pView->includeChildViewsFilter, viewSubFilter, 0 ); 
else pView->includeChildViewsFilter = viewSubFilter; diff --git a/ldap/servers/slapd/plugin_internal_op.c b/ldap/servers/slapd/plugin_internal_op.c index 35021e8b6..e936c1a1d 100644 --- a/ldap/servers/slapd/plugin_internal_op.c +++ b/ldap/servers/slapd/plugin_internal_op.c @@ -768,7 +768,7 @@ done: } if(original_base != new_base) - slapi_ch_free((void**)new_base); + slapi_ch_free_string(&new_base); /* we strdup'd this above - need to free */ slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &original_base);
0
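The crash fixed in commit 9f835473 above comes down to the calling convention of the slapi free helpers: slapi_ch_free() and slapi_ch_free_string() expect the address of the pointer, so they can free the block and reset the caller's variable to NULL. A minimal sketch of the wrong and the corrected call, assuming only the public slapi-plugin.h declarations (the wrapper function here is illustrative, not part of the patch):

#include <slapi-plugin.h>

void
example_rewrite_and_free(const char *original_base)
{
    /* strdup'd copy, as the internal-op code does for a rewritten search base */
    char *new_base = slapi_ch_strdup(original_base);

    /* ... use new_base ... */

    /* WRONG (pre-fix): the string pointer itself is cast to void**, so the
     * first bytes of the string are interpreted as a heap pointer and freed:
     *
     *     slapi_ch_free((void **)new_base);
     */

    /* RIGHT: pass the address of the pointer; the helper frees the string
     * and sets new_base to NULL. */
    slapi_ch_free_string(&new_base);
}

Because of the same by-address convention, the fix could equally have been written as slapi_ch_free((void **)&new_base); slapi_ch_free_string() is simply the string-typed convenience wrapper.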
94f9ceffbc0dba95763b9a27b71955323c58384f
389ds/389-ds-base
Ticket #355 - winsync should not delete entry that appears to be out of scope https://fedorahosted.org/389/ticket/355 Resolves: Ticket #355 Bug Description: winsync should not delete entry that appears to be out of scope Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: There is a new winsync config attribute - winSyncMoveAction - this is the action to take on the DS side when the winsync finds an AD entry that has the same name/uid as a DS entry but the AD entry is out of the scope of the sync agreement (winsync has to search out of scope/subtree on AD to support deleted and moved entries). In earlier versions of DS, these entries were ignored. When DS was changed to support entry move/subtree rename, the winsync code was changed to delete entries moved out of scope. The new winSyncMoveAction has 3 values: none - ignore moved entries (like older versions of DS) delete - delete DS entries when the AD entry moves out of scope - like current versions of DS unsync - new behavior - if the DS entry is currently synced with the AD entry this will cause the DS entry to be "unlinked" from the AD entry so that they will no longer be in sync The default value is "none" because we should not unexpectedly delete DS entries (principle of least astonishment). Another problem with winsync is that it allowed you to change the subtree and domain in the middle of a sync update - this can lead to a great deal of confusion if suddenly many entries are out of scope. The fix is to "save" the changes in the entry, and apply those changes when the update is complete. Platforms tested: RHEL6 x86_64 Flag Day: yes - new attribute, schema Doc impact: yes - new attribute, schema (cherry picked from commit 3206571b8ac8308482c20c3866f407079479b8e6)
commit 94f9ceffbc0dba95763b9a27b71955323c58384f Author: Rich Megginson <[email protected]> Date: Wed Aug 22 20:52:24 2012 -0600 Ticket #355 - winsync should not delete entry that appears to be out of scope https://fedorahosted.org/389/ticket/355 Resolves: Ticket #355 Bug Description: winsync should not delete entry that appears to be out of scope Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: There is a new winsync config attribute - winSyncMoveAction - this is the action to take on the DS side when the winsync finds an AD entry that has the same name/uid as a DS entry but the AD entry is out of the scope of the sync agreement (winsync has to search out of scope/subtree on AD to support deleted and moved entries). In earlier versions of DS, these entries were ignored. When DS was changed to support entry move/subtree rename, the winsync code was changed to delete entries moved out of scope. The new winSyncMoveAction has 3 values: none - ignore moved entries (like older versions of DS) delete - delete DS entries when the AD entry moves out of scope - like current versions of DS unsync - new behavior - if the DS entry is currently synced with the AD entry this will cause the DS entry to be "unlinked" from the AD entry so that they will no longer be in sync The default value is "none" because we should not unexpectedly delete DS entries (principle of least astonishment). Another problem with winsync is that it allowed you to change the subtree and domain in the middle of a sync update - this can lead to a great deal of confusion if suddenly many entries are out of scope. The fix is to "save" the changes in the entry, and apply those changes when the update is complete. Platforms tested: RHEL6 x86_64 Flag Day: yes - new attribute, schema Doc impact: yes - new attribute, schema (cherry picked from commit 3206571b8ac8308482c20c3866f407079479b8e6) diff --git a/ldap/schema/02common.ldif b/ldap/schema/02common.ldif index db8ea19bb..c209615f6 100644 --- a/ldap/schema/02common.ldif +++ b/ldap/schema/02common.ldif @@ -144,6 +144,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.1004 NAME 'nsds7WindowsDomain' DESC 'Net attributeTypes: ( 2.16.840.1.113730.3.1.1005 NAME 'nsds7DirsyncCookie' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.1099 NAME 'winSyncInterval' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 2.16.840.1.113730.3.1.1100 NAME 'oneWaySync' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) +attributeTypes: ( 2.16.840.1.113730.3.1.2139 NAME 'winSyncMoveAction' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' ) attributeTypes: ( 1.3.6.1.1.4 NAME 'vendorName' EQUALITY 1.3.6.1.4.1.1466.109.114.1 SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation X-ORIGIN 'RFC 3045' ) attributeTypes: ( 1.3.6.1.1.5 NAME 'vendorVersion' EQUALITY 1.3.6.1.4.1.1466.109.114.1 SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation X-ORIGIN 'RFC 3045' ) attributeTypes: ( 2.16.840.1.113730.3.1.3023 NAME 'nsViewFilter' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'Netscape Directory Server' ) @@ -178,7 +179,7 @@ objectClasses: ( 
2.16.840.1.113730.3.2.99 NAME 'cosSuperDefinition' DESC 'Netsca objectClasses: ( 2.16.840.1.113730.3.2.100 NAME 'cosClassicDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition MAY ( cosTemplateDn $ cosspecifier ) X-ORIGIN 'Netscape Directory Server' ) objectClasses: ( 2.16.840.1.113730.3.2.101 NAME 'cosPointerDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition MAY ( cosTemplateDn ) X-ORIGIN 'Netscape Directory Server' ) objectClasses: ( 2.16.840.1.113730.3.2.102 NAME 'cosIndirectDefinition' DESC 'Netscape defined objectclass' SUP cosSuperDefinition MAY ( cosIndirectSpecifier ) X-ORIGIN 'Netscape Directory Server' ) -objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5replicaSessionPauseTime $ nsds7WindowsReplicaSubtree $ nsds7DirectoryReplicaSubtree $ nsds7NewWinUserSyncEnabled $ nsds7NewWinGroupSyncEnabled $ nsds7WindowsDomain $ nsds7DirsyncCookie $ winSyncInterval $ oneWaySync) X-ORIGIN 'Netscape Directory Server' ) +objectClasses: ( 2.16.840.1.113730.3.2.503 NAME 'nsDSWindowsReplicationAgreement' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( nsDS5ReplicaHost $ nsDS5ReplicaPort $ nsDS5ReplicaTransportInfo $ nsDS5ReplicaBindDN $ nsDS5ReplicaCredentials $ nsDS5ReplicaBindMethod $ nsDS5ReplicaRoot $ nsDS5ReplicatedAttributeList $ nsDS5ReplicaUpdateSchedule $ nsds5BeginReplicaRefresh $ description $ nsds50ruv $ nsruvReplicaLastModified $ nsds5ReplicaTimeout $ nsds5replicaChangesSentSinceStartup $ nsds5replicaLastUpdateEnd $ nsds5replicaLastUpdateStart $ nsds5replicaLastUpdateStatus $ nsds5replicaUpdateInProgress $ nsds5replicaLastInitEnd $ nsds5replicaLastInitStart $ nsds5replicaLastInitStatus $ nsds5debugreplicatimeout $ nsds5replicaBusyWaitTime $ nsds5replicaSessionPauseTime $ nsds7WindowsReplicaSubtree $ nsds7DirectoryReplicaSubtree $ nsds7NewWinUserSyncEnabled $ nsds7NewWinGroupSyncEnabled $ nsds7WindowsDomain $ nsds7DirsyncCookie $ winSyncInterval $ oneWaySync $ winSyncMoveAction) X-ORIGIN 'Netscape Directory Server' ) objectClasses: ( 2.16.840.1.113730.3.2.128 NAME 'costemplate' DESC 'Netscape defined objectclass' SUP top MAY ( cn $ cospriority ) X-ORIGIN 'Netscape Directory Server' ) objectClasses: ( 2.16.840.1.113730.3.2.304 NAME 'nsView' DESC 'Netscape defined objectclass' SUP top AUXILIARY MAY ( nsViewFilter $ description ) X-ORIGIN 'Netscape Directory Server' ) objectClasses: ( 2.16.840.1.113730.3.2.316 NAME 'nsAttributeEncryption' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsEncryptionAlgorithm ) X-ORIGIN 'Netscape Directory Server' ) diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h index 26d029846..7b6cb8cc7 100644 --- a/ldap/servers/plugins/replication/repl5.h +++ b/ldap/servers/plugins/replication/repl5.h @@ -169,6 +169,7 @@ extern const char *type_nsds7DirsyncCookie; 
extern const char *type_nsds7WindowsDomain; extern const char *type_winSyncInterval; extern const char *type_oneWaySync; +extern const char *type_winsyncMoveAction; /* To Allow Consumer Initialisation when adding an agreement - */ extern const char *type_nsds5BeginReplicaRefresh; @@ -342,6 +343,7 @@ void agmt_set_last_update_start (Repl_Agmt *ra, time_t start_time); void agmt_set_last_update_end (Repl_Agmt *ra, time_t end_time); void agmt_set_last_update_status (Repl_Agmt *ra, int ldaprc, int replrc, const char *msg); void agmt_set_update_in_progress (Repl_Agmt *ra, PRBool in_progress); +PRBool agmt_get_update_in_progress (const Repl_Agmt *ra); void agmt_set_last_init_start (Repl_Agmt *ra, time_t start_time); void agmt_set_last_init_end (Repl_Agmt *ra, time_t end_time); void agmt_set_last_init_status (Repl_Agmt *ra, int ldaprc, int replrc, const char *msg); @@ -367,6 +369,7 @@ void agmt_set_cleanruv_notified_from_entry(Repl_Agmt *ra, Slapi_Entry *e); int agmt_set_cleanruv_data(Repl_Agmt *ra, ReplicaId rid, int op); int agmt_is_cleanruv_notified(Repl_Agmt *ra, ReplicaId rid); int agmt_set_timeout(Repl_Agmt *ra, long timeout); +void agmt_update_done(Repl_Agmt *ra, int is_total); typedef struct replica Replica; @@ -679,6 +682,7 @@ int windows_handle_modify_agreement(Repl_Agmt *ra, const char *type, Slapi_Entry void windows_agreement_delete(Repl_Agmt *ra); Repl_Connection *windows_conn_new(Repl_Agmt *agmt); void windows_conn_delete(Repl_Connection *conn); +void windows_update_done(Repl_Agmt *ra, int is_total); /* repl_session_plugin.c */ void repl_session_plugin_init(); diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c index 9002ff387..d25f54b21 100644 --- a/ldap/servers/plugins/replication/repl5_agmt.c +++ b/ldap/servers/plugins/replication/repl5_agmt.c @@ -2265,6 +2265,17 @@ agmt_set_update_in_progress (Repl_Agmt *ra, PRBool in_progress) } } +PRBool +agmt_get_update_in_progress (const Repl_Agmt *ra) +{ + PR_ASSERT(NULL != ra); + if (NULL != ra) + { + return ra->update_in_progress; + } + return PR_FALSE; +} + void agmt_inc_last_update_changecount (Repl_Agmt *ra, ReplicaId rid, int skipped) { @@ -2694,3 +2705,18 @@ agmt_set_cleanruv_notified_from_entry(Repl_Agmt *ra, Slapi_Entry *e){ } PR_Unlock(ra->lock); } + +/* this is called whenever an update (total/incremental) + is completed */ +void +agmt_update_done(Repl_Agmt *agmt, int is_total) +{ + /* we could do a lot of stuff here - consolidate all of the other stuff that gets + done at the end of an update - setting status, times, etc. 
+ but for now, all we want to do is "flush" any pending changes made + during the update into the proper structures so they are in place for the + next run + */ + windows_update_done(agmt, is_total); +} + diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c index 025c23f84..743be570f 100644 --- a/ldap/servers/plugins/replication/repl5_inc_protocol.c +++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c @@ -962,6 +962,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) /* richm: We at least need to let monitors know that the protocol has been * shutdown - maybe they can figure out why */ agmt_set_last_update_status(prp->agmt, 0, 0, "Protocol stopped"); + agmt_update_done(prp->agmt, 0); break; } @@ -1076,6 +1077,7 @@ repl5_inc_run(Private_Repl_Protocol *prp) } agmt_set_last_update_end(prp->agmt, current_time()); agmt_set_update_in_progress(prp->agmt, PR_FALSE); + agmt_update_done(prp->agmt, 0); /* If timed out, close the connection after released the replica */ release_replica(prp); if (rc == UPDATE_TIMEOUT) { diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c index 5eeef5d28..b10d2d0ba 100644 --- a/ldap/servers/plugins/replication/repl5_tot_protocol.c +++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c @@ -475,6 +475,8 @@ repl5_tot_run(Private_Repl_Protocol *prp) slapi_pblock_destroy (pb); agmt_set_last_init_end(prp->agmt, current_time()); rc = cb_data.rc; + agmt_set_update_in_progress(prp->agmt, PR_FALSE); + agmt_update_done(prp->agmt, 1); release_replica(prp); if (rc != LDAP_SUCCESS) diff --git a/ldap/servers/plugins/replication/repl_globals.c b/ldap/servers/plugins/replication/repl_globals.c index 383d56257..f31a47688 100644 --- a/ldap/servers/plugins/replication/repl_globals.c +++ b/ldap/servers/plugins/replication/repl_globals.c @@ -141,6 +141,7 @@ const char *type_nsds7WindowsDomain = "nsds7WindowsDomain"; const char *type_nsds7DirsyncCookie = "nsds7DirsyncCookie"; const char *type_winSyncInterval = "winSyncInterval"; const char *type_oneWaySync = "oneWaySync"; +const char *type_winsyncMoveAction = "winSyncMoveAction"; /* To Allow Consumer Initialization when adding an agreement - */ const char *type_nsds5BeginReplicaRefresh = "nsds5BeginReplicaRefresh"; diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c index 3bba77db9..e9df9f6d5 100644 --- a/ldap/servers/plugins/replication/windows_inc_protocol.c +++ b/ldap/servers/plugins/replication/windows_inc_protocol.c @@ -769,6 +769,7 @@ windows_inc_run(Private_Repl_Protocol *prp) /* richm: We at least need to let monitors know that the protocol has been shutdown - maybe they can figure out why */ agmt_set_last_update_status(prp->agmt, 0, 0, "Protocol stopped"); + agmt_update_done(prp->agmt, 0); break; } @@ -903,6 +904,7 @@ windows_inc_run(Private_Repl_Protocol *prp) agmt_set_last_update_end(prp->agmt, current_time()); agmt_set_update_in_progress(prp->agmt, PR_FALSE); + agmt_update_done(prp->agmt, 0); /* If timed out, close the connection after released the replica */ windows_release_replica(prp); if (rc == UPDATE_TIMEOUT) { diff --git a/ldap/servers/plugins/replication/windows_private.c b/ldap/servers/plugins/replication/windows_private.c index 1f0318f67..26d03c272 100644 --- a/ldap/servers/plugins/replication/windows_private.c +++ b/ldap/servers/plugins/replication/windows_private.c @@ -76,6 +76,7 @@ struct 
windowsprivate { void *api_cookie; /* private data used by api callbacks */ time_t sync_interval; /* how often to run the dirsync search, in seconds */ int one_way; /* Indicates if this is a one-way agreement and which direction it is */ + int move_action; /* Indicates what to do with DS entry if AD entry is moved out of scope */ }; static void windows_private_set_windows_domain(const Repl_Agmt *ra, char *domain); @@ -93,11 +94,62 @@ true_value_from_string(char *val) } } +/* yech - can't declare a constant string array because type_nsds7XX variables + are not constant strings - so have to build a lookup table */ +static int +get_next_disallow_attr_type(int *ii, const char **type) +{ + switch (*ii) { + case 0: *type = type_nsds7WindowsReplicaArea; break; + case 1: *type = type_nsds7DirectoryReplicaArea; break; + case 2: *type = type_nsds7WindowsDomain; break; + default: *type = NULL; break; + } + + if (*type) { + (*ii)++; + return 1; + } + return 0; +} + +static int +check_update_allowed(Repl_Agmt *ra, const char *type, Slapi_Entry *e, int *retval) +{ + int rc = 1; + + /* note - it is not an error to defer setting the value in the ra */ + *retval = 1; + if (agmt_get_update_in_progress(ra)) { + const char *distype = NULL; + int ii = 0; + while (get_next_disallow_attr_type(&ii, &distype)) { + if (slapi_attr_types_equivalent(type, distype)) { + char *tmpstr = slapi_entry_attr_get_charptr(e, type); + slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, + "windows_parse_config_entry: setting %s to %s will be " + "deferred until current update is completed\n", + type, tmpstr); + slapi_ch_free_string(&tmpstr); + rc = 0; + break; + } + } + } + + return rc; +} + static int windows_parse_config_entry(Repl_Agmt *ra, const char *type, Slapi_Entry *e) { char *tmpstr = NULL; int retval = 0; + + if (!check_update_allowed(ra, type, e, &retval)) + { + return retval; + } if (type == NULL || slapi_attr_types_equivalent(type,type_nsds7WindowsReplicaArea)) { @@ -190,6 +242,32 @@ windows_parse_config_entry(Repl_Agmt *ra, const char *type, Slapi_Entry *e) slapi_ch_free((void**)&tmpstr); retval = 1; } + if (type == NULL || slapi_attr_types_equivalent(type,type_winsyncMoveAction)) + { + tmpstr = slapi_entry_attr_get_charptr(e, type_winsyncMoveAction); + if (NULL != tmpstr) + { + if (strcasecmp(tmpstr, "delete") == 0) { + windows_private_set_move_action(ra, MOVE_DOES_DELETE); + } else if (strcasecmp(tmpstr, "unsync") == 0) { + windows_private_set_move_action(ra, MOVE_DOES_UNSYNC); + } else if (strcasecmp(tmpstr, "none") == 0) { + windows_private_set_move_action(ra, MOVE_DOES_NOTHING); + } else { + slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, + "Ignoring illegal setting for %s attribute in replication " + "agreement \"%s\". 
Valid values are \"delete\" or " + "\"unsync\".\n", type_winsyncMoveAction, slapi_entry_get_dn(e)); + windows_private_set_move_action(ra, MOVE_DOES_NOTHING); + } + } + else + { + windows_private_set_move_action(ra, MOVE_DOES_NOTHING); + } + slapi_ch_free((void**)&tmpstr); + retval = 1; + } return retval; } @@ -207,6 +285,26 @@ windows_handle_modify_agreement(Repl_Agmt *ra, const char *type, Slapi_Entry *e) } } +void +windows_update_done(Repl_Agmt *agmt, int is_total) +{ + /* "flush" the changes made during the update to the agmt */ + /* get the agmt entry */ + Slapi_DN *agmtdn = slapi_sdn_dup(agmt_get_dn_byref(agmt)); + Slapi_Entry *agmte = NULL; + int rc = slapi_search_internal_get_entry(agmtdn, NULL, &agmte, + repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION)); + if ((rc == 0) && agmte) { + int ii = 0; + const char *distype = NULL; + while (get_next_disallow_attr_type(&ii, &distype)) { + windows_handle_modify_agreement(agmt, distype, agmte); + } + } + slapi_entry_free(agmte); + slapi_sdn_free(&agmtdn); +} + void windows_init_agreement_from_entry(Repl_Agmt *ra, Slapi_Entry *e) { @@ -1024,6 +1122,39 @@ windows_private_set_sync_interval(Repl_Agmt *ra, char *str) LDAPDebug0Args( LDAP_DEBUG_TRACE, "<= windows_private_set_sync_interval\n" ); } +int +windows_private_get_move_action(const Repl_Agmt *ra) +{ + Dirsync_Private *dp; + + LDAPDebug0Args( LDAP_DEBUG_TRACE, "=> windows_private_get_move_action\n" ); + + PR_ASSERT(ra); + + dp = (Dirsync_Private *) agmt_get_priv(ra); + PR_ASSERT (dp); + + LDAPDebug0Args( LDAP_DEBUG_TRACE, "<= windows_private_get_move_action\n" ); + + return dp->move_action; +} + +void +windows_private_set_move_action(const Repl_Agmt *ra, int value) +{ + Dirsync_Private *dp; + + LDAPDebug0Args( LDAP_DEBUG_TRACE, "=> windows_private_set_move_action\n" ); + + PR_ASSERT(ra); + + dp = (Dirsync_Private *) agmt_get_priv(ra); + PR_ASSERT (dp); + dp->move_action = value; + + LDAPDebug0Args( LDAP_DEBUG_TRACE, "<= windows_private_set_move_action\n" ); +} + static PRCallOnceType winsync_callOnce = {0,0}; struct winsync_plugin { diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c index af3cfa18b..84382e5d7 100644 --- a/ldap/servers/plugins/replication/windows_protocol_util.c +++ b/ldap/servers/plugins/replication/windows_protocol_util.c @@ -5049,6 +5049,85 @@ windows_get_local_entry(const Slapi_DN* local_dn,Slapi_Entry **local_entry) return retval; } +static int +windows_unsync_entry(Private_Repl_Protocol *prp, Slapi_Entry *e) +{ + /* remote the ntuser/ntgroup objectclass and all attributes whose + name begins with "nt" - this will effectively cause the entry + to become "unsynced" with the corresponding windows entry */ + Slapi_Mods *smods = NULL; + Slapi_Value *ntu = NULL, *ntg = NULL; + Slapi_Value *va[2] = {NULL, NULL}; + char **syncattrs = NULL; + PRUint32 ocflags = SLAPI_OC_FLAG_REQUIRED|SLAPI_OC_FLAG_ALLOWED; + Slapi_PBlock *pb = NULL; + int ii; + int rc = -1; + + smods = slapi_mods_new(); + ntu = slapi_value_new_string("ntuser"); + ntg = slapi_value_new_string("ntgroup"); + + if (slapi_entry_attr_has_syntax_value(e, "objectclass", ntu)) { + syncattrs = slapi_schema_list_objectclass_attributes(slapi_value_get_string(ntu), ocflags); + va[0] = ntu; + } else if (slapi_entry_attr_has_syntax_value(e, "objectclass", ntg)) { + syncattrs = slapi_schema_list_objectclass_attributes(slapi_value_get_string(ntg), ocflags); + va[0] = ntg; + } else { + rc = 0; /* not an error */ + goto done; /* nothing to see 
here, move along */ + } + slapi_mods_add_mod_values(smods, LDAP_MOD_DELETE, "objectclass", va); + slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name, + "%s: windows_unsync_entry: removing objectclass %s from %s\n", + agmt_get_long_name(prp->agmt), slapi_value_get_string(va[0]), + slapi_entry_get_dn_const(e)); + + for (ii = 0; syncattrs && syncattrs[ii]; ++ii) { + const char *type = syncattrs[ii]; + Slapi_Attr *attr = NULL; + + if (!slapi_entry_attr_find(e, type, &attr) && attr) { + if (!PL_strncasecmp(type, "nt", 2)) { /* begins with "nt" */ + slapi_mods_add_mod_values(smods, LDAP_MOD_DELETE, type, NULL); + slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name, + "%s: windows_unsync_entry: removing attribute %s from %s\n", + agmt_get_long_name(prp->agmt), type, + slapi_entry_get_dn_const(e)); + } + } + } + + pb = slapi_pblock_new(); + if (!pb) { + goto done; + } + slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name, + "%s: windows_unsync_entry: modifying entry %s\n", + agmt_get_long_name(prp->agmt), slapi_entry_get_dn_const(e)); + slapi_modify_internal_set_pb_ext(pb, slapi_entry_get_sdn(e), + slapi_mods_get_ldapmods_byref(smods), NULL, NULL, + repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0); + slapi_modify_internal_pb(pb); + slapi_pblock_get (pb, SLAPI_PLUGIN_INTOP_RESULT, &rc); + if (rc) { + slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, + "%s: windows_unsync_entry: failed to modify entry %s - error %d:%s\n", + agmt_get_long_name(prp->agmt), slapi_entry_get_dn_const(e), + rc, ldap_err2string(rc)); + } + slapi_pblock_destroy(pb); + +done: + slapi_ch_array_free(syncattrs); + slapi_mods_free(&smods); + slapi_value_free(&ntu); + slapi_value_free(&ntg); + + return rc; +} + static int windows_process_dirsync_entry(Private_Repl_Protocol *prp,Slapi_Entry *e, int is_total) { @@ -5183,13 +5262,34 @@ retry: rc = windows_get_local_entry(local_sdn, &local_entry); if ((0 == rc) && local_entry) { - /* Need to delete the local entry since the remote counter - * part is now moved out of scope of the agreement. */ - /* Since map_Entry_dn_oubound returned local_sdn, - * the entry is either user or group. */ - rc = windows_delete_local_entry(local_sdn); - slapi_entry_free(local_entry); + if (windows_private_get_move_action(prp->agmt) == MOVE_DOES_DELETE) { + /* Need to delete the local entry since the remote counter + * part is now moved out of scope of the agreement. */ + /* Since map_Entry_dn_oubound returned local_sdn, + * the entry is either user or group. 
*/ + slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name, + "%s: windows_process_dirsync_entry: deleting out of " + "scope entry %s\n", agmt_get_long_name(prp->agmt), + slapi_sdn_get_dn(local_sdn)); + rc = windows_delete_local_entry(local_sdn); + } else if (windows_private_get_move_action(prp->agmt) == MOVE_DOES_UNSYNC) { + rc = windows_unsync_entry(prp, local_entry); + } else { + slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name, + "%s: windows_process_dirsync_entry: windows " + "inbound entry %s has the same name as local " + "entry %s but the windows entry is out of the " + "scope of the sync subtree [%s] - if you want " + "these entries to be in sync, add the ntUser/ntGroup " + "objectclass and required attributes to the local " + "entry, and move the windows entry into scope\n", + agmt_get_long_name(prp->agmt), + slapi_entry_get_dn_const(e), + slapi_sdn_get_dn(local_sdn), + slapi_sdn_get_dn(windows_private_get_windows_subtree(prp->agmt))); + } } + slapi_entry_free(local_entry); slapi_sdn_free(&local_sdn); } } diff --git a/ldap/servers/plugins/replication/windows_tot_protocol.c b/ldap/servers/plugins/replication/windows_tot_protocol.c index 6c540040d..079194186 100644 --- a/ldap/servers/plugins/replication/windows_tot_protocol.c +++ b/ldap/servers/plugins/replication/windows_tot_protocol.c @@ -151,6 +151,8 @@ windows_tot_run(Private_Repl_Protocol *prp) agmt_set_last_init_status(prp->agmt, 0, 0, "Total update in progress"); + agmt_set_update_in_progress(prp->agmt, PR_TRUE); + slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "Beginning total update of replica " "\"%s\".\n", agmt_get_long_name(prp->agmt)); @@ -216,7 +218,6 @@ windows_tot_run(Private_Repl_Protocol *prp) server_controls = NULL; slapi_pblock_destroy (pb); - agmt_set_last_init_end(prp->agmt, current_time()); rc = cb_data.rc; windows_release_replica(prp); @@ -249,6 +250,10 @@ windows_tot_run(Private_Repl_Protocol *prp) /* Save the dirsync cookie. */ windows_private_save_dirsync_cookie(prp->agmt); + agmt_set_last_init_end(prp->agmt, current_time()); + agmt_set_update_in_progress(prp->agmt, PR_FALSE); + agmt_update_done(prp->agmt, 1); + /* call end total update callback */ winsync_plugin_call_end_update_cb(prp->agmt, windows_private_get_directory_subtree(prp->agmt), diff --git a/ldap/servers/plugins/replication/windowsrepl.h b/ldap/servers/plugins/replication/windowsrepl.h index 5780e7448..6047fef95 100644 --- a/ldap/servers/plugins/replication/windowsrepl.h +++ b/ldap/servers/plugins/replication/windowsrepl.h @@ -86,6 +86,8 @@ time_t windows_private_get_sync_interval(const Repl_Agmt *ra); void windows_private_set_sync_interval(Repl_Agmt *ra, char *str); PRBool windows_private_get_one_way(const Repl_Agmt *ra); void windows_private_set_one_way(const Repl_Agmt *ra, PRBool value); +int windows_private_get_move_action(const Repl_Agmt *ra); +void windows_private_set_move_action(const Repl_Agmt *ra, int value); /* in windows_connection.c */ ConnResult windows_conn_connect(Repl_Connection *conn); @@ -141,6 +143,14 @@ int windows_check_user_password(Repl_Connection *conn, Slapi_DN *sdn, char *pass #define ONE_WAY_SYNC_FROM_AD 1 #define ONE_WAY_SYNC_TO_AD 2 +/* + * Specifies what action for sync to take if it detects an AD entry has + * moved out of scope + */ +#define MOVE_DOES_NOTHING 0 +#define MOVE_DOES_UNSYNC 1 +#define MOVE_DOES_DELETE 2 + /* called for each replication agreement - so the winsync plugin can be agreement specific and store agreement specific data
0
23467126c33a1f6004bb8357d87bc355a53eb25d
389ds/389-ds-base
Bug 748575 - rhds81 modrn operation and 100% cpu use in replication https://bugzilla.redhat.com/show_bug.cgi?id=748575 Resolves: bug 748575 Bug Description: rhds81 modrn operation and 100% cpu use in replication Reviewed by: ??? Branch: master Fix Description: The modrdn operation causes the entry to be copied multiple times by calling slapi_entry_dup. This in turn does a csnset_dup of the e_dncsnset in the entry. This function was very inefficient. It would simply call csnset_add_csn, which would iterate to the end of the linked list for every addition. Once you get several thousand items in the list, it has to iterate to the end of several thousand items each time. I changed it to keep track of the last item in the list, and just add the new item to the end of the list. This improves the performance quite a bit, but the cpu still gets pegged at a high percentage eventually, it just takes longer to reach that point. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit 23467126c33a1f6004bb8357d87bc355a53eb25d Author: Rich Megginson <[email protected]> Date: Wed Oct 26 17:13:28 2011 -0600 Bug 748575 - rhds81 modrn operation and 100% cpu use in replication https://bugzilla.redhat.com/show_bug.cgi?id=748575 Resolves: bug 748575 Bug Description: rhds81 modrn operation and 100% cpu use in replication Reviewed by: ??? Branch: master Fix Description: The modrdn operation causes the entry to be copied multiple times by calling slapi_entry_dup. This in turn does a csnset_dup of the e_dncsnset in the entry. This function was very inefficient. It would simply call csnset_add_csn, which would iterate to the end of the linked list for every addition. Once you get several thousand items in the list, it has to iterate to the end of several thousand items each time. I changed it to keep track of the last item in the list, and just add the new item to the end of the list. This improves the performance quite a bit, but the cpu still gets pegged at a high percentage eventually, it just takes longer to reach that point. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/csnset.c b/ldap/servers/slapd/csnset.c index 645c17e96..d98cecf14 100644 --- a/ldap/servers/slapd/csnset.c +++ b/ldap/servers/slapd/csnset.c @@ -374,11 +374,13 @@ CSNSet * csnset_dup(const CSNSet *csnset) { CSNSet *newcsnset= NULL; + CSNSet **curnode = &newcsnset; const CSNSet *n= csnset; while(n!=NULL) { - csnset_add_csn(&newcsnset,n->type,&n->csn); + csnset_add_csn(curnode,n->type,&n->csn); n= n->next; + curnode = &((*curnode)->next); } return newcsnset; } diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c index 9087651a5..dc038ba73 100644 --- a/ldap/servers/slapd/entrywsi.c +++ b/ldap/servers/slapd/entrywsi.c @@ -358,6 +358,7 @@ entry_purge_state_information(Slapi_Entry *e, const CSN *csnUpTo) */ attr_purge_state_information(e, a, csnUpTo); } + csnset_purge(&e->e_dncsnset, csnUpTo); } /*
0
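The csnset_dup fix in the record above is an instance of the classic tail-pointer append: keep a pointer to the link where the next node must be attached instead of re-walking the list for every element, turning an O(n^2) copy into O(n). A minimal sketch of that pattern follows; `node_t`, `list_dup`, and the error handling are hypothetical stand-ins, not the 389-ds-base CSNSet code.

```c
#include <stdlib.h>

/* hypothetical list node; stands in for the real CSNSet */
typedef struct node {
    int          value;
    struct node *next;
} node_t;

/* Copy a singly linked list in O(n): instead of walking to the tail for
 * every element (O(n^2) overall), remember the location where the next
 * copied node must be linked. */
static node_t *
list_dup(const node_t *src)
{
    node_t  *head = NULL;
    node_t **tail = &head;          /* where the next node gets attached */

    for (const node_t *n = src; n != NULL; n = n->next) {
        node_t *copy = malloc(sizeof(*copy));
        if (copy == NULL) {
            /* out of memory: release what was copied so far and bail out */
            while (head != NULL) {
                node_t *tmp = head;
                head = head->next;
                free(tmp);
            }
            return NULL;
        }
        copy->value = n->value;
        copy->next  = NULL;
        *tail = copy;               /* append at the remembered tail */
        tail  = &copy->next;        /* advance the attachment point */
    }
    return head;
}
```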
765f4ec1b90be091a85eebbb0a254f59d94bb228
389ds/389-ds-base
Resolves: 387851 Summary: Added validation for nsslapd-maxsasliosize value.
commit 765f4ec1b90be091a85eebbb0a254f59d94bb228 Author: Nathan Kinder <[email protected]> Date: Wed Nov 26 17:32:21 2008 +0000 Resolves: 387851 Summary: Added validation for nsslapd-maxsasliosize value. diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c index a4550b7a2..dac934644 100644 --- a/ldap/servers/slapd/libglobs.c +++ b/ldap/servers/slapd/libglobs.c @@ -856,6 +856,7 @@ FrontendConfig_init () { cfg->ioblocktimeout = SLAPD_DEFAULT_IOBLOCK_TIMEOUT; cfg->outbound_ldap_io_timeout = SLAPD_DEFAULT_OUTBOUND_LDAP_IO_TIMEOUT; cfg->max_filter_nest_level = SLAPD_DEFAULT_MAX_FILTER_NEST_LEVEL; + cfg->maxsasliosize = SLAPD_DEFAULT_MAX_SASLIO_SIZE; #ifdef _WIN32 cfg->conntablesize = SLAPD_DEFAULT_CONNTABLESIZE; @@ -4494,21 +4495,41 @@ int config_set_maxsasliosize( const char *attrname, char *value, char *errorbuf, int apply ) { int retVal = LDAP_SUCCESS; + long maxsasliosize; + char *endptr; slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); if ( config_value_is_null( attrname, value, errorbuf, 0 )) { return LDAP_OPERATIONS_ERROR; } - if ( !apply ) { - return retVal; + maxsasliosize = strtol(value, &endptr, 10); + + /* Check for non-numeric garbage in the value */ + if (*endptr != '\0') { + retVal = LDAP_OPERATIONS_ERROR; } - CFG_LOCK_WRITE(slapdFrontendConfig); + /* Check for a value overflow */ + if (((maxsasliosize == LONG_MAX) || (maxsasliosize == LONG_MIN)) && (errno == ERANGE)){ + retVal = LDAP_OPERATIONS_ERROR; + } + + /* A setting of -1 means unlimited. Don't allow other negative values. */ + if ((maxsasliosize < 0) && (maxsasliosize != -1)) { + retVal = LDAP_OPERATIONS_ERROR; + } - slapdFrontendConfig->maxsasliosize = atol(value); + if (retVal != LDAP_SUCCESS) { + PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, + "%s: \"%s\" is invalid. Value must range from -1 to %ld", + attrname, value, LONG_MAX ); + } else if (apply) { + CFG_LOCK_WRITE(slapdFrontendConfig); + slapdFrontendConfig->maxsasliosize = maxsasliosize; + CFG_UNLOCK_WRITE(slapdFrontendConfig); + } - CFG_UNLOCK_WRITE(slapdFrontendConfig); return retVal; } @@ -4519,9 +4540,6 @@ config_get_maxsasliosize() slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig(); maxsasliosize = slapdFrontendConfig->maxsasliosize; - if (maxsasliosize == 0) { - maxsasliosize = 2 * 1024 * 1024; /* Default: 2Mb */ - } return maxsasliosize; } diff --git a/ldap/servers/slapd/sasl_io.c b/ldap/servers/slapd/sasl_io.c index 4c2a97ea9..3c19a0d20 100644 --- a/ldap/servers/slapd/sasl_io.c +++ b/ldap/servers/slapd/sasl_io.c @@ -195,6 +195,7 @@ sasl_io_start_packet(Connection *c, PRInt32 *err) int ret = 0; unsigned char buffer[4]; size_t packet_length = 0; + size_t saslio_limit; ret = PR_Recv(c->c_prfd,buffer,sizeof(buffer),0,PR_INTERVAL_NO_WAIT); if (ret < 0) { @@ -216,7 +217,10 @@ sasl_io_start_packet(Connection *c, PRInt32 *err) LDAPDebug( LDAP_DEBUG_CONNS, "read sasl packet length %ld on connection %" PRIu64 "\n", packet_length, c->c_connid, 0 ); - if (packet_length > config_get_maxsasliosize()) { + /* Check if the packet length is larger than our max allowed. A + * setting of -1 means that we allow any size SASL IO packet. */ + saslio_limit = config_get_maxsasliosize(); + if(((long)saslio_limit != -1) && (packet_length > saslio_limit)) { LDAPDebug( LDAP_DEBUG_ANY, "SASL encrypted packet length exceeds maximum allowed limit (length=%ld, limit=%ld)." 
" Change the nsslapd-maxsasliosize attribute in cn=config to increase limit.\n", diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h index cca3178c2..414488193 100644 --- a/ldap/servers/slapd/slap.h +++ b/ldap/servers/slapd/slap.h @@ -279,6 +279,7 @@ typedef void (*VFP0)(); #define SLAPD_DEFAULT_LOOKTHROUGHLIMIT 5000 /* use -1 for no limit */ #define SLAPD_DEFAULT_GROUPNESTLEVEL 5 #define SLAPD_DEFAULT_MAX_FILTER_NEST_LEVEL 40 /* use -1 for no limit */ +#define SLAPD_DEFAULT_MAX_SASLIO_SIZE 2097152 /* 2MB in bytes. Use -1 for no limit */ #define SLAPD_DEFAULT_IOBLOCK_TIMEOUT 1800000 /* half hour in ms */ #define SLAPD_DEFAULT_OUTBOUND_LDAP_IO_TIMEOUT 300000 /* 5 minutes in ms */ #define SLAPD_DEFAULT_RESERVE_FDS 64
0
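The nsslapd-maxsasliosize validation in the record above follows the usual strtol() idiom: reject trailing garbage via the end pointer, detect overflow via ERANGE, and treat -1 as the only legal negative value ("unlimited"). A self-contained sketch of that idiom with a hypothetical parse_limit() helper; the errno reset before strtol() is an addition of this sketch, not part of the quoted commit.

```c
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a limit: any non-negative number, or -1 meaning "unlimited".
 * Returns 0 on success and stores the value in *out, -1 on bad input. */
static int
parse_limit(const char *value, long *out)
{
    char *endptr = NULL;
    long  result;

    errno = 0;
    result = strtol(value, &endptr, 10);

    if (endptr == value || *endptr != '\0') {
        return -1;                  /* empty string or trailing garbage */
    }
    if ((result == LONG_MAX || result == LONG_MIN) && errno == ERANGE) {
        return -1;                  /* value does not fit in a long */
    }
    if (result < 0 && result != -1) {
        return -1;                  /* only -1 is allowed as a negative */
    }
    *out = result;
    return 0;
}

int
main(void)
{
    const char *samples[] = { "2097152", "-1", "12abc", "-5", "999999999999999999999" };
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        long v;
        if (parse_limit(samples[i], &v) == 0) {
            printf("%-24s -> %ld\n", samples[i], v);
        } else {
            printf("%-24s -> rejected\n", samples[i]);
        }
    }
    return 0;
}
```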
d91cd6344ca40f92c19856159add2a08d4f78993
389ds/389-ds-base
Bug 690584 - #10652 #10651 #10650 #10649 #10648 #10647 send_specific_attrs send_all_attrs - fix coverity resource leak issues https://bugzilla.redhat.com/show_bug.cgi?id=690584 Resolves: bug 690584 Bug Description: #10652 #10651 #10650 #10649 #10648 #10647 send_specific_attrs send_all_attrs - fix coverity resource leak issues Reviewed by: nkinder (Thanks!) Branch: master Fix Description: if it is possible for slapi_vattr_namespace_values_get_sp with rc != 0 or item_count == 0, make sure to free the values allocated Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no
commit d91cd6344ca40f92c19856159add2a08d4f78993 Author: Rich Megginson <[email protected]> Date: Mon Mar 28 15:09:35 2011 -0600 Bug 690584 - #10652 #10651 #10650 #10649 #10648 #10647 send_specific_attrs send_all_attrs - fix coverity resource leak issues https://bugzilla.redhat.com/show_bug.cgi?id=690584 Resolves: bug 690584 Bug Description: #10652 #10651 #10650 #10649 #10648 #10647 send_specific_attrs send_all_attrs - fix coverity resource leak issues Reviewed by: nkinder (Thanks!) Branch: master Fix Description: if it is possible for slapi_vattr_namespace_values_get_sp with rc != 0 or item_count == 0, make sure to free the values allocated Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c index d26947753..800fb39bd 100644 --- a/ldap/servers/slapd/result.c +++ b/ldap/servers/slapd/result.c @@ -1045,6 +1045,11 @@ static int send_all_attrs(Slapi_Entry *e,char **attrs,Slapi_Operation *op,Slapi_ } } else { + /* if we got here, then either values is NULL or values contains no elements + either way we can free it */ + slapi_ch_free((void**)&values); + slapi_ch_free((void**)&actual_type_name); + slapi_ch_free((void**)&type_name_disposition); rc = 0; } } @@ -1177,6 +1182,11 @@ int send_specific_attrs(Slapi_Entry *e,char **attrs,Slapi_Operation *op,Slapi_PB } } else { + /* if we got here, then either values is NULL or values contains no elements + either way we can free it */ + slapi_ch_free((void**)&values); + slapi_ch_free((void**)&actual_type_name); + slapi_ch_free((void**)&type_name_disposition); rc = 0; } }
0
70ccd9bcaa66133d71654c11d3aac53764d32901
389ds/389-ds-base
Ticket 48242 - Improve dirsrvtests/create_test.py script From: Simon Pichugin <[email protected]> Date: Fri, 7 Aug 2015 14:00:39 +0200 Description: Refactor docstrings, now they match the PEP 0257 standard Refactor script, depends on the pytest mechanic(finalizers, fixtures and main function) https://fedorahosted.org/389/ticket/48242 Reviewed by: mreynolds
commit 70ccd9bcaa66133d71654c11d3aac53764d32901 Author: Mark Reynolds <[email protected]> Date: Wed Aug 12 11:54:37 2015 -0400 Ticket 48242 - Improve dirsrvtests/create_test.py script From: Simon Pichugin <[email protected]> Date: Fri, 7 Aug 2015 14:00:39 +0200 Description: Refactor docstrings, now they match the PEP 0257 standard Refactor script, depends on the pytest mechanic(finalizers, fixtures and main function) https://fedorahosted.org/389/ticket/48242 Reviewed by: mreynolds diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py index 941e922a5..ad9e5aa89 100755 --- a/dirsrvtests/create_test.py +++ b/dirsrvtests/create_test.py @@ -5,20 +5,19 @@ # All rights reserved. # # License: GPL (version 3 or any later version). -# See LICENSE for details. +# See LICENSE for details. # --- END COPYRIGHT BLOCK --- import sys import optparse -''' - This script generates a template test script that handles the - non-interesting parts of a test script: - topology, - test (to be completed by the user), - final, - and run-isolated functions -''' +"""This script generates a template test script that handles the +non-interesting parts of a test script: +- topology, +- test (to be completed by the user), +- final, +- and run-isolated function +""" def displayUsage(): @@ -33,6 +32,34 @@ def displayUsage(): 'There is a maximum of 10 masters, 10 hubs, and 10 consumers.') exit(1) + +def writeFinalizer(): + """Write the finalizer function - delete each instance""" + + TEST.write(' # Delete each instance in the end\n') + TEST.write(' def fin():\n') + if repl_deployment: + for idx in range(masters): + idx += 1 + TEST.write(' master' + str(idx) + '.delete()\n') + for idx in range(hubs): + idx += 1 + TEST.write(' hub' + str(idx) + '.delete()\n') + for idx in range(consumers): + idx += 1 + TEST.write(' consumer' + str(idx) + '.delete()\n') + else: + for idx in range(instances): + idx += 1 + if idx == 1: + idx = '' + else: + idx = str(idx) + TEST.write(' standalone' + idx + '.delete()\n') + TEST.write(' request.addfinalizer(fin)') + TEST.write('\n\n') + + desc = 'Script to generate an initial lib389 test script. ' + \ 'This generates the topology, test, final, and run-isolated functions.' 
@@ -435,6 +462,8 @@ if len(sys.argv) > 0: TEST.write(' assert False\n') TEST.write('\n') + writeFinalizer() + # # Write the finals steps for replication # @@ -483,9 +512,12 @@ if len(sys.argv) > 0: TEST.write(' standalone' + idx + '.create()\n') TEST.write(' standalone' + idx + '.open()\n\n') + writeFinalizer() + TEST.write(' # Clear out the tmp dir\n') TEST.write(' standalone.clearTmpDir(__file__)\n') TEST.write('\n') + TEST.write(' return TopologyStandalone(standalone') for idx in range(instances): idx += 1 @@ -501,82 +533,35 @@ if len(sys.argv) > 0: # if ticket: TEST.write('def test_ticket' + ticket + '(topology):\n') - TEST.write(" '''\n") if repl_deployment: - TEST.write(' Write your replication testcase here.\n\n') + TEST.write(' """Write your replication testcase here.\n\n') TEST.write(' To access each DirSrv instance use: topology.master1, topology.master2,\n' + - ' ..., topology.hub1, ..., topology.consumer1, ...\n') + ' ..., topology.hub1, ..., topology.consumer1, ...\n\n') + TEST.write(' Also, if you need any testcase initialization,\n') + TEST.write(' please, write additional fixture for that(include finalizer).\n') else: - TEST.write(' Write your testcase here...\n') - TEST.write(" '''\n\n") + TEST.write(' """Write your testcase here...\n\n') + TEST.write(' Also, if you need any testcase initialization,\n') + TEST.write(' please, write additional fixture for that(include finalizer).\n') + TEST.write(' """\n\n') TEST.write(" log.info('Test complete')\n") - TEST.write("\n\n") + TEST.write('\n\n') else: - # For suite we start with an init function - TEST.write('def test_' + suite + '_init(topology):\n') - TEST.write(" '''\n") - TEST.write(' Write any test suite initialization here(if needed)\n') - TEST.write(" '''\n\n return\n\n\n") - # Write the first initial empty test function TEST.write('def test_' + suite + '_#####(topology):\n') - TEST.write(" '''\n") - TEST.write(' Write a single test here...\n') - TEST.write(" '''\n\n return\n\n\n") - - # - # Write the final function here - delete each instance - # - if ticket: - TEST.write('def test_ticket' + ticket + '_final(topology):\n') - else: - # suite - TEST.write('def test_' + suite + '_final(topology):\n') - if repl_deployment: - for idx in range(masters): - idx += 1 - TEST.write(' topology.master' + str(idx) + '.delete()\n') - for idx in range(hubs): - idx += 1 - TEST.write(' topology.hub' + str(idx) + '.delete()\n') - for idx in range(consumers): - idx += 1 - TEST.write(' topology.consumer' + str(idx) + '.delete()\n') - else: - for idx in range(instances): - idx += 1 - if idx == 1: - idx = '' - else: - idx = str(idx) - TEST.write(' topology.standalone' + idx + '.delete()\n') - - if ticket: - TEST.write(" log.info('Testcase PASSED')\n") - else: - # suite - TEST.write(" log.info('" + suite + " test suite PASSED')\n") - TEST.write('\n\n') + TEST.write(' """Write a single test here...\n\n') + TEST.write(' Also, if you need any test suite initialization,\n') + TEST.write(' please, write additional fixture for that(include finalizer).\n') + TEST.write(' """\n\n return\n\n\n') # # Write the main function # - TEST.write('def run_isolated():\n') - TEST.write(' global installation1_prefix\n') - TEST.write(' installation1_prefix = None\n\n') - TEST.write(' topo = topology(True)\n') - if ticket: - TEST.write(' test_ticket' + ticket + '(topo)\n') - TEST.write(' test_ticket' + ticket + '_final(topo)\n') - else: - # suite - TEST.write(' test_' + suite + '_init(topo)\n') - TEST.write(' test_' + suite + '_#####(topo)\n') - TEST.write(' test_' 
+ suite + '_final(topo)\n') - TEST.write('\n\n') - TEST.write("if __name__ == '__main__':\n") - TEST.write(' run_isolated()\n\n') + TEST.write(' # Run isolated\n') + TEST.write(' # -s for DEBUG mode\n') + TEST.write(' CURRENT_FILE = os.path.realpath(__file__)\n') + TEST.write(' pytest.main("-s %s" % CURRENT_FILE)') # # Done, close things up
0
4295210b2ce5fd8374dc66354965b8fc243ed649
389ds/389-ds-base
Ticket 50510 - etime can contain invalid nanosecond value Bug Description: When computing the etime, it takes into account the nanosecond. At border of a second, the ending nsec can be lower than starting nsec. In such case the computation is wrong as delta=(ending_nsec - starting_nsec) is negative. final_nsec = 1 - delta > 1sec Fix Description: if delta=(ending_nsec - starting_nsec) is negative final_nsec = 1 + delta < 1sec https://pagure.io/389-ds-base/issue/50510 Reviewed by: Mark Reynolds (Thanks!) Platforms tested: F28 Flag Day: no Doc impact: no
commit 4295210b2ce5fd8374dc66354965b8fc243ed649 Author: Thierry Bordaz <[email protected]> Date: Tue Jul 23 13:59:01 2019 +0200 Ticket 50510 - etime can contain invalid nanosecond value Bug Description: When computing the etime, it takes into account the nanosecond. At border of a second, the ending nsec can be lower than starting nsec. In such case the computation is wrong as delta=(ending_nsec - starting_nsec) is negative. final_nsec = 1 - delta > 1sec Fix Description: if delta=(ending_nsec - starting_nsec) is negative final_nsec = 1 + delta < 1sec https://pagure.io/389-ds-base/issue/50510 Reviewed by: Mark Reynolds (Thanks!) Platforms tested: F28 Flag Day: no Doc impact: no diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py index 07e8a974e..bb45617ca 100644 --- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py +++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py @@ -9,12 +9,13 @@ import os import logging import pytest +import subprocess from lib389.topologies import topology_st from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions from lib389.idm.user import UserAccounts from lib389.idm.group import Groups from lib389.idm.organizationalunit import OrganizationalUnits -from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL +from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, DN_CONFIG, HOST_STANDALONE, PORT_STANDALONE, DN_DM, PASSWORD from lib389.utils import ds_is_older import ldap @@ -664,7 +665,51 @@ def test_access_log_truncated_search_message(topology_st): log.info('Delete the previous access logs for the next test') topo.deleteAccessLogs() [email protected] [email protected] +def test_etime_at_border_of_second(topology_st): + topo = topology_st.standalone + + # be sure to analyze only the following rapid OPs + topo.stop() + os.remove(topo.accesslog) + topo.start() + + prog = os.path.join(topo.ds_paths.bin_dir, 'rsearch') + + cmd = [prog] + + # base search + cmd.extend(['-s', DN_CONFIG]) + + # scope of the search + cmd.extend(['-S', '0']) + + # host / port + cmd.extend(['-h', HOST_STANDALONE]) + cmd.extend(['-p', str(PORT_STANDALONE)]) + + # bound as DM to make it faster + cmd.extend(['-D', DN_DM]) + cmd.extend(['-w', PASSWORD]) + + # filter + cmd.extend(['-f', "(cn=config)"]) + + # 2 samples SRCH + cmd.extend(['-C', "2"]) + + output = subprocess.check_output(cmd) + topo.stop() + + # No etime with 0.199xxx (everything should be few ms) + invalid_etime = topo.ds_access_log.match(r'.*etime=0\.19.*') + if invalid_etime: + for i in range(len(invalid_etime)): + log.error('It remains invalid or weird etime: %s' % invalid_etime[i]) + assert not invalid_etime + if __name__ == '__main__': # Run isolated # -s for DEBUG mode diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c index 584bd1e63..8048a3359 100644 --- a/ldap/servers/slapd/time.c +++ b/ldap/servers/slapd/time.c @@ -235,8 +235,10 @@ slapi_timespec_diff(struct timespec *a, struct timespec *b, struct timespec *dif if (nsec < 0) { /* It's negative so take one second */ sec -= 1; - /* And set nsec to to a whole value */ - nsec = 1000000000 - nsec; + /* And set nsec to to a whole value + * nsec is negative => nsec = 1s - abs(nsec) + */ + nsec = 1000000000 + nsec; } diff->tv_sec = sec;
0
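The etime fix in the record above changes the borrow step from `1000000000 - nsec` to `1000000000 + nsec` when the nanosecond delta is negative. A small stand-alone sketch of the whole subtraction (a hypothetical timespec_diff(), not the slapi_timespec_diff() source) shows why the sign matters when an operation crosses a second boundary.

```c
#include <stdio.h>
#include <time.h>

/* Compute diff = end - start, normalising so that 0 <= tv_nsec < 1e9.
 * When the nanosecond delta is negative we borrow one second and ADD the
 * (negative) delta: 1s + (-|delta|) is less than one second. */
static void
timespec_diff(const struct timespec *start, const struct timespec *end,
              struct timespec *diff)
{
    time_t sec  = end->tv_sec  - start->tv_sec;
    long   nsec = end->tv_nsec - start->tv_nsec;

    if (nsec < 0) {
        sec  -= 1;
        nsec += 1000000000L;        /* borrow a second */
    }
    diff->tv_sec  = sec;
    diff->tv_nsec = nsec;
}

int
main(void)
{
    /* crossing a second boundary: 10.900000000 -> 11.050000000 */
    struct timespec start = { .tv_sec = 10, .tv_nsec = 900000000L };
    struct timespec end   = { .tv_sec = 11, .tv_nsec =  50000000L };
    struct timespec diff;

    timespec_diff(&start, &end, &diff);
    /* prints 0.150000000 -- the buggy "1e9 - nsec" form would instead leave
     * tv_nsec at 1850000000, i.e. more than a whole second */
    printf("%ld.%09ld\n", (long)diff.tv_sec, diff.tv_nsec);
    return 0;
}
```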
34fe2125051bfac00994dce9794d092cfcb4c06b
389ds/389-ds-base
Issue 6772 - dsconf - Replicas with the "consumer" role allow for viewing and modification of their changelog. (#6773) dsconf currently allows users to set and retrieve changelogs in consumer replicas, which do not have officially supported changelogs. This can lead to undefined behavior and confusion. This commit prints a warning message if the user tries to interact with a changelog on a consumer replica. Resolves: https://github.com/389ds/389-ds-base/issues/6772 Reviewed by: @droideck
commit 34fe2125051bfac00994dce9794d092cfcb4c06b Author: Anuar Beisembayev <[email protected]> Date: Wed Jul 23 23:48:11 2025 -0400 Issue 6772 - dsconf - Replicas with the "consumer" role allow for viewing and modification of their changelog. (#6773) dsconf currently allows users to set and retrieve changelogs in consumer replicas, which do not have officially supported changelogs. This can lead to undefined behavior and confusion. This commit prints a warning message if the user tries to interact with a changelog on a consumer replica. Resolves: https://github.com/389ds/389-ds-base/issues/6772 Reviewed by: @droideck diff --git a/src/lib389/lib389/cli_conf/replication.py b/src/lib389/lib389/cli_conf/replication.py index 6f77f34ca..a18bf83ca 100644 --- a/src/lib389/lib389/cli_conf/replication.py +++ b/src/lib389/lib389/cli_conf/replication.py @@ -686,6 +686,9 @@ def set_per_backend_cl(inst, basedn, log, args): replace_list = [] did_something = False + if (is_replica_role_consumer(inst, suffix)): + log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.") + if args.encrypt: cl.replace('nsslapd-encryptionalgorithm', 'AES') del args.encrypt @@ -715,6 +718,10 @@ def set_per_backend_cl(inst, basedn, log, args): # that means there is a changelog config entry per backend (aka suffix) def get_per_backend_cl(inst, basedn, log, args): suffix = args.suffix + + if (is_replica_role_consumer(inst, suffix)): + log.info("Warning: Changelogs are not supported for consumer replicas. You may run into undefined behavior.") + cl = Changelog(inst, suffix) if args and args.json: log.info(cl.get_all_attrs_json()) @@ -822,6 +829,22 @@ def del_repl_manager(inst, basedn, log, args): log.info("Successfully deleted replication manager: " + manager_dn) +def is_replica_role_consumer(inst, suffix): + """Helper function for get_per_backend_cl and set_per_backend_cl. + Makes sure the instance in question is not a consumer, which is a role that + does not support changelogs. + """ + replicas = Replicas(inst) + try: + replica = replicas.get(suffix) + role = replica.get_role() + except ldap.NO_SUCH_OBJECT: + raise ValueError(f"Backend \"{suffix}\" is not enabled for replication") + + if role == ReplicaRole.CONSUMER: + return True + else: + return False # # Agreements
0
a2c2bc184f25bb03c98a640b23e0d75c445b2014
389ds/389-ds-base
Ticket #48449 - Import readNSState.py from RichM's repo Description: Adding readnsstate.1 to dist_man_MANS in Makefile.{am,in} to install the readnsstate man page as /usr/share/man/man1/readnsstate.1.gz.
commit a2c2bc184f25bb03c98a640b23e0d75c445b2014 Author: Noriko Hosoi <[email protected]> Date: Tue Jun 14 13:26:16 2016 -0700 Ticket #48449 - Import readNSState.py from RichM's repo Description: Adding readnsstate.1 to dist_man_MANS in Makefile.{am,in} to install the readnsstate man page as /usr/share/man/man1/readnsstate.1.gz. diff --git a/Makefile.am b/Makefile.am index c5567fd45..a0ba4b61c 100644 --- a/Makefile.am +++ b/Makefile.am @@ -754,6 +754,7 @@ dist_man_MANS = man/man1/dbscan.1 \ man/man1/pwdhash.1 \ man/man1/repl-monitor.1 \ man/man1/rsearch.1 \ + man/man1/readnsstate.1 \ man/man8/migrate-ds.pl.8 \ man/man8/ns-slapd.8 \ man/man8/restart-dirsrv.8 \ diff --git a/Makefile.in b/Makefile.in index 062d78713..0c0176b38 100644 --- a/Makefile.in +++ b/Makefile.in @@ -2171,6 +2171,7 @@ dist_man_MANS = man/man1/dbscan.1 \ man/man1/pwdhash.1 \ man/man1/repl-monitor.1 \ man/man1/rsearch.1 \ + man/man1/readnsstate.1 \ man/man8/migrate-ds.pl.8 \ man/man8/ns-slapd.8 \ man/man8/restart-dirsrv.8 \
0
554e29d0f9ad1bdf77b3b868cccd853f023f0a05
389ds/389-ds-base
Coverity Fixes Bug Description: Fixes coverity errors from ticket 315 & 20
commit 554e29d0f9ad1bdf77b3b868cccd853f023f0a05 Author: Mark Reynolds <[email protected]> Date: Mon Apr 9 15:56:46 2012 -0400 Coverity Fixes Bug Description: Fixes coverity errors from ticket 315 & 20 diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c index d6383740c..17590efc9 100644 --- a/ldap/servers/plugins/automember/automember.c +++ b/ldap/servers/plugins/automember/automember.c @@ -2043,7 +2043,6 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int rv = SLAPI_DSE_CALLBACK_OK; task_data *mytaskdata = NULL; Slapi_Task *task = NULL; - Slapi_DN *basedn = NULL; PRThread *thread = NULL; char *bind_dn = NULL; const char *base_dn; @@ -2067,9 +2066,6 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, *returncode = LDAP_OBJECT_CLASS_VIOLATION; rv = SLAPI_DSE_CALLBACK_ERROR; goto out; - } else { - /* convert the base_dn to a slapi dn */ - basedn = slapi_sdn_new_dn_byval(base_dn); } if((filter = fetch_attr(e, "filter", 0)) == NULL){ *returncode = LDAP_OBJECT_CLASS_VIOLATION; @@ -2089,7 +2085,7 @@ automember_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &bind_dn); mytaskdata->bind_dn = slapi_ch_strdup(bind_dn); - mytaskdata->base_dn = basedn; + mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn); mytaskdata->filter_str = slapi_ch_strdup(filter); if(scope){ if(strcasecmp(scope,"sub")== 0){ @@ -2236,7 +2232,6 @@ automember_task_add_export_updates(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry int rv = SLAPI_DSE_CALLBACK_OK; task_data *mytaskdata = NULL; Slapi_Task *task = NULL; - Slapi_DN *basedn = NULL; PRThread *thread = NULL; char *bind_dn = NULL; const char *base_dn = NULL; @@ -2264,9 +2259,6 @@ automember_task_add_export_updates(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *returncode = LDAP_OBJECT_CLASS_VIOLATION; rv = SLAPI_DSE_CALLBACK_ERROR; goto out; - } else { - /* convert the base dn to a slapi dn */ - basedn = slapi_sdn_new_dn_byval(base_dn); } if((filter = fetch_attr(e, "filter", 0)) == NULL){ *returncode = LDAP_OBJECT_CLASS_VIOLATION; @@ -2285,7 +2277,7 @@ automember_task_add_export_updates(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry } mytaskdata->bind_dn = slapi_ch_strdup(bind_dn); mytaskdata->ldif_out = slapi_ch_strdup(ldif); - mytaskdata->base_dn = basedn; + mytaskdata->base_dn = slapi_sdn_new_dn_byval(base_dn); mytaskdata->filter_str = slapi_ch_strdup(filter); if(scope){ if(strcasecmp(scope,"sub")== 0){ diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 01d307d41..597e1318d 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -629,6 +629,7 @@ disk_mon_get_dirs(char ***list, int logs_critical){ } be = (backend *)slapi_get_next_backend (cookie); } + slapi_ch_free((void **)&cookie); } /*
0
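The automember part of the Coverity fix in the record above delays the slapi_sdn_new_dn_byval() allocation until after all input checks have passed, so an early `goto out` can no longer leak it. A hypothetical sketch of that allocate-after-validate ordering, using an invented task_t rather than the plugin's task_data.

```c
#include <stdlib.h>
#include <string.h>

/* Hypothetical task structure and inputs. */
typedef struct task {
    char *base_dn;
    char *filter;
} task_t;

/* Validate every required input first, then allocate. With this ordering an
 * early "goto out" can never leak a half-built object, which is the shape of
 * the Coverity fix in the commit above. */
static task_t *
task_new(const char *base_dn, const char *filter)
{
    task_t *t = NULL;

    if (base_dn == NULL || filter == NULL) {
        goto out;                   /* nothing allocated yet, nothing to free */
    }

    t = calloc(1, sizeof(*t));
    if (t == NULL) {
        goto out;
    }
    t->base_dn = strdup(base_dn);   /* duplicate only after validation passed */
    t->filter  = strdup(filter);

out:
    return t;
}

static void
task_free(task_t *t)
{
    if (t != NULL) {
        free(t->base_dn);
        free(t->filter);
        free(t);
    }
}

int
main(void)
{
    task_t *t = task_new("dc=example,dc=com", "(objectclass=*)");
    task_free(t);
    return 0;
}
```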
8eefcd5606a149ed3f4f19dbfab0f85e5aa0bf58
389ds/389-ds-base
Resolves: #475899 Summary: extensible filter having range operation crashes the server Description: we should prevent accessing the inside of NULL pointer.
commit 8eefcd5606a149ed3f4f19dbfab0f85e5aa0bf58 Author: Noriko Hosoi <[email protected]> Date: Thu Dec 11 00:08:18 2008 +0000 Resolves: #475899 Summary: extensible filter having range operation crashes the server Description: we should prevent accessing the inside of NULL pointer. diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c index ed8cbd885..85a19a458 100644 --- a/ldap/servers/slapd/operation.c +++ b/ldap/servers/slapd/operation.c @@ -55,9 +55,11 @@ slapi_op_abandoned( Slapi_PBlock *pb ) { int op_status; - op_status = pb->pb_op->o_status; - - return( op_status == SLAPI_OP_STATUS_ABANDONED ); + if (pb && pb->pb_op) { + op_status = pb->pb_op->o_status; + return( op_status == SLAPI_OP_STATUS_ABANDONED ); + } + return 0; } void
0
38d4e5231bebcd03715abec7a67caa33161dc5b0
389ds/389-ds-base
Ticket 49873 - (cont 2nd) Contention on virtual attribute lookup Bug Description: SSL initialization does internal searches that access the vattr_global_lock Thread private counter needs to be initialized by that time. Currently it is initialized after SSL init. Second problem was a leak of one 'int' per worker. It was used to keep the private counter. Fix Description: Call of vattr_global_lock_create needs to be called before slapd_do_all_nss_ssl_init. Also, 'main' may or may not fork, the initialization fo the thread private variable is done either on the child or parent depending if main forks or not. The leak is fixed using a destructor callback of the private variable and so call PR_SetThreadPrivate only if there is no private variable. https://pagure.io/389-ds-base/issue/49873 Reviewed by: Mark Reynolds, Simon Pichugi (thanks) Platforms tested: F28 Flag Day: no Doc impact: no Ticket foo
commit 38d4e5231bebcd03715abec7a67caa33161dc5b0 Author: Thierry Bordaz <[email protected]> Date: Tue Mar 19 14:17:52 2019 +0100 Ticket 49873 - (cont 2nd) Contention on virtual attribute lookup Bug Description: SSL initialization does internal searches that access the vattr_global_lock Thread private counter needs to be initialized by that time. Currently it is initialized after SSL init. Second problem was a leak of one 'int' per worker. It was used to keep the private counter. Fix Description: Call of vattr_global_lock_create needs to be called before slapd_do_all_nss_ssl_init. Also, 'main' may or may not fork, the initialization fo the thread private variable is done either on the child or parent depending if main forks or not. The leak is fixed using a destructor callback of the private variable and so call PR_SetThreadPrivate only if there is no private variable. https://pagure.io/389-ds-base/issue/49873 Reviewed by: Mark Reynolds, Simon Pichugi (thanks) Platforms tested: F28 Flag Day: no Doc impact: no Ticket foo diff --git a/ldap/servers/slapd/detach.c b/ldap/servers/slapd/detach.c index 681e6a701..d5c95a04f 100644 --- a/ldap/servers/slapd/detach.c +++ b/ldap/servers/slapd/detach.c @@ -144,6 +144,10 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t * } break; } + /* The thread private counter needs to be allocated after the fork + * it is not inherited from parent process + */ + vattr_global_lock_create(); /* call this right after the fork, but before closing stdin */ if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) { @@ -174,6 +178,11 @@ detach(int slapd_exemode, int importexport_encrypt, int s_port, daemon_ports_t * g_set_detached(1); } else { /* not detaching - call nss/ssl init */ + /* The thread private counter needs to be allocated after the fork + * it is not inherited from parent process + */ + vattr_global_lock_create(); + if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) { return 1; } diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 5a86e2e05..185ba9073 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -950,10 +950,6 @@ main(int argc, char **argv) return_value = 1; goto cleanup; } - /* The thread private counter needs to be allocated after the fork - * it is not inherited from parent process - */ - vattr_global_lock_create(); /* * Create our thread pool here for tasks to utilise. 
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c index ce63f50f8..bc4d0e93f 100644 --- a/ldap/servers/slapd/vattr.c +++ b/ldap/servers/slapd/vattr.c @@ -125,11 +125,25 @@ vattr_init() vattr_basic_sp_init(); #endif } +/* + * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PR_NewThreadPrivateIndex + * It is called each time: + * - PR_SetThreadPrivate is call with a not NULL private value + * - on thread exit + */ +static void +vattr_global_lock_free(void *ptr) +{ + int *nb_acquired = ptr; + if (nb_acquired) { + slapi_ch_free((void **)&nb_acquired); + } +} /* Create a private variable for each individual thread of the current process */ void vattr_global_lock_create() { - if (PR_NewThreadPrivateIndex(&thread_private_global_vattr_lock, NULL) != PR_SUCCESS) { + if (PR_NewThreadPrivateIndex(&thread_private_global_vattr_lock, vattr_global_lock_free) != PR_SUCCESS) { slapi_log_err(SLAPI_LOG_ALERT, "vattr_global_lock_create", "Failure to create global lock for virtual attribute !\n"); PR_ASSERT(0); @@ -155,9 +169,9 @@ global_vattr_lock_set_acquired_count(int nb_acquired) if (val == NULL) { /* if it was not initialized set it to zero */ val = (int *) slapi_ch_calloc(1, sizeof(int)); + PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) val); } *val = nb_acquired; - PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) val); } /* The map lock can be acquired recursively. So only the first rdlock * will acquire the lock.
0
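The vattr fix in the record above registers a destructor with the NSPR thread-private index so each worker's counter is released at thread exit, and calls PR_SetThreadPrivate() only on first use. The same pattern in plain POSIX threads, as a hypothetical sketch (counter_key, counter_set() and friends are invented names, not lib389/slapd code).

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* One counter slot per thread. The destructor registered with
 * pthread_key_create() runs at thread exit and frees the slot, which is what
 * the commit above achieves with the NSPR destructor callback. */
static pthread_key_t counter_key;

static void
counter_destructor(void *ptr)
{
    free(ptr);                      /* no per-thread leak at thread exit */
}

static void
counter_key_create(void)
{
    if (pthread_key_create(&counter_key, counter_destructor) != 0) {
        abort();
    }
}

static void
counter_set(int value)
{
    int *slot = pthread_getspecific(counter_key);

    if (slot == NULL) {
        /* first use on this thread: allocate once and register it; later
         * calls only update the existing slot, mirroring the fix that moved
         * PR_SetThreadPrivate() inside the NULL check */
        slot = calloc(1, sizeof(*slot));
        if (slot == NULL) {
            return;
        }
        pthread_setspecific(counter_key, slot);
    }
    *slot = value;
}

static int
counter_get(void)
{
    int *slot = pthread_getspecific(counter_key);
    return slot ? *slot : 0;
}

static void *
worker(void *arg)
{
    (void)arg;
    counter_set(counter_get() + 1);
    printf("thread-local counter = %d\n", counter_get());
    return NULL;
}

int
main(void)
{
    pthread_t t1, t2;

    counter_key_create();           /* must happen before any thread uses the key */
    pthread_create(&t1, NULL, worker, NULL);
    pthread_create(&t2, NULL, worker, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}
```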
37bff7c59f8cca09ba2a2e6eb3e6c852327bbb17
389ds/389-ds-base
Bug(s) fixed: 157377 Bug Description: RL16: Server core dumped while configuring replication agreements Reviewed by: David (Thanks!) Fix Description: The code for handling bad agreements was wrong. It was not returning the correct error code. The result of this is that the client did not receive an appropriate error code/message from the server. The reason for the core dump was that, even though the agreement was not created, the DSE entry was still there, causing the server to become very confused. Returning the correct error code causes the server to refuse to create the bad DSE entry. Platforms tested: RHEL3 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none
commit 37bff7c59f8cca09ba2a2e6eb3e6c852327bbb17 Author: Rich Megginson <[email protected]> Date: Wed May 11 18:51:54 2005 +0000 Bug(s) fixed: 157377 Bug Description: RL16: Server core dumped while configuring replication agreements Reviewed by: David (Thanks!) Fix Description: The code for handling bad agreements was wrong. It was not returning the correct error code. The result of this is that the client did not receive an appropriate error code/message from the server. The reason for the core dump was that, even though the agreement was not created, the DSE entry was still there, causing the server to become very confused. Returning the correct error code causes the server to refuse to create the bad DSE entry. Platforms tested: RHEL3 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none diff --git a/ldap/servers/plugins/replication/repl5_agmtlist.c b/ldap/servers/plugins/replication/repl5_agmtlist.c index b8abe4c06..2206f83d3 100644 --- a/ldap/servers/plugins/replication/repl5_agmtlist.c +++ b/ldap/servers/plugins/replication/repl5_agmtlist.c @@ -147,7 +147,7 @@ add_new_agreement(Slapi_Entry *e) Object *repl_obj = NULL; Object *ro = NULL; - if (ra == NULL) return 0; + if (ra == NULL) return 1; /* tell search result handler callback this entry was not sent */ ro = object_new((void *)ra, agmt_delete); objset_add_obj(agmt_set, ro);
0
7a5a0221ea05b48df5add4cd839fcbb55dbbea69
389ds/389-ds-base
Move ssl init on the secure socket into main with the rest of the nss/ssl init
commit 7a5a0221ea05b48df5add4cd839fcbb55dbbea69 Author: Rich Megginson <[email protected]> Date: Fri Nov 18 21:07:38 2005 +0000 Move ssl init on the secure socket into main with the rest of the nss/ssl init diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c index 658365a0a..1f3de4f9d 100644 --- a/ldap/servers/slapd/daemon.c +++ b/ldap/servers/slapd/daemon.c @@ -406,8 +406,6 @@ int daemon_pre_setuid_init(daemon_ports_t *ports) #ifdef XP_WIN32 ports->s_socket_native = PR_FileDesc2NativeHandle(ports->s_socket); #endif - /* check if ports->s_socket != -1 ? */ - rc = slapd_ssl_init2 ( &ports->s_socket, 0 ); } else { ports->s_socket = SLAPD_INVALID_SOCKET; #ifdef XP_WIN32
0
b621e8594f18242a6f9b45a4203b4a84a9c9829a
389ds/389-ds-base
Resolves: bug 458677 Bug Description: Memory leaks in index code doing indexed & range & matching rule searches Reviewed by: nkinder (Thanks!) Branch: HEAD Fix Description: This leak occurs when doing ranged, indexed searches. The code calls index2prefix to get the index prefix. In the case of a matching rule search, this prefix is allocated. The function free_prefix was not being called in all cases. Platforms tested: RHEL5, Fedora 8 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none
commit b621e8594f18242a6f9b45a4203b4a84a9c9829a Author: Rich Megginson <[email protected]> Date: Wed Aug 27 21:05:44 2008 +0000 Resolves: bug 458677 Bug Description: Memory leaks in index code doing indexed & range & matching rule searches Reviewed by: nkinder (Thanks!) Branch: HEAD Fix Description: This leak occurs when doing ranged, indexed searches. The code calls index2prefix to get the index prefix. In the case of a matching rule search, this prefix is allocated. The function free_prefix was not being called in all cases. Platforms tested: RHEL5, Fedora 8 Flag Day: no Doc impact: no QA impact: should be covered by regular nightly and manual testing New Tests integrated into TET: none diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c index 49c8eda24..d7dc0b79d 100644 --- a/ldap/servers/slapd/back-ldbm/index.c +++ b/ldap/servers/slapd/back-ldbm/index.c @@ -1047,7 +1047,7 @@ index_range_read( DBT cur_key = {0}; DBT data = {0} ; IDList *idl= NULL; - char *prefix; + char *prefix = NULL; char *realbuf, *nextrealbuf; size_t reallen, nextreallen; size_t plen; @@ -1100,10 +1100,14 @@ index_range_read( LDAPDebug( LDAP_DEBUG_ANY, "<= index_range_read(%s,%s) NULL (operator %i)\n", type, prefix, operator ); + free_prefix(prefix); return( NULL ); } ainfo_get( be, type, &ai ); - if (ai == NULL) return NULL; + if (ai == NULL) { + free_prefix(prefix); + return NULL; + } LDAPDebug( LDAP_DEBUG_ARGS, " indextype: \"%s\" indexmask: 0x%x\n", indextype, ai->ai_indexmask, 0 ); if ( !is_indexed( indextype, ai->ai_indexmask, ai->ai_index_rules )) { @@ -1111,12 +1115,14 @@ index_range_read( LDAPDebug( LDAP_DEBUG_TRACE, "<= index_range_read(%s,%s) %lu candidates (allids)\n", type, prefix, (u_long)IDL_NIDS(idl) ); + free_prefix(prefix); return( idl ); } if ( (*err = dblayer_get_index_file( be, ai, &db, DBOPEN_CREATE )) != 0 ) { LDAPDebug( LDAP_DEBUG_ANY, "<= index_range_read(%s,%s) NULL (could not open index file)\n", type, prefix, 0 ); + free_prefix(prefix); return( NULL ); /* why not allids? */ } if (NULL != txn) { @@ -1130,6 +1136,7 @@ index_range_read( "<= index_range_read(%s,%s) NULL: db->cursor() == %i\n", type, prefix, *err ); dblayer_release_index_file( be, ai, db ); + free_prefix(prefix); return( NULL ); /* why not allids? */ } @@ -1377,6 +1384,7 @@ index_range_read( } #endif error: + free_prefix(prefix); DBT_FREE_PAYLOAD(cur_key); DBT_FREE_PAYLOAD(upperkey);
0
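The index_range_read fix in the record above adds a free_prefix() call to every early return. An equivalent, often less error-prone shape is a single cleanup label that every exit path jumps to; a hypothetical sketch of that alternative follows (make_prefix(), open_index() and range_read() are stand-ins, not the real index code).

```c
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the real prefix/index handles. */
static char *make_prefix(const char *type) { return strdup(type); }
static int   open_index(const char *type)  { return (type && type[0]) ? 0 : -1; }

/* Every early exit in the original function had to remember to call
 * free_prefix(); routing all exits through one cleanup label makes it
 * impossible to forget the release on a newly added error path. */
static int
range_read(const char *type)
{
    int   rc = -1;
    char *prefix = make_prefix(type);   /* allocated on every call */

    if (prefix == NULL) {
        goto out;                       /* nothing else to free yet */
    }
    if (open_index(type) != 0) {
        goto out;                       /* error path: prefix still freed below */
    }

    /* ... do the actual range read here ... */
    rc = 0;

out:
    free(prefix);                       /* single release point, NULL-safe */
    return rc;
}

int
main(void)
{
    return range_read("cn") == 0 ? 0 : 1;
}
```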
81dcdd91c7c8c9735cbc87b8b4c421c4ac8ec3d8
389ds/389-ds-base
Ticket #47553 - Automated the verification procedure Description: Test case checks, that MODRDN operation is allowed, if user has ACI right '(all)' under superior entries, but doesn't have '(modrdn)'. https://fedorahosted.org/389/ticket/47553 Reviewed by: [email protected] (Thanks!) Signed-off-by: Mark Reynolds <[email protected]>
commit 81dcdd91c7c8c9735cbc87b8b4c421c4ac8ec3d8 Author: Simon Pichugin <[email protected]> Date: Mon Aug 31 17:22:34 2015 +0200 Ticket #47553 - Automated the verification procedure Description: Test case checks, that MODRDN operation is allowed, if user has ACI right '(all)' under superior entries, but doesn't have '(modrdn)'. https://fedorahosted.org/389/ticket/47553 Reviewed by: [email protected] (Thanks!) Signed-off-by: Mark Reynolds <[email protected]> diff --git a/dirsrvtests/tickets/ticket47553_test.py b/dirsrvtests/tickets/ticket47553_test.py new file mode 100644 index 000000000..84d462d1c --- /dev/null +++ b/dirsrvtests/tickets/ticket47553_test.py @@ -0,0 +1,166 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +CONTAINER_1_OU = 'test_ou_1' +CONTAINER_2_OU = 'test_ou_2' +CONTAINER_1 = 'ou=%s,dc=example,dc=com' % CONTAINER_1_OU +CONTAINER_2 = 'ou=%s,dc=example,dc=com' % CONTAINER_2_OU +USER_CN = 'test_user' +USER_PWD = 'Secret123' +USER = 'cn=%s,%s' % (USER_CN, CONTAINER_1) + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + [email protected](scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + standalone.delete() + request.addfinalizer(fin) + + # Clear out the tmp dir + standalone.clearTmpDir(__file__) + + return TopologyStandalone(standalone) + + [email protected](scope="module") +def env_setup(topology): + """Adds two containers, one user and two ACI rules""" + + try: + log.info("Add a container: %s" % CONTAINER_1) + topology.standalone.add_s(Entry((CONTAINER_1, + {'objectclass': 'top', + 'objectclass': 'organizationalunit', + 'ou': CONTAINER_1_OU, + }))) + + log.info("Add a container: %s" % CONTAINER_2) + topology.standalone.add_s(Entry((CONTAINER_2, + {'objectclass': 'top', + 'objectclass': 'organizationalunit', + 'ou': CONTAINER_2_OU, + }))) + + log.info("Add a user: %s" % USER) + topology.standalone.add_s(Entry((USER, + {'objectclass': 'top person'.split(), + 'cn': USER_CN, + 'sn': USER_CN, + 'userpassword': USER_PWD + }))) + except ldap.LDAPError as e: + log.error('Failed to add object to database: %s' % e.message['desc']) + assert False + + ACI_TARGET = '(targetattr="*")' + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; allow (all) ' % USER + ACI_SUBJECT = 'userdn="ldap:///%s";)' % USER + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + + try: + log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, + CONTAINER_1)) + topology.standalone.modify_s(CONTAINER_1, mod) + + log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, + CONTAINER_2)) + topology.standalone.modify_s(CONTAINER_2, mod) + except ldap.LDAPError as e: + log.fatal('Failed to add ACI: error (%s)' % (e.message['desc'])) + assert False + + +def test_ticket47553(topology, env_setup): + """Tests, that MODRDN operation is allowed, + if user has ACI right '(all)' under superior entries, + but doesn't have '(modrdn)' + """ + + log.info("Bind as %s" % USER) + try: + topology.standalone.simple_bind_s(USER, USER_PWD) + except ldap.LDAPError as e: + log.error('Bind failed for %s, error %s' % (USER, e.message['desc'])) + assert False + + log.info("User MODRDN operation from %s to %s" % (CONTAINER_1, + CONTAINER_2)) + try: + topology.standalone.rename_s(USER, "cn=%s" % USER_CN, + newsuperior=CONTAINER_2, delold=1) + except ldap.LDAPError as e: + log.error('MODRDN failed for %s, error %s' % (USER, e.message['desc'])) + assert False + + try: + log.info("Check there is no user in %s" % CONTAINER_1) + entries = topology.standalone.search_s(CONTAINER_1, + ldap.SCOPE_ONELEVEL, + 'cn=%s' % USER_CN) + assert not entries + + log.info("Check there is our user in %s" % CONTAINER_2) + entries = topology.standalone.search_s(CONTAINER_2, + ldap.SCOPE_ONELEVEL, + 'cn=%s' % USER_CN) + assert entries + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.message['desc']) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + # -v for additional verbose + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE)
0
a2cbef23d1fe5e0a86563575ec4a09d0c9120b86
389ds/389-ds-base
Issue 49588 - Add py3 support for tickets Another round of py3 fixes for tests in tickets/. https://pagure.io/389-ds-base/issue/49588 Reviewed by: mreynolds (Thanks!)
commit a2cbef23d1fe5e0a86563575ec4a09d0c9120b86 Author: Viktor Ashirov <[email protected]> Date: Tue Jun 26 21:41:48 2018 +0200 Issue 49588 - Add py3 support for tickets Another round of py3 fixes for tests in tickets/. https://pagure.io/389-ds-base/issue/49588 Reviewed by: mreynolds (Thanks!) diff --git a/dirsrvtests/tests/tickets/ticket47927_test.py b/dirsrvtests/tests/tickets/ticket47927_test.py index 3dbd08b27..43165d324 100644 --- a/dirsrvtests/tests/tickets/ticket47927_test.py +++ b/dirsrvtests/tests/tickets/ticket47927_test.py @@ -42,11 +42,11 @@ def test_ticket47927_init(topology_st): topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) try: topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'telephonenumber'), - (ldap.MOD_REPLACE, 'uniqueness-subtrees', DEFAULT_SUFFIX), + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', b'telephonenumber'), + (ldap.MOD_REPLACE, 'uniqueness-subtrees', ensure_bytes(DEFAULT_SUFFIX)), ]) except ldap.LDAPError as e: - log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.message['desc']) + log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.args[0]['desc']) assert False topology_st.standalone.restart(timeout=120) @@ -81,12 +81,12 @@ def test_ticket47927_one(topology_st): ''' Check that uniqueness is enforce on all SUFFIX ''' - UNIQUE_VALUE = '1234' + UNIQUE_VALUE = b'1234' try: topology_st.standalone.modify_s(USER_1_DN, [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) except ldap.LDAPError as e: - log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc'])) + log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc'])) assert False # we expect to fail because user1 is in the scope of the plugin @@ -97,7 +97,7 @@ def test_ticket47927_one(topology_st): assert False except ldap.LDAPError as e: log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % ( - USER_2_DN, e.message['desc'])) + USER_2_DN, e.args[0]['desc'])) pass # we expect to fail because user1 is in the scope of the plugin @@ -108,7 +108,7 @@ def test_ticket47927_one(topology_st): assert False except ldap.LDAPError as e: log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % ( - USER_3_DN, e.message['desc'])) + USER_3_DN, e.args[0]['desc'])) pass @@ -118,10 +118,10 @@ def test_ticket47927_two(topology_st): ''' try: topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', EXCLUDED_CONTAINER_DN)]) + [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_CONTAINER_DN))]) except ldap.LDAPError as e: log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % ( - EXCLUDED_CONTAINER_DN, e.message['desc'])) + EXCLUDED_CONTAINER_DN, e.args[0]['desc'])) assert False topology_st.standalone.restart(timeout=120) @@ -132,12 +132,12 @@ def test_ticket47927_three(topology_st): First case: it exists an entry (with the same attribute value) in the scope of the plugin and we set the value in an entry that is in an excluded scope ''' - UNIQUE_VALUE = '9876' + UNIQUE_VALUE = b'9876' try: topology_st.standalone.modify_s(USER_1_DN, [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) except ldap.LDAPError as e: - 
log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.message['desc']) + log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.args[0]['desc']) assert False # we should not be allowed to set this value (because user1 is in the scope) @@ -148,7 +148,7 @@ def test_ticket47927_three(topology_st): assert False except ldap.LDAPError as e: log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % ( - USER_2_DN, e.message['desc'])) + USER_2_DN, e.args[0]['desc'])) # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful try: @@ -157,7 +157,7 @@ def test_ticket47927_three(topology_st): log.fatal('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN)) except ldap.LDAPError as e: log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % ( - USER_3_DN, e.message['desc'])) + USER_3_DN, e.args[0]['desc'])) assert False @@ -167,7 +167,7 @@ def test_ticket47927_four(topology_st): Second case: it exists an entry (with the same attribute value) in an excluded scope of the plugin and we set the value in an entry is in the scope ''' - UNIQUE_VALUE = '1111' + UNIQUE_VALUE = b'1111' # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful try: topology_st.standalone.modify_s(USER_3_DN, @@ -175,7 +175,7 @@ def test_ticket47927_four(topology_st): log.fatal('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN) except ldap.LDAPError as e: log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % ( - USER_3_DN, e.message['desc'])) + USER_3_DN, e.args[0]['desc'])) assert False # we should be allowed to set this value (because user3 is excluded from scope) @@ -184,7 +184,7 @@ def test_ticket47927_four(topology_st): [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) except ldap.LDAPError as e: log.fatal( - 'test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc'])) + 'test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc'])) assert False # we should not be allowed to set this value (because user1 is in the scope) @@ -195,7 +195,7 @@ def test_ticket47927_four(topology_st): assert False except ldap.LDAPError as e: log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % ( - USER_2_DN, e.message['desc'])) + USER_2_DN, e.args[0]['desc'])) pass @@ -205,10 +205,10 @@ def test_ticket47927_five(topology_st): ''' try: topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', - [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', EXCLUDED_BIS_CONTAINER_DN)]) + [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_BIS_CONTAINER_DN))]) except ldap.LDAPError as e: log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % ( - EXCLUDED_BIS_CONTAINER_DN, e.message['desc'])) + EXCLUDED_BIS_CONTAINER_DN, e.args[0]['desc'])) assert False topology_st.standalone.restart(timeout=120) topology_st.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE) @@ -221,12 +221,12 @@ def test_ticket47927_six(topology_st): First case: it exists an entry (with the same attribute value) in the scope of the plugin and we set the value in an entry that is in an excluded scope ''' - UNIQUE_VALUE = '222' + UNIQUE_VALUE = b'222' try: topology_st.standalone.modify_s(USER_1_DN, 
[(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) except ldap.LDAPError as e: - log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.message['desc']) + log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.args[0]['desc']) assert False # we should not be allowed to set this value (because user1 is in the scope) @@ -237,7 +237,7 @@ def test_ticket47927_six(topology_st): assert False except ldap.LDAPError as e: log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % ( - USER_2_DN, e.message['desc'])) + USER_2_DN, e.args[0]['desc'])) # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful try: @@ -246,7 +246,7 @@ def test_ticket47927_six(topology_st): log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN)) except ldap.LDAPError as e: log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % ( - USER_3_DN, e.message['desc'])) + USER_3_DN, e.args[0]['desc'])) assert False # USER_4_DN is in EXCLUDED_CONTAINER_DN so update should be successful try: @@ -255,7 +255,7 @@ def test_ticket47927_six(topology_st): log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN)) except ldap.LDAPError as e: log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % ( - USER_4_DN, e.message['desc'])) + USER_4_DN, e.args[0]['desc'])) assert False diff --git a/dirsrvtests/tests/tickets/ticket47931_test.py b/dirsrvtests/tests/tickets/ticket47931_test.py index 341fb0f4b..82657c961 100644 --- a/dirsrvtests/tests/tickets/ticket47931_test.py +++ b/dirsrvtests/tests/tickets/ticket47931_test.py @@ -31,7 +31,7 @@ class modifySecondBackendThread(threading.Thread): self.timeout = timeout def run(self): - conn = self.inst.openConnection() + conn = self.inst.clone() conn.set_option(ldap.OPT_TIMEOUT, self.timeout) log.info('Modify second suffix...') for x in range(0, 5000): @@ -39,10 +39,10 @@ class modifySecondBackendThread(threading.Thread): conn.modify_s(SECOND_SUFFIX, [(ldap.MOD_REPLACE, 'description', - 'new description')]) + b'new description')]) except ldap.LDAPError as e: log.fatal('Failed to modify second suffix - error: %s' % - (e.message['desc'])) + (e.args[0]['desc'])) assert False conn.close() @@ -69,9 +69,9 @@ def test_ticket47931(topology_st): topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', - 'on')]) + b'on')]) except ldap.LDAPError as e: - log.error('Failed to enable dynamic plugins! ' + e.message['desc']) + log.error('Failed to enable dynamic plugins! 
' + e.args[0]['desc']) assert False # Enable the plugins @@ -88,7 +88,7 @@ def test_ticket47931(topology_st): {'objectclass': 'top domain'.split(), 'dc': 'deadlock'}))) except ldap.LDAPError as e: - log.fatal('Failed to create suffix entry: error ' + e.message['desc']) + log.fatal('Failed to create suffix entry: error ' + e.args[0]['desc']) assert False # Configure retrocl scope @@ -96,9 +96,9 @@ def test_ticket47931(topology_st): topology_st.standalone.modify_s(RETROCL_PLUGIN_DN, [(ldap.MOD_REPLACE, 'nsslapd-include-suffix', - DEFAULT_SUFFIX)]) + ensure_bytes(DEFAULT_SUFFIX))]) except ldap.LDAPError as e: - log.error('Failed to configure retrocl plugin: ' + e.message['desc']) + log.error('Failed to configure retrocl plugin: ' + e.args[0]['desc']) assert False # Configure memberOf group attribute @@ -106,9 +106,9 @@ def test_ticket47931(topology_st): topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', - 'uniquemember')]) + b'uniquemember')]) except ldap.LDAPError as e: - log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc']) + log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc']) assert False time.sleep(1) @@ -118,7 +118,7 @@ def test_ticket47931(topology_st): {'objectclass': 'top extensibleObject'.split(), 'cn': 'group'}))) except ldap.LDAPError as e: - log.fatal('Failed to add grouo: error ' + e.message['desc']) + log.fatal('Failed to add grouo: error ' + e.args[0]['desc']) assert False # Create 1500 entries (future members of the group) @@ -129,7 +129,7 @@ def test_ticket47931(topology_st): {'objectclass': 'top extensibleObject'.split(), 'uid': 'member%d' % (idx)}))) except ldap.LDAPError as e: - log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc'])) + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) assert False # Modify second backend (separate thread) @@ -146,13 +146,13 @@ def test_ticket47931(topology_st): topology_st.standalone.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', - MEMBER_VAL)]) + ensure_bytes(MEMBER_VAL))]) except ldap.TIMEOUT: log.fatal('Deadlock! 
Bug verification failed.') assert False except ldap.LDAPError as e: log.fatal('Failed to update group(not a deadlock) member (%s) - error: %s' % - (MEMBER_VAL, e.message['desc'])) + (MEMBER_VAL, e.args[0]['desc'])) assert False log.info('Finished adding members to the group.') diff --git a/dirsrvtests/tests/tickets/ticket47937_test.py b/dirsrvtests/tests/tickets/ticket47937_test.py index 5af6eecbb..0a4c18db8 100644 --- a/dirsrvtests/tests/tickets/ticket47937_test.py +++ b/dirsrvtests/tests/tickets/ticket47937_test.py @@ -33,7 +33,7 @@ def test_ticket47937(topology_st): except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: - log.error('Failed to add ou=people org unit: error ' + e.message['desc']) + log.error('Failed to add ou=people org unit: error ' + e.args[0]['desc']) assert False log.info("Creating \"ou=ranges\"...") @@ -44,7 +44,7 @@ def test_ticket47937(topology_st): }))) except ldap.LDAPError as e: - log.error('Failed to add ou=ranges org unit: error ' + e.message['desc']) + log.error('Failed to add ou=ranges org unit: error ' + e.args[0]['desc']) assert False log.info("Creating \"cn=entry\"...") @@ -55,7 +55,7 @@ def test_ticket47937(topology_st): }))) except ldap.LDAPError as e: - log.error('Failed to add test entry: error ' + e.message['desc']) + log.error('Failed to add test entry: error ' + e.args[0]['desc']) assert False log.info("Creating DNA shared config entry...") @@ -69,7 +69,7 @@ def test_ticket47937(topology_st): }))) except ldap.LDAPError as e: - log.error('Failed to add shared config entry: error ' + e.message['desc']) + log.error('Failed to add shared config entry: error ' + e.args[0]['desc']) assert False log.info("Add dna plugin config entry...") @@ -87,14 +87,14 @@ def test_ticket47937(topology_st): }))) except ldap.LDAPError as e: - log.error('Failed to add DNA config entry: error ' + e.message['desc']) + log.error('Failed to add DNA config entry: error ' + e.args[0]['desc']) assert False log.info("Enable the DNA plugin...") try: topology_st.standalone.plugins.enable(name=PLUGIN_DNA) except e: - log.error("Failed to enable DNA Plugin: error " + e.message['desc']) + log.error("Failed to enable DNA Plugin: error " + e.args[0]['desc']) assert False log.info("Restarting the server...") @@ -107,9 +107,9 @@ def test_ticket47937(topology_st): try: topology_st.standalone.modify_s('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'dnaType', 'foo')]) + [(ldap.MOD_REPLACE, 'dnaType', b'foo')]) except ldap.LDAPError as e: - log.info('Operation failed as expected (error: %s)' % e.message['desc']) + log.info('Operation failed as expected (error: %s)' % e.args[0]['desc']) else: log.error('Operation incorectly succeeded! 
Test Failed!') assert False diff --git a/dirsrvtests/tests/tickets/ticket47953_test.py b/dirsrvtests/tests/tickets/ticket47953_test.py index f828835a1..0aebe55ec 100644 --- a/dirsrvtests/tests/tickets/ticket47953_test.py +++ b/dirsrvtests/tests/tickets/ticket47953_test.py @@ -11,6 +11,7 @@ import shutil import pytest from lib389.tasks import * +from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DATA_DIR, DEFAULT_SUFFIX @@ -56,10 +57,10 @@ def test_ticket47953(topology_st): log.info('Attempting to remove invalid aci...') try: - topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', acival)]) + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', ensure_bytes(acival))]) log.info('Removed invalid aci.') except ldap.LDAPError as e: - log.error('Failed to remove invalid aci: ' + e.message['desc']) + log.error('Failed to remove invalid aci: ' + e.args[0]['desc']) assert False diff --git a/dirsrvtests/tests/tickets/ticket47963_test.py b/dirsrvtests/tests/tickets/ticket47963_test.py index e38fb3093..24f6eeab8 100644 --- a/dirsrvtests/tests/tickets/ticket47963_test.py +++ b/dirsrvtests/tests/tickets/ticket47963_test.py @@ -10,6 +10,7 @@ import logging import pytest from lib389.tasks import * +from lib389.utils import * from lib389.topologies import topology_st from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MEMBER_OF @@ -36,9 +37,9 @@ def test_ticket47963(topology_st): # topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) try: - topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', 'on')]) + topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', b'on')]) except ldap.LDAPError as e: - log.error('test_automember: Failed to modify config entry: error ' + e.message['desc']) + log.error('test_automember: Failed to modify config entry: error ' + e.args[0]['desc']) assert False topology_st.standalone.restart(timeout=10) @@ -52,7 +53,7 @@ def test_ticket47963(topology_st): 'uid': 'test_user' }))) except ldap.LDAPError as e: - log.error('Failed to add teset user: error ' + e.message['desc']) + log.error('Failed to add teset user: error ' + e.args[0]['desc']) assert False try: @@ -62,7 +63,7 @@ def test_ticket47963(topology_st): 'member': USER_DN }))) except ldap.LDAPError as e: - log.error('Failed to add group1: error ' + e.message['desc']) + log.error('Failed to add group1: error ' + e.args[0]['desc']) assert False try: @@ -72,7 +73,7 @@ def test_ticket47963(topology_st): 'member': USER_DN }))) except ldap.LDAPError as e: - log.error('Failed to add group2: error ' + e.message['desc']) + log.error('Failed to add group2: error ' + e.args[0]['desc']) assert False # Add group with no member(yet) @@ -82,7 +83,7 @@ def test_ticket47963(topology_st): 'cn': 'group' }))) except ldap.LDAPError as e: - log.error('Failed to add group3: error ' + e.message['desc']) + log.error('Failed to add group3: error ' + e.args[0]['desc']) assert False time.sleep(1) @@ -96,14 +97,14 @@ def test_ticket47963(topology_st): log.fatal('User is missing expected memberOf attrs') assert False except ldap.LDAPError as e: - log.fatal('Search for user1 failed: ' + e.message['desc']) + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) assert False # Add the user to the group try: - topology_st.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', USER_DN)]) + topology_st.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', ensure_bytes(USER_DN))]) except 
ldap.LDAPError as e: - log.error('Failed to member to group: error ' + e.message['desc']) + log.error('Failed to member to group: error ' + e.args[0]['desc']) assert False time.sleep(1) @@ -116,7 +117,7 @@ def test_ticket47963(topology_st): log.fatal('User is missing expected memberOf attrs') assert False except ldap.LDAPError as e: - log.fatal('Search for user1 failed: ' + e.message['desc']) + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) assert False # @@ -125,7 +126,7 @@ def test_ticket47963(topology_st): try: topology_st.standalone.delete_s(GROUP_DN2) except ldap.LDAPError as e: - log.error('Failed to delete test group2: ' + e.message['desc']) + log.error('Failed to delete test group2: ' + e.args[0]['desc']) assert False time.sleep(1) @@ -136,7 +137,7 @@ def test_ticket47963(topology_st): log.fatal('User incorrect memberOf attrs') assert False except ldap.LDAPError as e: - log.fatal('Search for user1 failed: ' + e.message['desc']) + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) assert False log.info('Test complete') diff --git a/dirsrvtests/tests/tickets/ticket47970_test.py b/dirsrvtests/tests/tickets/ticket47970_test.py index 86d67e3e4..410a290cb 100644 --- a/dirsrvtests/tests/tickets/ticket47970_test.py +++ b/dirsrvtests/tests/tickets/ticket47970_test.py @@ -31,17 +31,17 @@ def test_ticket47970(topology_st): # Enable account lockout # try: - topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')]) + topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', b'on')]) log.info('account lockout enabled.') except ldap.LDAPError as e: - log.error('Failed to enable account lockout: ' + e.message['desc']) + log.error('Failed to enable account lockout: ' + e.args[0]['desc']) assert False try: - topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')]) + topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', b'5')]) log.info('passwordMaxFailure set.') except ldap.LDAPError as e: - log.error('Failed to to set passwordMaxFailure: ' + e.message['desc']) + log.error('Failed to to set passwordMaxFailure: ' + e.args[0]['desc']) assert False # @@ -69,7 +69,7 @@ def test_ticket47970(topology_st): "passwordRetryCount=*", ['passwordRetryCount']) except ldap.LDAPError as e: - log.error('Failed to search Root DSE entry: ' + e.message['desc']) + log.error('Failed to search Root DSE entry: ' + e.args[0]['desc']) assert False if entry: diff --git a/dirsrvtests/tests/tickets/ticket47973_test.py b/dirsrvtests/tests/tickets/ticket47973_test.py index c0751a137..c73388b75 100644 --- a/dirsrvtests/tests/tickets/ticket47973_test.py +++ b/dirsrvtests/tests/tickets/ticket47973_test.py @@ -34,7 +34,7 @@ def task_complete(conn, task_dn): # task is done finished = True except ldap.LDAPError as e: - log.fatal('wait_for_task: Search failed: ' + e.message['desc']) + log.fatal('wait_for_task: Search failed: ' + e.args[0]['desc']) assert False return finished @@ -57,7 +57,7 @@ def test_ticket47973(topology_st): 'uid': 'user1' }))) except ldap.LDAPError as e: - log.error('Failed to add user1: error ' + e.message['desc']) + log.error('Failed to add user1: error ' + e.args[0]['desc']) assert False # @@ -77,7 +77,7 @@ def test_ticket47973(topology_st): 'cn': 'task-' + str(task_count) }))) except ldap.LDAPError as e: - log.error('Failed to add task entry: error ' + e.message['desc']) + log.error('Failed to add task entry: error ' + e.args[0]['desc']) assert False # @@ -96,7 
+96,7 @@ def test_ticket47973(topology_st): log.fatal('User was not returned from search!') assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) assert False # @@ -138,13 +138,13 @@ def test_ticket47973_case(topology_st): ["objectclasses"]) oclist = schemaentry[0].data.get("objectclasses") except ldap.LDAPError as e: - log.error('Failed to get schema entry: error (%s)' % e.message['desc']) + log.error('Failed to get schema entry: error (%s)' % e.args[0]['desc']) raise e found = 0 for oc in oclist: log.info('OC: %s' % oc) - moz = re.findall(Mozattr0, oc) + moz = re.findall(Mozattr0, oc.decode('utf-8')) if moz: found = 1 log.info('case 1: %s is in the objectclasses list -- PASS' % Mozattr0) @@ -178,12 +178,12 @@ def test_ticket47973_case(topology_st): ["objectclasses"]) oclist = schemaentry[0].data.get("objectclasses") except ldap.LDAPError as e: - log.error('Failed to get schema entry: error (%s)' % e.message['desc']) + log.error('Failed to get schema entry: error (%s)' % e.args[0]['desc']) raise e for oc in oclist: log.info('OC: %s' % oc) - moz = re.findall(Mozattr1, oc) + moz = re.findall(Mozattr1, oc.decode('utf-8')) if moz: log.error('case 2: %s is in the objectclasses list -- FAILURE' % Mozattr1) assert False @@ -200,7 +200,7 @@ def test_ticket47973_case(topology_st): 'cn': name, Mozattr2: name}))) except ldap.LDAPError as e: - log.error('Failed to add a test entry: error (%s)' % e.message['desc']) + log.error('Failed to add a test entry: error (%s)' % e.args[0]['desc']) raise e try: @@ -208,7 +208,7 @@ def test_ticket47973_case(topology_st): 'objectclass=mozillaobject', [Mozattr2]) except ldap.LDAPError as e: - log.error('Failed to get schema entry: error (%s)' % e.message['desc']) + log.error('Failed to get schema entry: error (%s)' % e.args[0]['desc']) raise e mozattrval = testentry[0].data.get(Mozattr2) diff --git a/dirsrvtests/tests/tickets/ticket47976_test.py b/dirsrvtests/tests/tickets/ticket47976_test.py index 13ada9615..850ffe17f 100644 --- a/dirsrvtests/tests/tickets/ticket47976_test.py +++ b/dirsrvtests/tests/tickets/ticket47976_test.py @@ -73,7 +73,7 @@ def test_ticket47976_init(topology_st): def test_ticket47976_1(topology_st): - mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', DEFINITIONS_DN)] + mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', ensure_bytes(DEFINITIONS_DN))] topology_st.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod) topology_st.standalone.stop(timeout=10) topology_st.standalone.start(timeout=10) @@ -95,7 +95,7 @@ def test_ticket47976_2(topology_st): """ log.info('Test complete') - mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', str(128 * 1024))] + mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', ensure_bytes(str(128 * 1024)))] topology_st.standalone.modify_s(DN_LDBM, mod) # Get the the full path and name for our LDIF we will be exporting diff --git a/dirsrvtests/tests/tickets/ticket47980_test.py b/dirsrvtests/tests/tickets/ticket47980_test.py index 0bbd7d245..510d67972 100644 --- a/dirsrvtests/tests/tickets/ticket47980_test.py +++ b/dirsrvtests/tests/tickets/ticket47980_test.py @@ -89,7 +89,7 @@ def test_ticket47980(topology_st): 'ou': 'level1' }))) except ldap.LDAPError as e: - log.error('Failed to add level1: error ' + e.message['desc']) + log.error('Failed to add level1: error ' + e.args[0]['desc']) assert False try: @@ -98,7 +98,7 @@ def 
test_ticket47980(topology_st): 'ou': 'level2' }))) except ldap.LDAPError as e: - log.error('Failed to add level2: error ' + e.message['desc']) + log.error('Failed to add level2: error ' + e.args[0]['desc']) assert False try: @@ -107,7 +107,7 @@ def test_ticket47980(topology_st): 'uid': 'level3' }))) except ldap.LDAPError as e: - log.error('Failed to add level3: error ' + e.message['desc']) + log.error('Failed to add level3: error ' + e.args[0]['desc']) assert False # People branch, might already exist @@ -119,7 +119,7 @@ def test_ticket47980(topology_st): except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: - log.error('Failed to add level4: error ' + e.message['desc']) + log.error('Failed to add level4: error ' + e.args[0]['desc']) assert False try: @@ -128,7 +128,7 @@ def test_ticket47980(topology_st): 'ou': 'level5' }))) except ldap.LDAPError as e: - log.error('Failed to add level5: error ' + e.message['desc']) + log.error('Failed to add level5: error ' + e.args[0]['desc']) assert False try: @@ -137,7 +137,7 @@ def test_ticket47980(topology_st): 'uid': 'level6' }))) except ldap.LDAPError as e: - log.error('Failed to add level6: error ' + e.message['desc']) + log.error('Failed to add level6: error ' + e.args[0]['desc']) assert False # Add users to each branch @@ -147,7 +147,7 @@ def test_ticket47980(topology_st): 'uid': 'user1' }))) except ldap.LDAPError as e: - log.error('Failed to add user1: error ' + e.message['desc']) + log.error('Failed to add user1: error ' + e.args[0]['desc']) assert False try: @@ -156,7 +156,7 @@ def test_ticket47980(topology_st): 'uid': 'user2' }))) except ldap.LDAPError as e: - log.error('Failed to add user2: error ' + e.message['desc']) + log.error('Failed to add user2: error ' + e.args[0]['desc']) assert False try: @@ -165,7 +165,7 @@ def test_ticket47980(topology_st): 'uid': 'user3' }))) except ldap.LDAPError as e: - log.error('Failed to add user3: error ' + e.message['desc']) + log.error('Failed to add user3: error ' + e.args[0]['desc']) assert False try: @@ -174,7 +174,7 @@ def test_ticket47980(topology_st): 'uid': 'user4' }))) except ldap.LDAPError as e: - log.error('Failed to add user4: error ' + e.message['desc']) + log.error('Failed to add user4: error ' + e.args[0]['desc']) assert False try: @@ -183,7 +183,7 @@ def test_ticket47980(topology_st): 'uid': 'user5' }))) except ldap.LDAPError as e: - log.error('Failed to add user5: error ' + e.message['desc']) + log.error('Failed to add user5: error ' + e.args[0]['desc']) assert False try: @@ -192,14 +192,14 @@ def test_ticket47980(topology_st): 'uid': 'user6' }))) except ldap.LDAPError as e: - log.error('Failed to add user6: error ' + e.message['desc']) + log.error('Failed to add user6: error ' + e.args[0]['desc']) assert False # Enable password policy try: - topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) except ldap.LDAPError as e: - log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + log.error('Failed to set pwpolicy-local: error ' + e.args[0]['desc']) assert False # @@ -212,7 +212,7 @@ def test_ticket47980(topology_st): 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: - log.error('Failed to add subtree container for level1: error ' + e.message['desc']) + log.error('Failed to add subtree container for level1: error ' + e.args[0]['desc']) assert False # Add the password policy subentry @@ -228,7 +228,7 @@ def 
test_ticket47980(topology_st): 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for level1: error ' + e.message['desc']) + log.error('Failed to add passwordpolicy for level1: error ' + e.args[0]['desc']) assert False # Add the COS template @@ -241,7 +241,7 @@ def test_ticket47980(topology_st): 'pwdpolicysubentry': BRANCH1_PWP }))) except ldap.LDAPError as e: - log.error('Failed to add COS template for level1: error ' + e.message['desc']) + log.error('Failed to add COS template for level1: error ' + e.args[0]['desc']) assert False # Add the COS definition @@ -253,7 +253,7 @@ def test_ticket47980(topology_st): 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: - log.error('Failed to add COS def for level1: error ' + e.message['desc']) + log.error('Failed to add COS def for level1: error ' + e.args[0]['desc']) assert False # @@ -266,7 +266,7 @@ def test_ticket47980(topology_st): 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: - log.error('Failed to add subtree container for level2: error ' + e.message['desc']) + log.error('Failed to add subtree container for level2: error ' + e.args[0]['desc']) assert False # Add the password policy subentry @@ -282,7 +282,7 @@ def test_ticket47980(topology_st): 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for level2: error ' + e.message['desc']) + log.error('Failed to add passwordpolicy for level2: error ' + e.args[0]['desc']) assert False # Add the COS template @@ -295,7 +295,7 @@ def test_ticket47980(topology_st): 'pwdpolicysubentry': BRANCH2_PWP }))) except ldap.LDAPError as e: - log.error('Failed to add COS template for level2: error ' + e.message['desc']) + log.error('Failed to add COS template for level2: error ' + e.args[0]['desc']) assert False # Add the COS definition @@ -307,7 +307,7 @@ def test_ticket47980(topology_st): 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: - log.error('Failed to add COS def for level2: error ' + e.message['desc']) + log.error('Failed to add COS def for level2: error ' + e.args[0]['desc']) assert False # @@ -320,7 +320,7 @@ def test_ticket47980(topology_st): 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: - log.error('Failed to add subtree container for level3: error ' + e.message['desc']) + log.error('Failed to add subtree container for level3: error ' + e.args[0]['desc']) assert False # Add the password policy subentry @@ -336,7 +336,7 @@ def test_ticket47980(topology_st): 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for level3: error ' + e.message['desc']) + log.error('Failed to add passwordpolicy for level3: error ' + e.args[0]['desc']) assert False # Add the COS template @@ -349,7 +349,7 @@ def test_ticket47980(topology_st): 'pwdpolicysubentry': BRANCH3_PWP }))) except ldap.LDAPError as e: - log.error('Failed to add COS template for level3: error ' + e.message['desc']) + log.error('Failed to add COS template for level3: error ' + e.args[0]['desc']) assert False # Add the COS definition @@ -361,7 +361,7 @@ def test_ticket47980(topology_st): 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: - log.error('Failed to add COS def for level3: error ' + e.message['desc']) + log.error('Failed to add COS def for level3: error ' + e.args[0]['desc']) assert False # @@ -374,7 +374,7 @@ 
def test_ticket47980(topology_st): 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: - log.error('Failed to add subtree container for level3: error ' + e.message['desc']) + log.error('Failed to add subtree container for level3: error ' + e.args[0]['desc']) assert False # Add the password policy subentry @@ -390,7 +390,7 @@ def test_ticket47980(topology_st): 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for branch4: error ' + e.message['desc']) + log.error('Failed to add passwordpolicy for branch4: error ' + e.args[0]['desc']) assert False # Add the COS template @@ -403,7 +403,7 @@ def test_ticket47980(topology_st): 'pwdpolicysubentry': BRANCH4_PWP }))) except ldap.LDAPError as e: - log.error('Failed to add COS template for level3: error ' + e.message['desc']) + log.error('Failed to add COS template for level3: error ' + e.args[0]['desc']) assert False # Add the COS definition @@ -415,7 +415,7 @@ def test_ticket47980(topology_st): 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: - log.error('Failed to add COS def for branch4: error ' + e.message['desc']) + log.error('Failed to add COS def for branch4: error ' + e.args[0]['desc']) assert False # @@ -428,7 +428,7 @@ def test_ticket47980(topology_st): 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: - log.error('Failed to add subtree container for branch5: error ' + e.message['desc']) + log.error('Failed to add subtree container for branch5: error ' + e.args[0]['desc']) assert False # Add the password policy subentry @@ -444,7 +444,7 @@ def test_ticket47980(topology_st): 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for branch5: error ' + e.message['desc']) + log.error('Failed to add passwordpolicy for branch5: error ' + e.args[0]['desc']) assert False # Add the COS template @@ -457,7 +457,7 @@ def test_ticket47980(topology_st): 'pwdpolicysubentry': BRANCH5_PWP }))) except ldap.LDAPError as e: - log.error('Failed to add COS template for branch5: error ' + e.message['desc']) + log.error('Failed to add COS template for branch5: error ' + e.args[0]['desc']) assert False # Add the COS definition @@ -469,7 +469,7 @@ def test_ticket47980(topology_st): 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: - log.error('Failed to add COS def for level3: error ' + e.message['desc']) + log.error('Failed to add COS def for level3: error ' + e.args[0]['desc']) assert False # @@ -482,7 +482,7 @@ def test_ticket47980(topology_st): 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: - log.error('Failed to add subtree container for branch6: error ' + e.message['desc']) + log.error('Failed to add subtree container for branch6: error ' + e.args[0]['desc']) assert False # Add the password policy subentry @@ -498,7 +498,7 @@ def test_ticket47980(topology_st): 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy for branch6: error ' + e.message['desc']) + log.error('Failed to add passwordpolicy for branch6: error ' + e.args[0]['desc']) assert False # Add the COS template @@ -511,7 +511,7 @@ def test_ticket47980(topology_st): 'pwdpolicysubentry': BRANCH6_PWP }))) except ldap.LDAPError as e: - log.error('Failed to add COS template for branch6: error ' + e.message['desc']) + log.error('Failed to add COS template for branch6: error ' + e.args[0]['desc']) assert False # Add 
the COS definition @@ -523,7 +523,7 @@ def test_ticket47980(topology_st): 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: - log.error('Failed to add COS def for branch6: error ' + e.message['desc']) + log.error('Failed to add COS def for branch6: error ' + e.args[0]['desc']) assert False time.sleep(2) @@ -537,7 +537,7 @@ def test_ticket47980(topology_st): log.fatal('User %s does not have expected pwdpolicysubentry!') assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER1_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER1_DN, e.args[0]['desc'])) assert False try: @@ -546,7 +546,7 @@ def test_ticket47980(topology_st): log.fatal('User %s does not have expected pwdpolicysubentry!' % USER2_DN) assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER2_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER2_DN, e.args[0]['desc'])) assert False try: @@ -555,7 +555,7 @@ def test_ticket47980(topology_st): log.fatal('User %s does not have expected pwdpolicysubentry!' % USER3_DN) assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER3_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER3_DN, e.args[0]['desc'])) assert False try: @@ -564,7 +564,7 @@ def test_ticket47980(topology_st): log.fatal('User %s does not have expected pwdpolicysubentry!' % USER4_DN) assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER4_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER4_DN, e.args[0]['desc'])) assert False try: @@ -573,7 +573,7 @@ def test_ticket47980(topology_st): log.fatal('User %s does not have expected pwdpolicysubentry!' % USER5_DN) assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER5_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER5_DN, e.args[0]['desc'])) assert False try: @@ -582,7 +582,7 @@ def test_ticket47980(topology_st): log.fatal('User %s does not have expected pwdpolicysubentry!' 
% USER6_DN) assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER6_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER6_DN, e.args[0]['desc'])) assert False diff --git a/dirsrvtests/tests/tickets/ticket47981_test.py b/dirsrvtests/tests/tickets/ticket47981_test.py index 753169e77..b8bc7ca12 100644 --- a/dirsrvtests/tests/tickets/ticket47981_test.py +++ b/dirsrvtests/tests/tickets/ticket47981_test.py @@ -39,7 +39,7 @@ def addSubtreePwPolicy(inst): 'cn': 'nsPwPolicyContainer' }))) except ldap.LDAPError as e: - log.error('Failed to add subtree container for ou=people: error ' + e.message['desc']) + log.error('Failed to add subtree container for ou=people: error ' + e.args[0]['desc']) assert False # Add the password policy subentry @@ -55,7 +55,7 @@ def addSubtreePwPolicy(inst): 'passwordStorageScheme': 'ssha' }))) except ldap.LDAPError as e: - log.error('Failed to add passwordpolicy: error ' + e.message['desc']) + log.error('Failed to add passwordpolicy: error ' + e.args[0]['desc']) assert False # Add the COS template @@ -68,7 +68,7 @@ def addSubtreePwPolicy(inst): 'pwdpolicysubentry': BRANCH_PWP }))) except ldap.LDAPError as e: - log.error('Failed to add COS template: error ' + e.message['desc']) + log.error('Failed to add COS template: error ' + e.args[0]['desc']) assert False # Add the COS definition @@ -80,7 +80,7 @@ def addSubtreePwPolicy(inst): 'cosAttribute': 'pwdpolicysubentry default operational-default' }))) except ldap.LDAPError as e: - log.error('Failed to add COS def: error ' + e.message['desc']) + log.error('Failed to add COS def: error ' + e.args[0]['desc']) assert False time.sleep(1) @@ -89,25 +89,25 @@ def delSubtreePwPolicy(inst): try: inst.delete_s(BRANCH_COS_DEF) except ldap.LDAPError as e: - log.error('Failed to delete COS def: error ' + e.message['desc']) + log.error('Failed to delete COS def: error ' + e.args[0]['desc']) assert False try: inst.delete_s(BRANCH_COS_TMPL) except ldap.LDAPError as e: - log.error('Failed to delete COS template: error ' + e.message['desc']) + log.error('Failed to delete COS template: error ' + e.args[0]['desc']) assert False try: inst.delete_s(BRANCH_PWP) except ldap.LDAPError as e: - log.error('Failed to delete COS password policy: error ' + e.message['desc']) + log.error('Failed to delete COS password policy: error ' + e.args[0]['desc']) assert False try: inst.delete_s(BRANCH_CONTAINER) except ldap.LDAPError as e: - log.error('Failed to delete COS container: error ' + e.message['desc']) + log.error('Failed to delete COS container: error ' + e.args[0]['desc']) assert False time.sleep(1) @@ -135,7 +135,7 @@ def test_ticket47981(topology_st): except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: - log.error('Failed to create suffix entry: error ' + e.message['desc']) + log.error('Failed to create suffix entry: error ' + e.args[0]['desc']) assert False # @@ -151,7 +151,7 @@ def test_ticket47981(topology_st): except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: - log.error('Failed to add ou=people: error ' + e.message['desc']) + log.error('Failed to add ou=people: error ' + e.args[0]['desc']) assert False # @@ -163,16 +163,16 @@ def test_ticket47981(topology_st): 'uid': 'user1' }))) except ldap.LDAPError as e: - log.error('Failed to add user1: error ' + e.message['desc']) + log.error('Failed to add user1: error ' + e.args[0]['desc']) assert False # # Enable password policy and add the subtree policy # try: - topology_st.standalone.modify_s(DN_CONFIG, 
[(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) except ldap.LDAPError as e: - log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + log.error('Failed to set pwpolicy-local: error ' + e.args[0]['desc']) assert False addSubtreePwPolicy(topology_st.standalone) @@ -189,7 +189,7 @@ def test_ticket47981(topology_st): log.fatal('User does not have expected pwdpolicysubentry!') assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) assert False # @@ -202,7 +202,7 @@ def test_ticket47981(topology_st): log.fatal('User unexpectedly does have the pwdpolicysubentry!') assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) assert False # @@ -215,7 +215,7 @@ def test_ticket47981(topology_st): log.fatal('User does not have expected pwdpolicysubentry!') assert False except ldap.LDAPError as e: - log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) assert False diff --git a/dirsrvtests/tests/tickets/ticket48234_test.py b/dirsrvtests/tests/tickets/ticket48234_test.py index 057bb93f6..94faec9f2 100644 --- a/dirsrvtests/tests/tickets/ticket48234_test.py +++ b/dirsrvtests/tests/tickets/ticket48234_test.py @@ -34,7 +34,7 @@ def test_ticket48234(topology_st): try: topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) except ldap.LDAPError as e: - topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc']) + topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.args[0]['desc']) assert False ouname = 'outest' @@ -48,9 +48,9 @@ def test_ticket48234(topology_st): '(userdn = "ldap:///%s??sub?(&(cn=%s)(ou:dn:=%s))");)' % (DEFAULT_SUFFIX, username, ouname)) try: - topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)]) + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', ensure_bytes(aci_text))]) except ldap.LDAPError as e: - log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc'])) + log.error('Failed to add aci: (%s) error %s' % (aci_text, e.args[0]['desc'])) assert False log.info('Add entries ...') @@ -72,7 +72,7 @@ def test_ticket48234(topology_st): try: topology_st.standalone.simple_bind_s(binddn, passwd) except ldap.LDAPError as e: - topology_st.standalone.log.error(bindn + ' failed to authenticate: ' + e.message['desc']) + topology_st.standalone.log.error(bindn + ' failed to authenticate: ' + e.args[0]['desc']) assert False filter = '(cn=%s)' % username @@ -84,7 +84,7 @@ def test_ticket48234(topology_st): log.fatal('aci with extensible filter failed -- %s') assert False except ldap.LDAPError as e: - topology_st.standalone.log.error('Search (%s, %s) failed: ' % (DEFAULT_SUFFIX, filter) + e.message['desc']) + topology_st.standalone.log.error('Search (%s, %s) failed: ' % (DEFAULT_SUFFIX, filter) + e.args[0]['desc']) assert False log.info('Test complete') diff --git a/dirsrvtests/tests/tickets/ticket48270_test.py b/dirsrvtests/tests/tickets/ticket48270_test.py index be2f805a2..5c1592c1a 100644 --- a/dirsrvtests/tests/tickets/ticket48270_test.py +++ 
b/dirsrvtests/tests/tickets/ticket48270_test.py @@ -46,8 +46,8 @@ def test_ticket48270_homeDirectory_indexed_cis(topology_st): # log.info("attach debugger") # time.sleep(60) - IGNORE_MR_NAME = 'caseIgnoreIA5Match' - EXACT_MR_NAME = 'caseExactIA5Match' + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) @@ -78,7 +78,7 @@ def test_ticket48270_homeDirectory_indexed_cis(topology_st): def test_ticket48270_homeDirectory_mixed_value(topology_st): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) - mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)] + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] topology_st.standalone.modify_s(name, mod) diff --git a/dirsrvtests/tests/tickets/ticket48272_test.py b/dirsrvtests/tests/tickets/ticket48272_test.py index 3936c6485..6e2f4ca27 100644 --- a/dirsrvtests/tests/tickets/ticket48272_test.py +++ b/dirsrvtests/tests/tickets/ticket48272_test.py @@ -104,7 +104,7 @@ def test_ticket48272(topology_st): # This means we have a conflicting user in scope now! topology_st.standalone.modify_s("cn=example.com,cn=addn,cn=plugins,cn=config", - [(ldap.MOD_REPLACE, 'addn_base', DEFAULT_SUFFIX)]) + [(ldap.MOD_REPLACE, 'addn_base', ensure_bytes(DEFAULT_SUFFIX))]) topology_st.standalone.restart(60) # Make sure our binds still work. diff --git a/dirsrvtests/tests/tickets/ticket48294_test.py b/dirsrvtests/tests/tickets/ticket48294_test.py index df48dc610..9bba02b74 100644 --- a/dirsrvtests/tests/tickets/ticket48294_test.py +++ b/dirsrvtests/tests/tickets/ticket48294_test.py @@ -14,6 +14,7 @@ import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st +from lib389.utils import * log = logging.getLogger(__name__) @@ -44,7 +45,7 @@ def check_attr_val(topology_st, dn, attr, expected): log.fatal('Failed to get %s' % dn) assert False except ldap.LDAPError as e: - log.fatal('Failed to search ' + dn + ': ' + e.message['desc']) + log.fatal('Failed to search ' + dn + ': ' + e.args[0]['desc']) assert False @@ -92,15 +93,15 @@ def test_48294_init(topology_st): log.info('Enable Dynamic plugins, and the linked Attrs plugin') try: - topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: - log.fatal('Failed to enable dynamic plugin!' + e.message['desc']) + log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) assert False try: topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) except ValueError as e: - log.fatal('Failed to enable linked attributes plugin!' + e.message['desc']) + log.fatal('Failed to enable linked attributes plugin!' 
+ e.args[0]['desc']) assert False log.info('Add the plugin config entry') @@ -112,7 +113,7 @@ def test_48294_init(topology_st): 'managedType': MANAGEDTYPE }))) except ldap.LDAPError as e: - log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) + log.fatal('Failed to add linked attr config entry: error ' + e.args[0]['desc']) assert False log.info('Add 2 entries: manager1 and employee1') @@ -121,7 +122,7 @@ def test_48294_init(topology_st): 'objectclass': 'top extensibleObject'.split(), 'uid': 'manager1'}))) except ldap.LDAPError as e: - log.fatal('Add manager1 failed: error ' + e.message['desc']) + log.fatal('Add manager1 failed: error ' + e.args[0]['desc']) assert False try: @@ -129,15 +130,15 @@ def test_48294_init(topology_st): 'objectclass': 'top extensibleObject'.split(), 'uid': 'employee1'}))) except ldap.LDAPError as e: - log.fatal('Add employee1 failed: error ' + e.message['desc']) + log.fatal('Add employee1 failed: error ' + e.args[0]['desc']) assert False log.info('Add linktype to manager1') topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, - [(ldap.MOD_ADD, LINKTYPE, 'uid=employee1,%s' % OU_PEOPLE)]) + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee1,%s' % OU_PEOPLE))]) log.info('Check managed attribute') - check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE) + check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) log.info('PASSED') @@ -154,13 +155,13 @@ def test_48294_run_0(topology_st): log.info('Modify the value of directReport to uid=employee2') try: topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, - [(ldap.MOD_REPLACE, LINKTYPE, 'uid=employee2,%s' % OU_PEOPLE)]) + [(ldap.MOD_REPLACE, LINKTYPE, ensure_bytes('uid=employee2,%s' % OU_PEOPLE))]) except ldap.LDAPError as e: - log.fatal('Failed to replace uid=employee1 with employee2: ' + e.message['desc']) + log.fatal('Failed to replace uid=employee1 with employee2: ' + e.args[0]['desc']) assert False log.info('Check managed attribute') - check_attr_val(topology_st, 'uid=employee2,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE) + check_attr_val(topology_st, 'uid=employee2,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) log.info('PASSED') @@ -177,20 +178,20 @@ def test_48294_run_1(topology_st): log.info('Modify the value of directReport to uid=employee3') try: topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, - [(ldap.MOD_DELETE, LINKTYPE, 'uid=employee2,%s' % OU_PEOPLE)]) + [(ldap.MOD_DELETE, LINKTYPE, ensure_bytes('uid=employee2,%s' % OU_PEOPLE))]) except ldap.LDAPError as e: - log.fatal('Failed to delete employee2: ' + e.message['desc']) + log.fatal('Failed to delete employee2: ' + e.args[0]['desc']) assert False try: topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, - [(ldap.MOD_ADD, LINKTYPE, 'uid=employee3,%s' % OU_PEOPLE)]) + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee3,%s' % OU_PEOPLE))]) except ldap.LDAPError as e: - log.fatal('Failed to add employee3: ' + e.message['desc']) + log.fatal('Failed to add employee3: ' + e.args[0]['desc']) assert False log.info('Check managed attribute') - check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE) + check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) log.info('PASSED') @@ -205,7 +206,7 @@ def test_48294_run_2(topology_st): 
_modrdn_entry(topology_st, entry_dn='uid=manager1,%s' % OU_PEOPLE, new_rdn='uid=manager2') log.info('Check managed attribute') - check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager2,%s' % OU_PEOPLE) + check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager2,%s' % OU_PEOPLE)) log.info('PASSED') diff --git a/dirsrvtests/tests/tickets/ticket48295_test.py b/dirsrvtests/tests/tickets/ticket48295_test.py index d118de46f..f5d17f470 100644 --- a/dirsrvtests/tests/tickets/ticket48295_test.py +++ b/dirsrvtests/tests/tickets/ticket48295_test.py @@ -13,6 +13,7 @@ import pytest from lib389 import Entry from lib389._constants import * from lib389.topologies import topology_st +from lib389.utils import * log = logging.getLogger(__name__) @@ -57,7 +58,7 @@ def check_attr_val(topology_st, dn, attr, expected, revert): log.fatal('Failed to get %s' % dn) assert False except ldap.LDAPError as e: - log.fatal('Failed to search ' + dn + ': ' + e.message['desc']) + log.fatal('Failed to search ' + dn + ': ' + e.args[0]['desc']) assert False @@ -70,15 +71,15 @@ def test_48295_init(topology_st): log.info('Enable Dynamic plugins, and the linked Attrs plugin') try: - topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: - log.fatal('Failed to enable dynamic plugin!' + e.message['desc']) + log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) assert False try: topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) except ValueError as e: - log.fatal('Failed to enable linked attributes plugin!' + e.message['desc']) + log.fatal('Failed to enable linked attributes plugin!' 
+ e.args[0]['desc']) assert False log.info('Add the plugin config entry') @@ -90,7 +91,7 @@ def test_48295_init(topology_st): 'managedType': MANAGEDTYPE }))) except ldap.LDAPError as e: - log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) + log.fatal('Failed to add linked attr config entry: error ' + e.args[0]['desc']) assert False log.info('Add 2 entries: manager1 and employee1') @@ -99,7 +100,7 @@ def test_48295_init(topology_st): 'objectclass': 'top extensibleObject'.split(), 'uid': 'manager1'}))) except ldap.LDAPError as e: - log.fatal('Add manager1 failed: error ' + e.message['desc']) + log.fatal('Add manager1 failed: error ' + e.args[0]['desc']) assert False try: @@ -107,7 +108,7 @@ def test_48295_init(topology_st): 'objectclass': 'top extensibleObject'.split(), 'uid': 'employee1'}))) except ldap.LDAPError as e: - log.fatal('Add employee1 failed: error ' + e.message['desc']) + log.fatal('Add employee1 failed: error ' + e.args[0]['desc']) assert False log.info('PASSED') @@ -122,14 +123,14 @@ def test_48295_run(topology_st): 'Add 2 linktypes to manager1 - one exists, another does not to make sure the managed entry does not have managed type.') try: topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, - [(ldap.MOD_ADD, LINKTYPE, 'uid=employee1,%s' % OU_PEOPLE), - (ldap.MOD_ADD, LINKTYPE, 'uid=doNotExist,%s' % OU_PEOPLE)]) + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee1,%s' % OU_PEOPLE)), + (ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=doNotExist,%s' % OU_PEOPLE))]) except ldap.UNWILLING_TO_PERFORM: log.info('Add uid=employee1 and uid=doNotExist expectedly failed.') pass log.info('Check managed attribute does not exist.') - check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, 'uid=manager1,%s' % OU_PEOPLE, True) + check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE), True) log.info('PASSED') diff --git a/dirsrvtests/tests/tickets/ticket48312_test.py b/dirsrvtests/tests/tickets/ticket48312_test.py index 74632dffa..387b79718 100644 --- a/dirsrvtests/tests/tickets/ticket48312_test.py +++ b/dirsrvtests/tests/tickets/ticket48312_test.py @@ -26,9 +26,9 @@ def test_ticket48312(topology_st): # First enable dynamic plugins # try: - topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')]) + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) except ldap.LDAPError as e: - log.fatal('Failed to enable dynamic plugin!' + e.message['desc']) + log.fatal('Failed to enable dynamic plugin!' 
+ e.args[0]['desc']) assert False topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) @@ -42,7 +42,7 @@ def test_ticket48312(topology_st): except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc']) + log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) assert False try: @@ -52,7 +52,7 @@ def test_ticket48312(topology_st): except ldap.ALREADY_EXISTS: pass except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add people org unit: error ' + e.message['desc']) + log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) assert False # @@ -67,7 +67,7 @@ def test_ticket48312(topology_st): 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber'] }))) except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc']) + log.fatal('test_mep: Failed to add template entry: error ' + e.args[0]['desc']) assert False # @@ -83,7 +83,7 @@ def test_ticket48312(topology_st): 'managedTemplate': TEMPLATE_DN }))) except ldap.LDAPError as e: - log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc']) + log.fatal('test_mep: Failed to add config entry: error ' + e.args[0]['desc']) assert False # @@ -100,7 +100,7 @@ def test_ticket48312(topology_st): 'description': 'uiser description' }))) except ldap.LDAPError as e: - log.fatal('test_mep: Failed to user1: error ' + e.message['desc']) + log.fatal('test_mep: Failed to user1: error ' + e.args[0]['desc']) assert False # @@ -109,7 +109,7 @@ def test_ticket48312(topology_st): try: topology_st.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1) except ldap.LDAPError as e: - log.error('Failed to modrdn: error ' + e.message['desc']) + log.error('Failed to modrdn: error ' + e.args[0]['desc']) assert False log.info('Test complete') diff --git a/dirsrvtests/tests/tickets/ticket48342_test.py b/dirsrvtests/tests/tickets/ticket48342_test.py index 14ef8f93c..c30a55723 100644 --- a/dirsrvtests/tests/tickets/ticket48342_test.py +++ b/dirsrvtests/tests/tickets/ticket48342_test.py @@ -29,14 +29,14 @@ def _dna_config(server, nextValue=500, maxValue=510): }))) except ldap.LDAPError as e: - log.error('Failed to add DNA config entry: error ' + e.message['desc']) + log.error('Failed to add DNA config entry: error ' + e.args[0]['desc']) assert False log.info("Enable the DNA plugin...") try: server.plugins.enable(name=PLUGIN_DNA) except e: - log.error("Failed to enable DNA Plugin: error " + e.message['desc']) + log.error("Failed to enable DNA Plugin: error " + e.args[0]['desc']) assert False log.info("Restarting the server...") @@ -86,7 +86,7 @@ def test_ticket4026(topology_m3): # Turn on lots of error logging now. 
- mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '16384')] + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'16384')] # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] topology_m3.ms["master1"].modify_s('cn=config', mod) topology_m3.ms["master2"].modify_s('cn=config', mod) @@ -124,7 +124,7 @@ def test_ticket4026(topology_m3): }))) log.info('Test complete') # add on master1 users with description DNA - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '16384')] + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'16384')] # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] topology_m3.ms["master1"].modify_s('cn=config', mod) topology_m3.ms["master2"].modify_s('cn=config', mod) diff --git a/dirsrvtests/tests/tickets/ticket48362_test.py b/dirsrvtests/tests/tickets/ticket48362_test.py index b5b547a5b..edb7ccacc 100644 --- a/dirsrvtests/tests/tickets/ticket48362_test.py +++ b/dirsrvtests/tests/tickets/ticket48362_test.py @@ -16,9 +16,9 @@ PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) MAX_ACCOUNTS = 5 BINDMETHOD_ATTR = 'dnaRemoteBindMethod' -BINDMETHOD_VALUE = "SASL/GSSAPI" +BINDMETHOD_VALUE = b'SASL/GSSAPI' PROTOCOLE_ATTR = 'dnaRemoteConnProtocol' -PROTOCOLE_VALUE = 'LDAP' +PROTOCOLE_VALUE = b'LDAP' SHARE_CFG_BASE = 'ou=ranges,' + SUFFIX @@ -82,8 +82,8 @@ def _shared_cfg_server_update(server, method=BINDMETHOD_VALUE, transport=PROTOCO log.info('\n======================== Update dnaPortNum=%d ============================\n' % server.port) try: ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) - mod = [(ldap.MOD_REPLACE, BINDMETHOD_ATTR, method), - (ldap.MOD_REPLACE, PROTOCOLE_ATTR, transport)] + mod = [(ldap.MOD_REPLACE, BINDMETHOD_ATTR, ensure_bytes(method)), + (ldap.MOD_REPLACE, PROTOCOLE_ATTR, ensure_bytes(transport))] server.modify_s(ent.dn, mod) log.info('\n======================== Update done\n') diff --git a/dirsrvtests/tests/tickets/ticket48366_test.py b/dirsrvtests/tests/tickets/ticket48366_test.py index 90dc6af63..c860f5456 100644 --- a/dirsrvtests/tests/tickets/ticket48366_test.py +++ b/dirsrvtests/tests/tickets/ticket48366_test.py @@ -77,13 +77,13 @@ def test_ticket48366_init(topology_st): ACI_ALLOW = "(version 3.0; acl \"Allow search-read to green subtree\"; allow (read, search, compare)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % TEST_USER_DN ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_st.standalone.modify_s(SUFFIX, mod) ACI_ALLOW = "(version 3.0; acl \"Allow use pf proxy auth to green subtree\"; allow (proxy)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % PROXY_USER_DN ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT - mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] topology_st.standalone.modify_s(SUFFIX, mod) log.info("Adding %d test entries...") @@ -107,7 +107,7 @@ def test_ticket48366_init(topology_st): def test_ticket48366_search_user(topology_st): - proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: " + TEST_USER_DN) + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + TEST_USER_DN)) # searching as test user should return one entry from the green subtree topology_st.standalone.simple_bind_s(TEST_USER_DN, PASSWORD) ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') @@ -130,12 +130,12 @@ def test_ticket48366_search_dm(topology_st): assert 
(len(ents) == 2) # searching as directory manager proxying test user should return one entry - proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: " + TEST_USER_DN) + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + TEST_USER_DN)) ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) assert (len(ents) == 1) # searching as directory manager proxying proxy user should return no entry - proxy_ctrl = ProxyAuthzControl(criticality=True, authzId="dn: " + PROXY_USER_DN) + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + PROXY_USER_DN)) ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) assert (len(ents) == 0) diff --git a/dirsrvtests/tests/tickets/ticket48497_test.py b/dirsrvtests/tests/tickets/ticket48497_test.py index a2f6ec986..9dea9e882 100644 --- a/dirsrvtests/tests/tickets/ticket48497_test.py +++ b/dirsrvtests/tests/tickets/ticket48497_test.py @@ -36,7 +36,7 @@ def test_ticket48497_init(topology_st): def test_ticket48497_homeDirectory_mixed_value(topology_st): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) - mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)] + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] topology_st.standalone.modify_s(name, mod) @@ -78,8 +78,8 @@ def test_ticket48497_homeDirectory_index_cfg(topology_st): 'nsSystemIndex': 'false', 'nsIndexType': 'eq'}))) - IGNORE_MR_NAME = 'caseIgnoreIA5Match' - EXACT_MR_NAME = 'caseExactIA5Match' + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) diff --git a/dirsrvtests/tests/tickets/ticket48665_test.py b/dirsrvtests/tests/tickets/ticket48665_test.py index fdbe5874e..d2c038787 100644 --- a/dirsrvtests/tests/tickets/ticket48665_test.py +++ b/dirsrvtests/tests/tickets/ticket48665_test.py @@ -23,10 +23,10 @@ def test_ticket48665(topology_st): # This will trigger a mod delete then add. topology_st.standalone.modify_s('cn=config,cn=ldbm database,cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', '0')]) + [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', b'0')]) try: - modlist = [(ldap.MOD_DELETE, 'nsslapd-cachememsize', None), (ldap.MOD_ADD, 'nsslapd-cachememsize', '1')] + modlist = [(ldap.MOD_DELETE, 'nsslapd-cachememsize', None), (ldap.MOD_ADD, 'nsslapd-cachememsize', b'1')] topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME, modlist) except: @@ -40,9 +40,9 @@ def test_ticket48665(topology_st): # This has a magic hack to determine if we are in cn=config. try: topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE, - 'nsslapd-cachememsize', '1')]) + 'nsslapd-cachememsize', b'1')]) except ldap.LDAPError as e: - log.fatal('Failed to change nsslapd-cachememsize ' + e.message['desc']) + log.fatal('Failed to change nsslapd-cachememsize ' + e.args[0]['desc']) # Check the server has not commited seppuku. entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') @@ -51,7 +51,7 @@ def test_ticket48665(topology_st): # Now try with mod_replace. This should be okay. 
- modlist = [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', '1')] + modlist = [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', b'1')] topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME, modlist) diff --git a/dirsrvtests/tests/tickets/ticket48745_test.py b/dirsrvtests/tests/tickets/ticket48745_test.py index 88b78c6ce..c40358d1d 100644 --- a/dirsrvtests/tests/tickets/ticket48745_test.py +++ b/dirsrvtests/tests/tickets/ticket48745_test.py @@ -46,8 +46,8 @@ def test_ticket48745_homeDirectory_indexed_cis(topology_st): # log.info("attach debugger") # time.sleep(60) - IGNORE_MR_NAME = 'caseIgnoreIA5Match' - EXACT_MR_NAME = 'caseExactIA5Match' + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) @@ -78,7 +78,7 @@ def test_ticket48745_homeDirectory_indexed_cis(topology_st): def test_ticket48745_homeDirectory_mixed_value(topology_st): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) - mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)] + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] topology_st.standalone.modify_s(name, mod) diff --git a/dirsrvtests/tests/tickets/ticket48746_test.py b/dirsrvtests/tests/tickets/ticket48746_test.py index fa5a7068e..e23fc9047 100644 --- a/dirsrvtests/tests/tickets/ticket48746_test.py +++ b/dirsrvtests/tests/tickets/ticket48746_test.py @@ -46,8 +46,8 @@ def test_ticket48746_homeDirectory_indexed_cis(topology_st): # log.info("attach debugger") # time.sleep(60) - IGNORE_MR_NAME = 'caseIgnoreIA5Match' - EXACT_MR_NAME = 'caseExactIA5Match' + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) @@ -78,7 +78,7 @@ def test_ticket48746_homeDirectory_indexed_cis(topology_st): def test_ticket48746_homeDirectory_mixed_value(topology_st): # Set a homedirectory value with mixed case name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) - mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)] + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] topology_st.standalone.modify_s(name, mod) @@ -111,7 +111,7 @@ def test_ticket48746_homeDirectory_indexed_ces(topology_st): # log.info("attach debugger") # time.sleep(60) - EXACT_MR_NAME = 'caseExactIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME))] topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) diff --git a/dirsrvtests/tests/tickets/ticket48784_test.py b/dirsrvtests/tests/tickets/ticket48784_test.py index 8924fe2e3..051fdb07b 100644 --- a/dirsrvtests/tests/tickets/ticket48784_test.py +++ b/dirsrvtests/tests/tickets/ticket48784_test.py @@ -51,12 +51,12 @@ def config_tls_agreements(topology_m2): log.info("##### Update the agreement of master1") m1 = topology_m2.ms["master1"] m1_m2_agmt = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn - topology_m2.ms["master1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')]) + topology_m2.ms["master1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', b'TLS')]) log.info("##### Update the agreement of master2") m2 = topology_m2.ms["master2"] m2_m1_agmt = m2.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn - topology_m2.ms["master2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 
'nsDS5ReplicaTransportInfo', 'TLS')]) + topology_m2.ms["master2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', b'TLS')]) time.sleep(1) @@ -70,10 +70,10 @@ def set_ssl_Version(server, name, version): log.info("\n######################### Set %s on %s ######################\n" % (version, name)) server.simple_bind_s(DN_DM, PASSWORD) - server.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'), - (ldap.MOD_REPLACE, 'nsTLS1', 'on'), - (ldap.MOD_REPLACE, 'sslVersionMin', version), - (ldap.MOD_REPLACE, 'sslVersionMax', version)]) + server.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', b'off'), + (ldap.MOD_REPLACE, 'nsTLS1', b'on'), + (ldap.MOD_REPLACE, 'sslVersionMin', ensure_bytes(version)), + (ldap.MOD_REPLACE, 'sslVersionMax', ensure_bytes(version))]) def test_ticket48784(topology_m2): diff --git a/dirsrvtests/tests/tickets/ticket48799_test.py b/dirsrvtests/tests/tickets/ticket48799_test.py index a52e85c8f..0e51d4258 100644 --- a/dirsrvtests/tests/tickets/ticket48799_test.py +++ b/dirsrvtests/tests/tickets/ticket48799_test.py @@ -8,11 +8,11 @@ log = logging.getLogger(__name__) def _add_custom_schema(server): - attr_value = "( 10.0.9.2342.19200300.100.1.1 NAME 'customManager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'user defined' )" + attr_value = b"( 10.0.9.2342.19200300.100.1.1 NAME 'customManager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'user defined' )" mod = [(ldap.MOD_ADD, 'attributeTypes', attr_value)] server.modify_s('cn=schema', mod) - oc_value = "( 1.3.6.1.4.1.4843.2.1 NAME 'customPerson' SUP inetorgperson STRUCTURAL MAY (customManager) X-ORIGIN 'user defined' )" + oc_value = b"( 1.3.6.1.4.1.4843.2.1 NAME 'customPerson' SUP inetorgperson STRUCTURAL MAY (customManager) X-ORIGIN 'user defined' )" mod = [(ldap.MOD_ADD, 'objectclasses', oc_value)] server.modify_s('cn=schema', mod) @@ -37,9 +37,9 @@ def _create_user(server): def _modify_user(server): mod = [ - (ldap.MOD_ADD, 'objectClass', ['customPerson']), - (ldap.MOD_ADD, 'sn', ['User']), - (ldap.MOD_ADD, 'customManager', ['cn=manager']), + (ldap.MOD_ADD, 'objectClass', [b'customPerson']), + (ldap.MOD_ADD, 'sn', [b'User']), + (ldap.MOD_ADD, 'customManager', [b'cn=manager']), ] server.modify("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, mod) diff --git a/dirsrvtests/tests/tickets/ticket48808_test.py b/dirsrvtests/tests/tickets/ticket48808_test.py index fc3242644..0414576ae 100644 --- a/dirsrvtests/tests/tickets/ticket48808_test.py +++ b/dirsrvtests/tests/tickets/ticket48808_test.py @@ -33,7 +33,7 @@ def test_user(topology_st): }))) except ldap.LDAPError as e: log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN, - e.message['desc'])) + e.args[0]['desc'])) raise e @@ -62,7 +62,7 @@ def add_users(topology_st, users_num): }))) except ldap.LDAPError as e: log.error('Failed to add user (%s): error (%s)' % (USER_DN, - e.message['desc'])) + e.args[0]['desc'])) raise e return users_list @@ -76,7 +76,7 @@ def del_users(topology_st, users_list): topology_st.standalone.delete_s(user_dn) except ldap.LDAPError as e: log.error('Failed to delete user (%s): error (%s)' % (user_dn, - e.message['desc'])) + e.args[0]['desc'])) raise e @@ -102,7 +102,7 @@ def change_conf_attr(topology_st, suffix, attr_name, attr_value): attr_value)]) except ldap.LDAPError as e: log.error('Failed to change attr value (%s): error (%s)' % (attr_name, - e.message['desc'])) + e.args[0]['desc'])) raise e return attr_value_bck @@ -168,7 +168,7 @@ def 
test_ticket48808(topology_st, test_user): req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') controls = [req_ctrl] - for ii in xrange(3): + for ii in range(3): log.info('Iteration %d' % ii) msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, @@ -225,8 +225,8 @@ def test_ticket48808(topology_st, test_user): log.info("Search should fail with 'nsPagedSizeLimit = 5'" "and 'nsslapd-pagedsizelimit = 15' with 10 users") - conf_attr = '15' - user_attr = '5' + conf_attr = b'15' + user_attr = b'5' expected_rs = ldap.SIZELIMIT_EXCEEDED users_num = 10 page_size = 10 @@ -260,8 +260,8 @@ def test_ticket48808(topology_st, test_user): log.info("Search should pass with 'nsPagedSizeLimit = 15'" "and 'nsslapd-pagedsizelimit = 5' with 10 users") - conf_attr = '5' - user_attr = '15' + conf_attr = b'5' + user_attr = b'15' users_num = 10 page_size = 10 users_list = add_users(topology_st, users_num) diff --git a/dirsrvtests/tests/tickets/ticket48844_test.py b/dirsrvtests/tests/tickets/ticket48844_test.py index b4d2df39a..a1fc48c51 100644 --- a/dirsrvtests/tests/tickets/ticket48844_test.py +++ b/dirsrvtests/tests/tickets/ticket48844_test.py @@ -73,8 +73,8 @@ def test_ticket48844_init(topology_st): BITW_SCHEMA_AT_2 = '( NAME \'testUserStatus\' DESC \'State of User account active/disabled\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' BITW_SCHEMA_OC_1 = '( NAME \'testperson\' SUP top STRUCTURAL MUST ( sn $ cn $ testUserAccountControl $ testUserStatus )' + \ ' MAY ( userPassword $ telephoneNumber $ seeAlso $ description ) X-ORIGIN \'BitWise\' )' - topology_st.standalone.schema.add_schema('attributetypes', [BITW_SCHEMA_AT_1, BITW_SCHEMA_AT_2]) - topology_st.standalone.schema.add_schema('objectClasses', BITW_SCHEMA_OC_1) + topology_st.standalone.schema.add_schema('attributetypes', [ensure_bytes(BITW_SCHEMA_AT_1), ensure_bytes(BITW_SCHEMA_AT_2)]) + topology_st.standalone.schema.add_schema('objectClasses', ensure_bytes(BITW_SCHEMA_OC_1)) topology_st.standalone.backend.create(TESTBASEDN, {BACKEND_NAME: TESTBACKEND_NAME}) topology_st.standalone.mappingtree.create(TESTBASEDN, bename=TESTBACKEND_NAME, parent=None) diff --git a/dirsrvtests/tests/tickets/ticket48891_test.py b/dirsrvtests/tests/tickets/ticket48891_test.py index c37ed660a..ce0d3b7cb 100644 --- a/dirsrvtests/tests/tickets/ticket48891_test.py +++ b/dirsrvtests/tests/tickets/ticket48891_test.py @@ -42,10 +42,7 @@ def test_ticket48891_setup(topology_st): topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) # check there is no core - entry = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, - "(cn=config)", ['nsslapd-errorlog']) - assert entry - path = entry[0].getValue('nsslapd-errorlog').replace('errors', '') + path = topology_st.standalone.config.get_attr_val_utf8('nsslapd-errorlog').replace('errors', '') log.debug('Looking for a core file in: ' + path) cores = fnmatch.filter(os.listdir(path), 'core.*') assert len(cores) == 0 diff --git a/dirsrvtests/tests/tickets/ticket48896_test.py b/dirsrvtests/tests/tickets/ticket48896_test.py index be58b3486..8125af4b8 100644 --- a/dirsrvtests/tests/tickets/ticket48896_test.py +++ b/dirsrvtests/tests/tickets/ticket48896_test.py @@ -49,7 +49,7 @@ def replace_pw(server, curpw, newpw, expstr, rc): hit = 0 log.info('Replacing password: %s -> %s, which should %s' % (curpw, newpw, expstr)) try: - server.modify_s(TESTDN, [(ldap.MOD_REPLACE, 'userPassword', newpw)]) + server.modify_s(TESTDN, [(ldap.MOD_REPLACE, 'userPassword', ensure_bytes(newpw))]) except Exception as e: 
log.info("Exception (expected): %s" % type(e).__name__) hit = 1 @@ -69,8 +69,8 @@ def test_ticket48896(topology_st): log.info("Setting global password policy with password syntax.") topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) - topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'), - (ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')]) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordCheckSyntax', b'on'), + (ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) config = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, 'cn=*') mintokenlen = config[0].getValue('passwordMinTokenLength') diff --git a/dirsrvtests/tests/tickets/ticket48944_test.py b/dirsrvtests/tests/tickets/ticket48944_test.py index b933cdd4f..7e63cf2f5 100644 --- a/dirsrvtests/tests/tickets/ticket48944_test.py +++ b/dirsrvtests/tests/tickets/ticket48944_test.py @@ -53,13 +53,13 @@ def _enable_plugin(topo, inst_name): topo.ms[inst_name].simple_bind_s(DN_DM, PASSWORD) try: topo.ms[inst_name].plugins.enable(name=PLUGIN_ACCT_POLICY) - topo.ms[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ACCP_CONF)]) - topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes')]) - topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')]) - topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp')]) - topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry')]) - topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')]) - topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', '3600')]) + topo.ms[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCP_CONF))]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'3600')]) except ldap.LDAPError as e: log.error('Failed to configure {} plugin for inst-{} error: {}'.format(PLUGIN_ACCT_POLICY, inst_name, str(e))) topo.ms[inst_name].restart(timeout=10) @@ -68,13 +68,13 @@ def _enable_plugin(topo, inst_name): topo.cs[inst_name].simple_bind_s(DN_DM, PASSWORD) try: topo.cs[inst_name].plugins.enable(name=PLUGIN_ACCT_POLICY) - topo.cs[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ACCP_CONF)]) - topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes')]) - topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')]) - topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp')]) - topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry')]) - topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')]) - topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', '3600')]) + 
topo.cs[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCP_CONF))]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'3600')]) except ldap.LDAPError as e: log.error('Failed to configure {} plugin for inst-{} error {}'.format(PLUGIN_ACCT_POLICY, inst_name, str(e))) topo.cs[inst_name].restart(timeout=10) diff --git a/dirsrvtests/tests/tickets/ticket48956_test.py b/dirsrvtests/tests/tickets/ticket48956_test.py index b4e21b376..14a62418e 100644 --- a/dirsrvtests/tests/tickets/ticket48956_test.py +++ b/dirsrvtests/tests/tickets/ticket48956_test.py @@ -73,10 +73,10 @@ def _check_inactivity(topology_st, mysuffix): log.error('CONSTRAINT VIOLATION ' + e.message['desc']) topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) - assert (_check_status(topology_st, TEST_USER_DN, '- activated')) + assert (_check_status(topology_st, TEST_USER_DN, b'- activated')) time.sleep(int(INACTIVITY_LIMIT) + 5) - assert (_check_status(topology_st, TEST_USER_DN, '- inactivated (inactivity limit exceeded')) + assert (_check_status(topology_st, TEST_USER_DN, b'- inactivated (inactivity limit exceeded')) def test_ticket48956(topology_st): @@ -88,14 +88,14 @@ def test_ticket48956(topology_st): """ topology_st.standalone.modify_s(ACCT_POLICY_PLUGIN_DN, - [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ACCT_POLICY_CONFIG_DN)]) + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCT_POLICY_CONFIG_DN))]) - topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'), - (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'), - (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'), - (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'), + topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), + (ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime'), + (ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp'), + (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), (ldap.MOD_REPLACE, 'limitattrname', - 'accountInactivityLimit')]) + b'accountInactivityLimit')]) # Enable the plugins topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) diff --git a/dirsrvtests/tests/tickets/ticket48973_test.py b/dirsrvtests/tests/tickets/ticket48973_test.py index 5e98dacb3..6af945ec9 100644 --- a/dirsrvtests/tests/tickets/ticket48973_test.py +++ b/dirsrvtests/tests/tickets/ticket48973_test.py @@ -173,7 +173,7 @@ def _check_entry(topology, filterHead=None, filterValueUpper=False, entry_ext=No if found: assert len(ents) == 1 assert ents[0].hasAttr('homedirectory') - valueHome = "%s%d" % (HOMEHEAD, entry_ext) + valueHome = ensure_bytes("%s%d" % (HOMEHEAD, entry_ext)) assert valueHome in ents[0].getValues('homedirectory') else: assert len(ents) == 0 @@ -210,7 +210,7 @@ def test_ticket48973_ces_not_indexed(topology): ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) assert len(ents) == 1 assert ents[0].hasAttr('homedirectory') - assert searchedHome in 
ents[0].getValues('homedirectory') + assert ensure_bytes(searchedHome) in ents[0].getValues('homedirectory') result = _find_next_notes(topology, Filter) log.info("result=%s" % result) @@ -269,10 +269,10 @@ def test_ticket48973_homeDirectory_caseExactIA5Match_caseIgnoreIA5Match_indexing entry_ext = 4 log.info("\n\nindex homeDirectory in caseExactIA5Match and caseIgnoreIA5Match") - EXACTIA5_MR_NAME='caseExactIA5Match' - IGNOREIA5_MR_NAME='caseIgnoreIA5Match' - EXACT_MR_NAME='caseExactMatch' - IGNORE_MR_NAME='caseIgnoreMatch' + EXACTIA5_MR_NAME=b'caseExactIA5Match' + IGNOREIA5_MR_NAME=b'caseIgnoreIA5Match' + EXACT_MR_NAME=b'caseExactMatch' + IGNORE_MR_NAME=b'caseIgnoreMatch' mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME, IGNORE_MR_NAME, EXACTIA5_MR_NAME, IGNOREIA5_MR_NAME))] topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) diff --git a/dirsrvtests/tests/tickets/ticket49008_test.py b/dirsrvtests/tests/tickets/ticket49008_test.py index 461132a74..22b054203 100644 --- a/dirsrvtests/tests/tickets/ticket49008_test.py +++ b/dirsrvtests/tests/tickets/ticket49008_test.py @@ -39,7 +39,7 @@ def test_ticket49008(T): # Set the auto OC to an objectclass that does NOT allow memberOf B.modify_s('cn=MemberOf Plugin,cn=plugins,cn=config', - [(ldap.MOD_REPLACE, 'memberofAutoAddOC', 'referral')]) + [(ldap.MOD_REPLACE, 'memberofAutoAddOC', b'referral')]) B.restart(timeout=10) # add a few entries allowing memberof @@ -87,7 +87,7 @@ def test_ticket49008(T): elements = B_RUV[0].getValues('nsds50ruv') ruv_before = 'ruv_before' for ruv in elements: - if 'replica 2' in ruv: + if b'replica 2' in ruv: ruv_before = ruv # add a group with members allowing memberof and members which don't @@ -108,7 +108,7 @@ def test_ticket49008(T): elements = B_RUV[0].getValues('nsds50ruv') ruv_after = 'ruv_after' for ruv in elements: - if 'replica 2' in ruv: + if b'replica 2' in ruv: ruv_after = ruv log.info('ruv before fail: {}'.format(ruv_before)) diff --git a/dirsrvtests/tests/tickets/ticket49072_test.py b/dirsrvtests/tests/tickets/ticket49072_test.py index efa57f6c7..f6329f77d 100644 --- a/dirsrvtests/tests/tickets/ticket49072_test.py +++ b/dirsrvtests/tests/tickets/ticket49072_test.py @@ -55,7 +55,7 @@ def test_ticket49072_basedn(topo): except subprocess.CalledProcessError as err: output = err.output log.info('output: {}'.format(output)) - expected = "Successfully added task entry" + expected = b"Successfully added task entry" assert expected in output log_entry = topo.standalone.ds_error_log.match('.*Failed to get be backend.*') log.info('Error log out: {}'.format(log_entry)) @@ -96,7 +96,7 @@ def test_ticket49072_filter(topo): except subprocess.CalledProcessError as err: output = err.output log.info('output: {}'.format(output)) - expected = "Successfully added task entry" + expected = b"Successfully added task entry" assert expected in output log_entry = topo.standalone.ds_error_log.match('.*Bad search filter.*') log.info('Error log out: {}'.format(log_entry)) diff --git a/dirsrvtests/tests/tickets/ticket49076_test.py b/dirsrvtests/tests/tickets/ticket49076_test.py index c4a2c1bdf..3fa696726 100644 --- a/dirsrvtests/tests/tickets/ticket49076_test.py +++ b/dirsrvtests/tests/tickets/ticket49076_test.py @@ -28,7 +28,7 @@ def _check_configured_value(topology_st, attr=txn_begin_flag, expected_value=Non assert (entries[0].hasValue(attr)) if entries[0].hasValue(attr): topology_st.standalone.log.info('Current value is %s' % entries[0].getValue(attr)) - assert (entries[0].getValue(attr) == expected_value) + assert 
(entries[0].getValue(attr) == ensure_bytes(expected_value)) def _update_db(topology_st): topology_st.standalone.add_s( @@ -53,7 +53,7 @@ def test_ticket49076(topo): # switch to wait mode topo.standalone.modify_s(ldbm_config, - [(ldap.MOD_REPLACE, txn_begin_flag, "on")]) + [(ldap.MOD_REPLACE, txn_begin_flag, b"on")]) # check default value is DB_TXN_NOWAIT _check_configured_value(topo, expected_value="on") _update_db(topo) @@ -61,7 +61,7 @@ def test_ticket49076(topo): # switch back to "normal mode" topo.standalone.modify_s(ldbm_config, - [(ldap.MOD_REPLACE, txn_begin_flag, "off")]) + [(ldap.MOD_REPLACE, txn_begin_flag, b"off")]) # check default value is DB_TXN_NOWAIT _check_configured_value(topo, expected_value="off") # tests we are able to update DB @@ -69,7 +69,7 @@ def test_ticket49076(topo): # check that settings are not reset by restart topo.standalone.modify_s(ldbm_config, - [(ldap.MOD_REPLACE, txn_begin_flag, "on")]) + [(ldap.MOD_REPLACE, txn_begin_flag, b"on")]) # check default value is DB_TXN_NOWAIT _check_configured_value(topo, expected_value="on") _update_db(topo) diff --git a/dirsrvtests/tests/tickets/ticket49095_test.py b/dirsrvtests/tests/tickets/ticket49095_test.py index 04f92b2df..43013543c 100644 --- a/dirsrvtests/tests/tickets/ticket49095_test.py +++ b/dirsrvtests/tests/tickets/ticket49095_test.py @@ -36,24 +36,24 @@ def test_ticket49095(topo): 'telephonenumber': '555-555-5555' }))) except ldap.LDAPError as e: - log.fatal('Failed to add test user: ' + e.message['desc']) + log.fatal('Failed to add test user: ' + e.args[0]['desc']) assert False for aci in acis: # Add ACI try: topo.standalone.modify_s(DEFAULT_SUFFIX, - [(ldap.MOD_REPLACE, 'aci', aci)]) + [(ldap.MOD_REPLACE, 'aci', ensure_bytes(aci))]) except ldap.LDAPError as e: - log.fatal('Failed to set aci: ' + aci + ': ' + e.message['desc']) + log.fatal('Failed to set aci: ' + aci + ': ' + e.args[0]['desc']) assert False # Set Anonymous Bind to test aci try: topo.standalone.simple_bind_s("", "") except ldap.LDAPError as e: - log.fatal('Failed to bind anonymously: ' + e.message['desc']) + log.fatal('Failed to bind anonymously: ' + e.args[0]['desc']) assert False # Search for entry - should not get any results @@ -64,14 +64,14 @@ def test_ticket49095(topo): log.fatal('The entry was incorrectly returned') assert False except ldap.LDAPError as e: - log.fatal('Failed to search anonymously: ' + e.message['desc']) + log.fatal('Failed to search anonymously: ' + e.args[0]['desc']) assert False # Set root DN Bind so we can update aci's try: topo.standalone.simple_bind_s(DN_DM, PASSWORD) except ldap.LDAPError as e: - log.fatal('Failed to bind anonymously: ' + e.message['desc']) + log.fatal('Failed to bind anonymously: ' + e.args[0]['desc']) assert False log.info("Test Passed") diff --git a/dirsrvtests/tests/tickets/ticket49184_test.py b/dirsrvtests/tests/tickets/ticket49184_test.py index 20edfde58..7992e6156 100644 --- a/dirsrvtests/tests/tickets/ticket49184_test.py +++ b/dirsrvtests/tests/tickets/ticket49184_test.py @@ -30,7 +30,7 @@ def _add_group_with_members(topo, group_dn): {'objectclass': 'top groupofnames extensibleObject'.split(), 'cn': 'group'}))) except ldap.LDAPError as e: - log.fatal('Failed to add group: error ' + e.message['desc']) + log.fatal('Failed to add group: error ' + e.args[0]['desc']) assert False # Add members to the group - set timeout @@ -41,10 +41,10 @@ def _add_group_with_members(topo, group_dn): topo.standalone.modify_s(group_dn, [(ldap.MOD_ADD, 'member', - MEMBER_VAL)]) + ensure_bytes(MEMBER_VAL))]) 
except ldap.LDAPError as e: log.fatal('Failed to update group: member (%s) - error: %s' % - (MEMBER_VAL, e.message['desc'])) + (MEMBER_VAL, e.args[0]['desc'])) assert False def _check_memberof(topo, member=None, memberof=True, group_dn=None): @@ -54,21 +54,21 @@ def _check_memberof(topo, member=None, memberof=True, group_dn=None): USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) ent = topo.standalone.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)") if presence_flag: - assert ent.hasAttr('memberof') and ent.getValue('memberof') == group_dn + assert ent.hasAttr('memberof') and ent.getValue('memberof') == ensure_bytes(group_dn) else: assert not ent.hasAttr('memberof') except ldap.LDAPError as e: - log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.message['desc'])) + log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.args[0]['desc'])) assert False def _check_memberof(topo, member=None, memberof=True, group_dn=None): ent = topo.standalone.getEntry(member, ldap.SCOPE_BASE, "(objectclass=*)") if memberof: assert group_dn - assert ent.hasAttr('memberof') and group_dn in ent.getValues('memberof') + assert ent.hasAttr('memberof') and ensure_bytes(group_dn) in ent.getValues('memberof') else: if ent.hasAttr('memberof'): - assert group_dn not in ent.getValues('memberof') + assert ensure_bytes(group_dn) not in ent.getValues('memberof') def test_ticket49184(topo): @@ -92,7 +92,7 @@ def test_ticket49184(topo): {'objectclass': 'top extensibleObject'.split(), 'uid': 'member%d' % (idx)}))) except ldap.LDAPError as e: - log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc'])) + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) assert False # add all users in GROUP_DN_1 and checks each users is memberof GROUP_DN_1 @@ -115,17 +115,17 @@ def test_ticket49184(topo): topo.standalone.modify_s(SUPER_GRP1, [(ldap.MOD_ADD, 'member', - GROUP_DN_1), + ensure_bytes(GROUP_DN_1)), (ldap.MOD_ADD, 'member', - GROUP_DN_2)]) + ensure_bytes(GROUP_DN_2))]) topo.standalone.modify_s(SUPER_GRP2, [(ldap.MOD_ADD, 'member', - GROUP_DN_1), + ensure_bytes(GROUP_DN_1)), (ldap.MOD_ADD, 'member', - GROUP_DN_2)]) + ensure_bytes(GROUP_DN_2))]) return topo.standalone.delete_s(GROUP_DN_2) for idx in range(1, 5): diff --git a/dirsrvtests/tests/tickets/ticket49192_test.py b/dirsrvtests/tests/tickets/ticket49192_test.py index f770ba71e..6d2866ef6 100644 --- a/dirsrvtests/tests/tickets/ticket49192_test.py +++ b/dirsrvtests/tests/tickets/ticket49192_test.py @@ -39,7 +39,7 @@ def test_ticket49192(topo): 'objectclass': 'top organization'.split(), 'o': 'hang.com'}))) except ldap.LDAPError as e: - log.fatal('Failed to create 2nd suffix: error ' + e.message['desc']) + log.fatal('Failed to create 2nd suffix: error ' + e.args[0]['desc']) assert False # @@ -54,7 +54,7 @@ def test_ticket49192(topo): 'nsManagedRoleDefinition'], 'cn': 'nsManagedDisabledRole'}))) except ldap.LDAPError as e: - log.fatal('Failed to add managed role: error ' + e.message['desc']) + log.fatal('Failed to add managed role: error ' + e.args[0]['desc']) assert False try: @@ -66,7 +66,7 @@ def test_ticket49192(topo): 'cn': 'nsDisabledRole', 'nsRoledn': 'cn=nsManagedDisabledRole,' + MY_SUFFIX}))) except ldap.LDAPError as e: - log.fatal('Failed to add nested role: error ' + e.message['desc']) + log.fatal('Failed to add nested role: error ' + e.args[0]['desc']) assert False try: @@ -74,7 +74,7 @@ def test_ticket49192(topo): 'objectclass': ['top', 'nsContainer'], 'cn': 'nsAccountInactivationTmp'}))) 
except ldap.LDAPError as e: - log.fatal('Failed to add container: error ' + e.message['desc']) + log.fatal('Failed to add container: error ' + e.args[0]['desc']) assert False try: @@ -83,7 +83,7 @@ def test_ticket49192(topo): 'ldapsubentry'], 'nsAccountLock': 'true'}))) except ldap.LDAPError as e: - log.fatal('Failed to add cos1: error ' + e.message['desc']) + log.fatal('Failed to add cos1: error ' + e.args[0]['desc']) assert False try: @@ -95,7 +95,7 @@ def test_ticket49192(topo): 'cosSpecifier': 'nsRole', 'cosAttribute': 'nsAccountLock operational'}))) except ldap.LDAPError as e: - log.fatal('Failed to add cos2 : error ' + e.message['desc']) + log.fatal('Failed to add cos2 : error ' + e.args[0]['desc']) assert False # @@ -108,7 +108,7 @@ def test_ticket49192(topo): 'userpassword': 'password', }))) except ldap.LDAPError as e: - log.fatal('Failed to add user: error ' + e.message['desc']) + log.fatal('Failed to add user: error ' + e.args[0]['desc']) assert False # @@ -118,9 +118,9 @@ def test_ticket49192(topo): topo.standalone.modify_s(USER_DN, [(ldap.MOD_ADD, 'nsRoleDN', - 'cn=nsManagedDisabledRole,' + MY_SUFFIX)]) + ensure_bytes('cn=nsManagedDisabledRole,' + MY_SUFFIX))]) except ldap.LDAPError as e: - log.fatal('Failed to disable user: error ' + e.message['desc']) + log.fatal('Failed to disable user: error ' + e.args[0]['desc']) assert False time.sleep(1) @@ -133,14 +133,14 @@ def test_ticket49192(topo): except ldap.UNWILLING_TO_PERFORM: log.info('Got error 53 as expected') except ldap.LDAPError as e: - log.fatal('Bind has unexpected error ' + e.message['desc']) + log.fatal('Bind has unexpected error ' + e.args[0]['desc']) assert False # Bind as root DN try: topo.standalone.simple_bind_s(DN_DM, PASSWORD) except ldap.LDAPError as e: - log.fatal('RootDN Bind has unexpected error ' + e.message['desc']) + log.fatal('RootDN Bind has unexpected error ' + e.args[0]['desc']) assert False # diff --git a/dirsrvtests/tests/tickets/ticket49227_test.py b/dirsrvtests/tests/tickets/ticket49227_test.py index 494063fc3..2d83ddb28 100644 --- a/dirsrvtests/tests/tickets/ticket49227_test.py +++ b/dirsrvtests/tests/tickets/ticket49227_test.py @@ -16,16 +16,16 @@ else: logging.getLogger(__name__).setLevel(logging.INFO) log = logging.getLogger(__name__) -DEFAULT_LEVEL = "16384" -COMB_LEVEL = "73864" # 65536+8192+128+8 = 73864 -COMB_DEFAULT_LEVEL = "90248" # 65536+8192+128+8+16384 = 90248 +DEFAULT_LEVEL = b"16384" +COMB_LEVEL = b"73864" # 65536+8192+128+8 = 73864 +COMB_DEFAULT_LEVEL = b"90248" # 65536+8192+128+8+16384 = 90248 def set_level(topo, level): ''' Set the error log level ''' try: - topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', level)]) + topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(level))]) time.sleep(1) except ldap.LDAPError as e: log.fatal('Failed to set loglevel to %s - error: %s' % (level, str(e))) @@ -66,7 +66,7 @@ def test_ticket49227(topo): # Set connection logging set_level(topo, '8') level = get_level(topo) - if level != '8': + if level != b'8': log.fatal('Incorrect connection logging level: %s' % (level)) assert False diff --git a/dirsrvtests/tests/tickets/ticket49386_test.py b/dirsrvtests/tests/tickets/ticket49386_test.py index 241909200..bbf5c7b58 100644 --- a/dirsrvtests/tests/tickets/ticket49386_test.py +++ b/dirsrvtests/tests/tickets/ticket49386_test.py @@ -3,7 +3,7 @@ import pytest import os import ldap import time -from lib389.utils import ds_is_older +from lib389.utils import * from 
lib389.topologies import topology_st as topo from lib389._constants import * from lib389.config import Config @@ -40,7 +40,7 @@ def add_group(server, nr, sleep=True): time.sleep(2) def update_member(server, member_dn, group_dn, op, sleep=True): - mod = [(op, 'member', member_dn)] + mod = [(op, 'member', ensure_bytes(member_dn))] server.modify_s(group_dn, mod) if sleep: time.sleep(2) @@ -51,8 +51,8 @@ def config_memberof(server): MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAllBackends', - 'on'), - (ldap.MOD_REPLACE, 'memberOfAutoAddOC', 'nsMemberOf')]) + b'on'), + (ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) def _find_memberof(server, member_dn, group_dn, find_result=True): @@ -64,7 +64,7 @@ def _find_memberof(server, member_dn, group_dn, find_result=True): server.log.info("!!!!!!! %s: memberof->%s" % (member_dn, val)) server.log.info("!!!!!!! %s" % (val)) server.log.info("!!!!!!! %s" % (group_dn)) - if val.lower() == group_dn.lower(): + if val.lower() == ensure_bytes(group_dn.lower()): found = True break
0
8879ed2efa48e96f2b920a3ab83036b07e3b3ae4
389ds/389-ds-base
Ticket 47329 - Improve slapi_back_transaction_begin() return code when transactions are not available Bug Description: The slapi_back_transaction_begin() function needs its return codes to be changed to be more friendly for plug-in writers when transactions are not available. Fix Description: Added new error code SLAPI_BACK_TRANSACTION_NOT_SUPPORTED, and updated slapi-plugin.h https://fedorahosted.org/389/ticket/47329 Reviewed by: Noriko, Ludwig, and Rich (Thanks!!!)
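For illustration, here is a minimal, hedged sketch of how a plug-in might consume the new return code. Only slapi_back_transaction_begin()/commit()/abort() and SLAPI_BACK_TRANSACTION_NOT_SUPPORTED come from this patch; the wrapper function and the do_update() helper are hypothetical.

#include "slapi-plugin.h"

/* Hypothetical placeholder for the plug-in's own work. */
static int
do_update(Slapi_PBlock *pb)
{
    (void)pb;
    return 0;
}

/* Sketch: wrap a backend update in a transaction when the backend supports
 * one, and fall back gracefully otherwise.  pb is assumed to already carry
 * the target backend in SLAPI_BACKEND. */
static int
example_txn_aware_update(Slapi_PBlock *pb)
{
    int txn_supported = 1;
    int rc = slapi_back_transaction_begin(pb);

    if (rc == SLAPI_BACK_TRANSACTION_NOT_SUPPORTED) {
        /* New with this ticket: this backend has no transaction support,
         * so proceed without transactional protection instead of treating
         * the missing begin function as a hard error. */
        txn_supported = 0;
    } else if (rc != 0) {
        return rc; /* genuine failure while starting the transaction */
    }

    rc = do_update(pb);

    if (txn_supported) {
        if (rc != 0) {
            (void)slapi_back_transaction_abort(pb);
        } else {
            rc = slapi_back_transaction_commit(pb);
        }
    }
    return rc;
}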
commit 8879ed2efa48e96f2b920a3ab83036b07e3b3ae4 Author: Mark Reynolds <[email protected]> Date: Fri Jun 21 10:47:09 2013 -0400 Ticket 47329 - Improve slapi_back_transaction_begin() return code when transactions are not available Bug Description: The slapi_back_transaction_begin() function needs it's return codes to be changed to be more friendly for plug-in writers when transactions are not available. Fix Description: Added new error code SLAPI_BACK_TRANSACTION_NOT_SUPPORTED, and updated the slapi_plugin.h https://fedorahosted.org/389/ticket/47329 Reviewed by: Noriko, Ludwig, and Rich(Thanks!!!) diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c index ad253f164..ead251e6c 100644 --- a/ldap/servers/slapd/backend.c +++ b/ldap/servers/slapd/backend.c @@ -648,8 +648,13 @@ int slapi_back_transaction_begin(Slapi_PBlock *pb) { IFP txn_begin; - slapi_pblock_get(pb, SLAPI_PLUGIN_DB_BEGIN_FN, (void*)&txn_begin); - return txn_begin(pb); + if(slapi_pblock_get(pb, SLAPI_PLUGIN_DB_BEGIN_FN, (void*)&txn_begin) || + !txn_begin) + { + return SLAPI_BACK_TRANSACTION_NOT_SUPPORTED; + } else { + return txn_begin(pb); + } } /* API to expose DB transaction commit */ @@ -657,7 +662,13 @@ int slapi_back_transaction_commit(Slapi_PBlock *pb) { IFP txn_commit; - slapi_pblock_get(pb, SLAPI_PLUGIN_DB_COMMIT_FN, (void*)&txn_commit); + if(slapi_pblock_get(pb, SLAPI_PLUGIN_DB_COMMIT_FN, (void*)&txn_commit) || + !txn_commit) + { + return SLAPI_BACK_TRANSACTION_NOT_SUPPORTED; + } else { + return txn_commit(pb); + } return txn_commit(pb); } @@ -666,6 +677,11 @@ int slapi_back_transaction_abort(Slapi_PBlock *pb) { IFP txn_abort; - slapi_pblock_get(pb, SLAPI_PLUGIN_DB_ABORT_FN, (void*)&txn_abort); - return txn_abort(pb); + if(slapi_pblock_get(pb, SLAPI_PLUGIN_DB_ABORT_FN, (void*)&txn_abort) || + !txn_abort) + { + return SLAPI_BACK_TRANSACTION_NOT_SUPPORTED; + } else { + return txn_abort(pb); + } } diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index d7d968d56..d1e90de46 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -6061,6 +6061,7 @@ const char * slapi_be_gettype(Slapi_Backend *be); * * \param pb Pblock which is supposed to set (Slapi_Backend *) to SLAPI_BACKEND * \return 0 if successful + * \return SLAPI_BACK_TRANSACTION_NOT_SUPPORTED if transaction support is not available for this backend * \return Non-zero if an error occurred * * \see slapi_back_transaction_commit @@ -6908,6 +6909,7 @@ typedef struct slapi_plugindesc { #define SLAPI_PARENT_TXN 190 #define SLAPI_TXN 191 #define SLAPI_TXN_RUV_MODS_FN 1901 +#define SLAPI_BACK_TRANSACTION_NOT_SUPPORTED 1902 /* * The following are used to pass information back and forth
0
6cc77119625e8c63d27ff1a6a8e8a7159c3abbfe
389ds/389-ds-base
Resolves: 207567 Summary: Corrected search scope used to find entries to sync in winsync total update protocol.
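As a hedged illustration of the corrected pattern (not the plug-in's actual code), the sketch below runs an internal subtree search with the filter visible in the diff; the wrapper function, logging tag, and the two callbacks are placeholders.

#include "slapi-plugin.h"

/* Entry callback: called once per matching ntuser/ntgroup entry. */
static int
example_send_entry(Slapi_Entry *e, void *callback_data)
{
    (void)callback_data;
    slapi_log_error(SLAPI_LOG_PLUGIN, "example-winsync",
                    "would sync %s\n", slapi_entry_get_dn_const(e));
    return 0;
}

/* Result callback: records the final search result code. */
static void
example_get_result(int rc, void *callback_data)
{
    *(int *)callback_data = rc;
}

/* Sketch: use LDAP_SCOPE_SUBTREE (rather than LDAP_SCOPE_ONELEVEL) so
 * entries nested below the agreement suffix are also found during a
 * total update. */
static int
example_total_update_search(const char *suffix_dn, Slapi_ComponentId *identity)
{
    Slapi_PBlock *pb = slapi_pblock_new();
    int result = 0;

    slapi_search_internal_set_pb(pb, suffix_dn, LDAP_SCOPE_SUBTREE,
                                 "(|(objectclass=ntuser)(objectclass=ntgroup))",
                                 NULL /* attrs */, 0 /* attrsonly */,
                                 NULL /* controls */, NULL /* uniqueid */,
                                 identity, 0 /* operation flags */);
    slapi_search_internal_callback_pb(pb, &result,
                                      example_get_result,
                                      example_send_entry,
                                      NULL /* referral callback */);
    slapi_pblock_free(pb);
    return result;
}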
commit 6cc77119625e8c63d27ff1a6a8e8a7159c3abbfe Author: Nathan Kinder <[email protected]> Date: Tue Sep 4 15:45:57 2007 +0000 Resolves: 207567 Summary: Corrected search scope used to find entries to sync in winsync total update protocol. diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c index 573ee587d..4ff364af6 100644 --- a/ldap/servers/plugins/replication/windows_protocol_util.c +++ b/ldap/servers/plugins/replication/windows_protocol_util.c @@ -3028,23 +3028,11 @@ windows_process_total_add(Private_Repl_Protocol *prp,Slapi_Entry *e, Slapi_DN* r return retval; } -static int -windows_process_total_delete(Private_Repl_Protocol *prp,Slapi_Entry *e, Slapi_DN* remote_dn) -{ - int retval = 0; - if (delete_remote_entry_allowed(e)) - { - retval = windows_conn_send_delete(prp->conn, slapi_sdn_get_dn(remote_dn), NULL, NULL /* returned controls */); - } - return retval; -} - /* Entry point for the total protocol */ int windows_process_total_entry(Private_Repl_Protocol *prp,Slapi_Entry *e) { int retval = 0; int is_ours = 0; - int is_tombstone = 0; Slapi_DN *remote_dn = NULL; int missing_entry = 0; const Slapi_DN *local_dn = slapi_entry_get_sdn_const(e); @@ -3063,14 +3051,7 @@ int windows_process_total_entry(Private_Repl_Protocol *prp,Slapi_Entry *e) agmt_get_long_name(prp->agmt), slapi_sdn_get_dn(local_dn)); goto error; } - /* Either the entry is a tombstone, or not a tombstone */ - if (is_tombstone) - { - retval = windows_process_total_delete(prp,e,remote_dn); - } else - { - retval = windows_process_total_add(prp,e,remote_dn,missing_entry); - } + retval = windows_process_total_add(prp,e,remote_dn,missing_entry); } if (remote_dn) { diff --git a/ldap/servers/plugins/replication/windows_tot_protocol.c b/ldap/servers/plugins/replication/windows_tot_protocol.c index 0ecef1597..597d80110 100644 --- a/ldap/servers/plugins/replication/windows_tot_protocol.c +++ b/ldap/servers/plugins/replication/windows_tot_protocol.c @@ -171,8 +171,10 @@ windows_tot_run(Private_Repl_Protocol *prp) dn = slapi_sdn_get_dn( windows_private_get_directory_subtree(prp->agmt)); pb = slapi_pblock_new (); - slapi_search_internal_set_pb (pb, dn, /* XXX modify the searchfilter and scope? */ - LDAP_SCOPE_ONELEVEL, "(|(objectclass=ntuser)(objectclass=ntgroup)(nsuniqueid=*))", NULL, 0, NULL, NULL, + /* Perform a subtree search for any ntuser or ntgroup entries underneath the + * suffix defined in the sync agreement. */ + slapi_search_internal_set_pb (pb, dn, + LDAP_SCOPE_SUBTREE, "(|(objectclass=ntuser)(objectclass=ntgroup))", NULL, 0, NULL, NULL, repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0); cb_data.prp = prp; cb_data.rc = 0; @@ -180,8 +182,6 @@ windows_tot_run(Private_Repl_Protocol *prp) cb_data.sleep_on_busy = 0UL; cb_data.last_busy = current_time (); - /* this search get all the entries from the replicated area including tombstones - and referrals */ slapi_search_internal_callback_pb (pb, &cb_data /* callback data */, get_result /* result callback */, send_entry /* entry callback */,
0
e9941a2915ac848abe9a4afe802d0432aa0c354a
389ds/389-ds-base
Coverity defects The commit b9eeb2e1a8e688dfec753e8965d0e5aeb119e638 for Ticket #481 "expand nested posix groups" introduced 4 Coverity defects. Description: 13100, 13101: Missing return statement Fix description: addUserToGroupMembership and propogateDeletionsUpwardCallback are declared to return an integer value, but nothing was returned. This patch changes them to return 0. 13102: Resource leak Fix description: The memory for the valueset muid_old_vs is allocated internally. It was meant to be copied into muid_upward_vs and freed along with it, but due to the order of the function calls it was never properly set and lost the chance to be freed. This patch calls slapi_attr_get_valueset prior to slapi_valueset_set_valueset, so that muid_old_vs is populated before the copy and can be freed as intended. 13103: Uninitialized pointer read Fix description: A possibly uninitialized variable was passed to the logging function slapi_log_error, although it was never actually referenced. With this patch, the variable filter is no longer passed to the function.
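To make the ordering fix for defect 13102 concrete, here is a hedged, self-contained sketch. The surrounding function and the variable lifetimes are simplified; only the slapi_* calls and the valueset names come from the patch.

#include "slapi-plugin.h"

/* Sketch of the corrected call order: fetch the internally allocated copy
 * first, then merge it into the working set, so the copy is reachable and
 * can be freed. */
static void
example_merge_memberuids(Slapi_Attr *muid_old_attr)
{
    Slapi_ValueSet *muid_old_vs = NULL;
    Slapi_ValueSet *muid_upward_vs = slapi_valueset_new();

    /* Allocates a copy of the attribute's values into muid_old_vs.
     * Before the fix this ran *after* the merge below, so the merge saw
     * an unset muid_old_vs and the copy lost its chance to be freed. */
    slapi_attr_get_valueset(muid_old_attr, &muid_old_vs);
    slapi_valueset_set_valueset(muid_upward_vs, muid_old_vs);

    /* ... use muid_upward_vs ... */

    slapi_valueset_free(muid_old_vs);
    slapi_valueset_free(muid_upward_vs);
}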
commit e9941a2915ac848abe9a4afe802d0432aa0c354a Author: Noriko Hosoi <[email protected]> Date: Wed Oct 24 15:27:48 2012 -0700 Coverity defects The commit b9eeb2e1a8e688dfec753e8965d0e5aeb119e638 for Ticket #481 "expand nested posix groups" introduced 4 coverity defects. Description: 13100, 13101: Missing return statement Fix description: addUserToGroupMembership and propogateDeletion- UpwardCallback are declared to return an integer value, but nothing was returned. This patch changes it to return 0. 13102: Resource leak Fix description: The memory of valueset muid_old_vs is internally allocated. It was meant to be set to muid_upward_vs and freed together when muid_upward_vs is freed. But due to the function calling order, it was not properly set and it lost the chance to be freed. This patch calls slapi_attr_get_valueset prior to slapi_valueset_set_valueset and let free muid_old_vs together with slapi_valueset_set_valueset. 13103: Uninitialized pointer read Fix description: Possibly uninitialized variable was passed to a logging function slapi_log_error, but actually it was not referred. With this patch, the variable filter is no longer to passed to the function. diff --git a/ldap/servers/plugins/posix-winsync/posix-group-func.c b/ldap/servers/plugins/posix-winsync/posix-group-func.c index 66b927222..be3a6ece3 100644 --- a/ldap/servers/plugins/posix-winsync/posix-group-func.c +++ b/ldap/servers/plugins/posix-winsync/posix-group-func.c @@ -473,15 +473,15 @@ propogateMembershipUpward(Slapi_Entry *entry, Slapi_ValueSet *muid_vs, int depth muid_here_vs = muid_vs; } else { + int i = 0; + Slapi_Value *v = NULL; /* Eliminate duplicates */ muid_upward_vs = slapi_valueset_new(); muid_here_vs = slapi_valueset_new(); + slapi_attr_get_valueset(muid_old_attr, &muid_old_vs); slapi_valueset_set_valueset(muid_upward_vs, muid_old_vs); - slapi_attr_get_valueset(muid_old_attr, &muid_old_vs); - int i = 0; - Slapi_Value *v = NULL; for (i = slapi_valueset_first_value(muid_vs, &v); i != -1; i = slapi_valueset_next_value(muid_vs, i, &v)) { @@ -542,6 +542,7 @@ propogateDeletionsUpwardCallback(Slapi_Entry *entry, void *callback_data) { struct propogateDeletionsUpwardArgs *args = (struct propogateDeletionsUpwardArgs *)(callback_data); propogateDeletionsUpward(entry, args->base_sdn, args->smod_deluids, args->del_nested_vs, args->depth); + return 0; } void @@ -920,6 +921,7 @@ addUserToGroupMembership(Slapi_Entry *entry) propogateMembershipUpward(entry, muid_vs, 0); slapi_valueset_free(muid_vs); muid_vs = NULL; + return 0; } int diff --git a/ldap/servers/plugins/posix-winsync/posix-group-task.c b/ldap/servers/plugins/posix-winsync/posix-group-task.c index 4555f1b6b..e5385b036 100644 --- a/ldap/servers/plugins/posix-winsync/posix-group-task.c +++ b/ldap/servers/plugins/posix-winsync/posix-group-task.c @@ -152,9 +152,9 @@ posix_group_task_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int rv = SLAPI_DSE_CALLBACK_OK; } - out: +out: slapi_log_error(SLAPI_LOG_PLUGIN, POSIX_WINSYNC_PLUGIN_NAME, - "posix_group_task_add: <==\n", filter); + "posix_group_task_add: <==\n"); return rv; }
0
81b997480956b2b6fa3a5d0e8d6abf5113a06400
389ds/389-ds-base
Ticket #627 - ns-slapd crashes sporadically with segmentation fault in libslapd.so Bug Description: The schema reload task (schema-reload.pl) was not thread safe. Fix Description: Attribute syntax objects are stored in hash tables and retrieved by attribute name or OID. When the schema reload task was invoked, the attribute syntax objects were completely replaced while ignoring the lock protection. This patch protects the attribute syntax replacement (attr_syntax_delete_all_for_schemareload) with the write lock. Also, each attribute syntax object maintains a reference count, and the schema reload now respects that reference count instead of blindly deleting the objects. https://fedorahosted.org/389/ticket/627 Reviewed by Rich (Thank you!!)
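For readers following the locking change, a short hedged sketch of the critical section described above. This is server-internal code (declarations live in proto-slap.h), not plug-in API, and the wrapper function is illustrative only; the four calls and SLAPI_ATTR_FLAG_KEEP come from the patch.

/* Sketch: the schema reload now excludes attr-syntax readers while the old
 * definitions are discarded.  Per the description above, objects still
 * referenced are marked for delete rather than freed immediately. */
static void
example_schema_reload_swap(void)
{
    attr_syntax_write_lock();
    attr_syntax_delete_all_for_schemareload(SLAPI_ATTR_FLAG_KEEP);
    oc_delete_all_nolock();
    attr_syntax_unlock_write();
}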
commit 81b997480956b2b6fa3a5d0e8d6abf5113a06400 Author: Noriko Hosoi <[email protected]> Date: Mon Mar 25 14:37:18 2013 -0700 Ticket #627 - ns-slapd crashes sporadically with segmentation fault in libslapd.so Bug Description: Schema reload task (schema-reload.pl) was not thread safe. Fix Description: Attribute Syntax is stored in the hash and retrieved based upon the attribute syntax. When Schema reload task is invoked, the attribute syntax objects were completely replaced ignoring the lock protection. This patch protects the attribute syntax replacement (attr_syntax_delete_all_for_ schemareload) with the write lock. Also, attribute syntax object maintains the reference count. The schema reload respects the reference count instead of blindly deleting them. https://fedorahosted.org/389/ticket/627 Reviewed by Rich (Thank you!!) diff --git a/ldap/servers/slapd/attrsyntax.c b/ldap/servers/slapd/attrsyntax.c index 227369c81..4326f0394 100644 --- a/ldap/servers/slapd/attrsyntax.c +++ b/ldap/servers/slapd/attrsyntax.c @@ -96,13 +96,28 @@ attr_syntax_read_lock(void) AS_LOCK_READ(name2asi_lock); } +void +attr_syntax_write_lock(void) +{ + if (0 != attr_syntax_init()) return; + + AS_LOCK_WRITE(oid2asi_lock); + AS_LOCK_WRITE(name2asi_lock); +} + void attr_syntax_unlock_read(void) { - if(name2asi_lock) AS_UNLOCK_READ(name2asi_lock); - if(oid2asi_lock) AS_UNLOCK_READ(oid2asi_lock); + AS_UNLOCK_READ(name2asi_lock); + AS_UNLOCK_READ(oid2asi_lock); } +void +attr_syntax_unlock_write(void) +{ + AS_UNLOCK_WRITE(name2asi_lock); + AS_UNLOCK_WRITE(oid2asi_lock); +} #if 0 @@ -233,13 +248,17 @@ attr_syntax_get_by_oid_locking_optional( const char *oid, PRBool use_lock ) struct asyntaxinfo *asi = 0; if (oid2asi) { - if ( use_lock ) AS_LOCK_READ(oid2asi_lock); + if ( use_lock ) { + AS_LOCK_READ(oid2asi_lock); + } asi = (struct asyntaxinfo *)PL_HashTableLookup_const(oid2asi, oid); if (asi) { PR_AtomicIncrement( &asi->asi_refcnt ); } - if ( use_lock ) AS_UNLOCK_READ(oid2asi_lock); + if ( use_lock ) { + AS_UNLOCK_READ(oid2asi_lock); + } } return asi; @@ -257,13 +276,15 @@ attr_syntax_add_by_oid(const char *oid, struct asyntaxinfo *a, int lock) { if (0 != attr_syntax_init()) return; - if (lock) + if (lock) { AS_LOCK_WRITE(oid2asi_lock); + } PL_HashTableAdd(oid2asi, oid, a); - if (lock) + if (lock) { AS_UNLOCK_WRITE(oid2asi_lock); + } } /* @@ -304,12 +325,16 @@ attr_syntax_get_by_name_locking_optional(const char *name, PRBool use_lock) struct asyntaxinfo *asi = 0; if (name2asi) { - if ( use_lock ) AS_LOCK_READ(name2asi_lock); + if ( use_lock ) { + AS_LOCK_READ(name2asi_lock); + } asi = (struct asyntaxinfo *)PL_HashTableLookup_const(name2asi, name); if ( NULL != asi ) { PR_AtomicIncrement( &asi->asi_refcnt ); } - if ( use_lock ) AS_UNLOCK_READ(name2asi_lock); + if ( use_lock ) { + AS_UNLOCK_READ(name2asi_lock); + } } if (!asi) /* given name may be an OID */ asi = attr_syntax_get_by_oid_locking_optional(name, use_lock); @@ -331,30 +356,38 @@ attr_syntax_return( struct asyntaxinfo *asi ) } void -attr_syntax_return_locking_optional( struct asyntaxinfo *asi, PRBool use_lock ) +attr_syntax_return_locking_optional(struct asyntaxinfo *asi, PRBool use_lock) { + int locked = 0; + if(use_lock) { + AS_LOCK_READ(name2asi_lock); + locked = 1; + } if ( NULL != asi ) { - if ( 0 == PR_AtomicDecrement( &asi->asi_refcnt )) - { - PRBool delete_it; - - if(use_lock) AS_LOCK_READ(name2asi_lock); + PRBool delete_it = PR_FALSE; + if ( 0 == PR_AtomicDecrement( &asi->asi_refcnt )) { delete_it = asi->asi_marked_for_delete; - if(use_lock) 
AS_UNLOCK_READ(name2asi_lock); - - if ( delete_it ) - { - AS_LOCK_WRITE(name2asi_lock); /* get a write lock */ - if ( asi->asi_marked_for_delete ) /* one final check */ - { - /* ref count is 0 and it's flagged for - * deletion, so it's safe to free now */ - attr_syntax_free(asi); + } + + if (delete_it) { + if ( asi->asi_marked_for_delete ) { /* one final check */ + if(use_lock) { + AS_UNLOCK_READ(name2asi_lock); + AS_LOCK_WRITE(name2asi_lock); + } + /* ref count is 0 and it's flagged for + * deletion, so it's safe to free now */ + attr_syntax_free(asi); + if(use_lock) { + AS_UNLOCK_WRITE(name2asi_lock); + locked = 0; } - AS_UNLOCK_WRITE(name2asi_lock); } } } + if(locked) { + AS_UNLOCK_READ(name2asi_lock); + } } /* @@ -371,8 +404,9 @@ attr_syntax_add_by_name(struct asyntaxinfo *a, int lock) { if (0 != attr_syntax_init()) return; - if (lock) + if (lock) { AS_LOCK_WRITE(name2asi_lock); + } PL_HashTableAdd(name2asi, a->asi_name, a); if ( a->asi_aliases != NULL ) { @@ -383,8 +417,9 @@ attr_syntax_add_by_name(struct asyntaxinfo *a, int lock) } } - if (lock) + if (lock) { AS_UNLOCK_WRITE(name2asi_lock); + } } @@ -990,11 +1025,11 @@ attr_syntax_enumerate_attrs(AttrEnumFunc aef, void *arg, PRBool writelock ) attr_syntax_enumerate_attrs_ext(oid2asi, aef, arg); if ( writelock ) { - AS_UNLOCK_WRITE(oid2asi_lock); AS_UNLOCK_WRITE(name2asi_lock); + AS_UNLOCK_WRITE(oid2asi_lock); } else { - AS_UNLOCK_READ(oid2asi_lock); AS_UNLOCK_READ(name2asi_lock); + AS_UNLOCK_READ(oid2asi_lock); } } @@ -1092,6 +1127,21 @@ attr_syntax_delete_all() (void *)&fi, PR_TRUE ); } +/* + * Delete all attribute definitions without attr_syntax lock. + * The caller is responsible for the lock. + */ +void +attr_syntax_delete_all_for_schemareload(unsigned long flag) +{ + struct attr_syntax_enum_flaginfo fi; + + memset(&fi, 0, sizeof(fi)); + fi.asef_flag = flag; + attr_syntax_enumerate_attrs_ext(oid2asi, attr_syntax_delete_if_not_flagged, + (void *)&fi); +} + static int attr_syntax_init(void) { @@ -1215,13 +1265,19 @@ static int attr_syntax_internal_asi_add(struct asyntaxinfo *asip, void *arg) { struct asyntaxinfo *asip_copy; + int rc = 0; + if (!asip) { return 1; } /* Copy is needed since when reloading the schema, * existing syntax info is cleaned up. */ asip_copy = attr_syntax_dup(asip); - return attr_syntax_add(asip_copy); + rc = attr_syntax_add(asip_copy); + if (LDAP_SUCCESS != rc) { + attr_syntax_free(asip_copy); + } + return rc; } /* Reload internal attribute syntax stashed in the internalasi hashtable. 
*/ diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h index 4077daa89..a34136f76 100644 --- a/ldap/servers/slapd/proto-slap.h +++ b/ldap/servers/slapd/proto-slap.h @@ -114,7 +114,9 @@ int attrlist_replace_with_flags(Slapi_Attr **alist, const char *type, struct ber * attrsyntax.c */ void attr_syntax_read_lock(void); +void attr_syntax_write_lock(void); void attr_syntax_unlock_read(void); +void attr_syntax_unlock_write(void); int attr_syntax_exists (const char *attr_name); void attr_syntax_delete ( struct asyntaxinfo *asip ); #define SLAPI_SYNTAXLENGTH_NONE (-1) /* for syntaxlength parameter */ @@ -142,6 +144,7 @@ struct asyntaxinfo *attr_syntax_get_by_name_locking_optional ( const char *name, void attr_syntax_return( struct asyntaxinfo *asi ); void attr_syntax_return_locking_optional( struct asyntaxinfo *asi, PRBool use_lock ); void attr_syntax_delete_all(void); +void attr_syntax_delete_all_for_schemareload(unsigned long flag); /* * value.c diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c index 5eb6520d7..dc565340c 100644 --- a/ldap/servers/slapd/schema.c +++ b/ldap/servers/slapd/schema.c @@ -1393,7 +1393,7 @@ slapi_schema_list_attribute_names(unsigned long flag) aew.flag=flag; attr_syntax_enumerate_attrs(schema_list_attributes_callback, &aew, - PR_FALSE); + PR_FALSE); return aew.attrs; } @@ -2405,8 +2405,9 @@ static int schema_replace_attributes ( Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, size_t errorbufsize ) { - int i, rc = LDAP_SUCCESS; - struct asyntaxinfo *newasip, *oldasip; + int i, rc = LDAP_SUCCESS; + struct asyntaxinfo *newasip, *oldasip; + PRUint32 schema_flags = 0; if ( NULL == mod->mod_bvalues ) { schema_create_errormsg( errorbuf, errorbufsize, schema_errprefix_at, @@ -2414,8 +2415,11 @@ schema_replace_attributes ( Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, return LDAP_UNWILLING_TO_PERFORM; } - /* clear all of the "keep" flags */ - attr_syntax_all_clear_flag( SLAPI_ATTR_FLAG_KEEP ); + slapi_pblock_get(pb, SLAPI_SCHEMA_FLAGS, &schema_flags); + if (!(schema_flags & (DSE_SCHEMA_NO_LOAD|DSE_SCHEMA_NO_CHECK))) { + /* clear all of the "keep" flags unless it's from schema-reload */ + attr_syntax_all_clear_flag( SLAPI_ATTR_FLAG_KEEP ); + } for ( i = 0; mod->mod_bvalues[i] != NULL; ++i ) { if ( LDAP_SUCCESS != ( rc = read_at_ldif( mod->mod_bvalues[i]->bv_val, @@ -2473,12 +2477,14 @@ schema_replace_attributes ( Slapi_PBlock *pb, LDAPMod *mod, char *errorbuf, * XXXmcs: we should consider reporting an error if any read only types * remain.... */ - attr_syntax_delete_all_not_flagged( SLAPI_ATTR_FLAG_KEEP - | SLAPI_ATTR_FLAG_STD_ATTR ); + attr_syntax_delete_all_not_flagged( SLAPI_ATTR_FLAG_KEEP | + SLAPI_ATTR_FLAG_STD_ATTR ); clean_up_and_return: - /* clear all of the "keep" flags */ - attr_syntax_all_clear_flag( SLAPI_ATTR_FLAG_KEEP ); + if (!(schema_flags & (DSE_SCHEMA_NO_LOAD|DSE_SCHEMA_NO_CHECK))) { + /* clear all of the "keep" flags unless it's from schema-reload */ + attr_syntax_all_clear_flag( SLAPI_ATTR_FLAG_KEEP ); + } return rc; } @@ -3894,14 +3900,12 @@ load_schema_dse(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *ignored, int primary_file = 0; /* this is the primary (writeable) schema file */ int schema_ds4x_compat = config_get_ds4_compatible_schema(); PRUint32 flags = *(PRUint32 *)arg; - flags |= DSE_SCHEMA_NO_GLOCK; /* don't lock global resources - during initialization */ *returncode = 0; /* * Note: there is no need to call schema_lock_write() here because this - * function is only called during server startup. 
+ * function is only called during server startup. */ slapi_pblock_get( pb, SLAPI_DSE_IS_PRIMARY_FILE, &primary_file ); @@ -3943,6 +3947,8 @@ load_schema_dse(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *ignored, if (*returncode) return SLAPI_DSE_CALLBACK_ERROR; + flags |= DSE_SCHEMA_NO_GLOCK; /* don't lock global resources + during initialization */ if (!slapi_entry_attr_find(e, "objectclasses", &attr) && attr) { /* enumerate the values in attr */ @@ -4013,7 +4019,6 @@ load_schema_dse(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *ignored, * DSE_SCHEMA_NO_CHECK -- schema won't be checked * DSE_SCHEMA_NO_BACKEND -- don't add as backend * DSE_SCHEMA_LOCKED -- already locked; no further lock needed - */ static int init_schema_dse_ext(char *schemadir, Slapi_Backend *be, @@ -4119,7 +4124,7 @@ init_schema_dse_ext(char *schemadir, Slapi_Backend *be, "DESC 'Standard schema for LDAP' SYNTAX " "1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'RFC 2252' )", NULL, errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, - DSE_SCHEMA_NO_GLOCK|schema_flags, 0, 0, 0); + schema_flags, 0, 0, 0); } if (rc) { @@ -4192,7 +4197,7 @@ init_schema_dse(const char *configdir) { schemadir = slapi_ch_smprintf("%s/%s", configdir, SCHEMA_SUBDIR_NAME); } - rc = init_schema_dse_ext(schemadir, NULL, &pschemadse, 0); + rc = init_schema_dse_ext(schemadir, NULL, &pschemadse, DSE_SCHEMA_NO_GLOCK); slapi_ch_free_string(&schemadir); return rc; } @@ -4856,14 +4861,14 @@ slapi_validate_schema_files(char *schemadir) { struct dse *my_pschemadse = NULL; int rc = init_schema_dse_ext(schemadir, NULL, &my_pschemadse, - DSE_SCHEMA_NO_LOAD | DSE_SCHEMA_NO_BACKEND); + DSE_SCHEMA_NO_LOAD | DSE_SCHEMA_NO_BACKEND); dse_destroy(my_pschemadse); /* my_pschemadse was created just to - validate the schema */ + validate the schema */ if (rc) { return LDAP_SUCCESS; } else { slapi_log_error( SLAPI_LOG_FATAL, "schema_reload", - "schema file validation failed\n" ); + "schema file validation failed\n" ); return LDAP_OBJECT_CLASS_VIOLATION; } } @@ -4889,10 +4894,13 @@ slapi_reload_schema_files(char *schemadir) } slapi_be_Wlock(be); /* be lock must be outer of schemafile lock */ reload_schemafile_lock(); - attr_syntax_delete_all(); + /* Exclude attr_syntax not to grab from the hash table while cleaning up */ + attr_syntax_write_lock(); + attr_syntax_delete_all_for_schemareload(SLAPI_ATTR_FLAG_KEEP); oc_delete_all_nolock(); + attr_syntax_unlock_write(); rc = init_schema_dse_ext(schemadir, be, &my_pschemadse, - DSE_SCHEMA_NO_CHECK | DSE_SCHEMA_LOCKED); + DSE_SCHEMA_NO_CHECK | DSE_SCHEMA_LOCKED); if (rc) { dse_destroy(pschemadse); pschemadse = my_pschemadse;
0
9f07f9d751b23bb51792387dd264549f3dab9038
389ds/389-ds-base
Bug 194531 - db2bak is too noisy https://bugzilla.redhat.com/show_bug.cgi?id=194531 Description: Introduced a "-q" option to suppress the backing-up/restoring message for each db file. Usage: db2bak [archivedir] [-q] [-h] Usage: bak2db archivedir [-n backendname] [-q] | [-h] If "-q" is specified, the verbose messages are not printed to the standard error, but only to the errors log. In addition, a new log level SLAPI_LOG_BACKLDBM/LDAP_DEBUG_BACKLDBM has been introduced.
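To show where the new level ends up being used, here is a hedged sketch of a per-file progress message in the back-end. The subsystem tag and the function are illustrative; SLAPI_LOG_BACKLDBM and LDAP_DEBUG_BACKLDBM (0x80000 = 524288) come from this patch.

#include "slapi-plugin.h"

/* Sketch: per-file backup/restore chatter is logged at the new
 * SLAPI_LOG_BACKLDBM severity.  The db2bak/bak2db wrappers add 524288
 * (the LDAP_DEBUG_BACKLDBM bit) to the debug level unless -q is given,
 * which keeps these messages off standard error. */
static void
example_log_backup_progress(const char *db_filename)
{
    slapi_log_error(SLAPI_LOG_BACKLDBM, "example-backup",
                    "Backing up file %s\n", db_filename);
}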
commit 9f07f9d751b23bb51792387dd264549f3dab9038 Author: Noriko Hosoi <[email protected]> Date: Mon Aug 9 16:14:19 2010 -0700 Bug 194531 - db2bak is too noisy https://bugzilla.redhat.com/show_bug.cgi?id=194531 Description: Introduced "-q" option to suppress the backing up/ restoring message for each db file. Usage: db2bak [archivedir] [-q] [-h] Usage: bak2db archivedir [-n backendname] [-q] | [-h] If "-q" is specified, the verbose messages are not prited to the standard error, but just to the errors log. In addition, new log level SLAPI_LOG_BACKLDBM/LDAP_DEBUG_BACKLDBM has been introduced. diff --git a/ldap/admin/src/scripts/template-bak2db.in b/ldap/admin/src/scripts/template-bak2db.in index 12d46ca12..0f9266780 100755 --- a/ldap/admin/src/scripts/template-bak2db.in +++ b/ldap/admin/src/scripts/template-bak2db.in @@ -12,21 +12,38 @@ export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH -if [ $# -lt 1 ] || [ $# -gt 3 ] +if [ $# -lt 1 ] || [ $# -gt 6 ] then - echo "Usage: bak2db archivedir [-n backendname]" + echo "Usage: bak2db archivedir [-n backendname] [-q] | [-h]" exit 1 +elif [ "$1" = "-h" ] +then + echo "Usage: bak2db archivedir [-n backendname] [-q] | [-h]" + exit 0 else archivedir=$1 shift fi -while getopts "n:" flag +benameopt="" +dlevel=0 +quiet=0 +while getopts "hn:qd:" flag do case $flag in - n) bename=$OPTARG;; - *) echo "Usage: bak2db archivedir [-n backendname]"; exit 2;; + h) echo "Usage: bak2db archivedir [-n backendname] [-q] | [-h]" + exit 1;; + n) benameopt="-n $OPTARG";; + q) quiet=1;; + d) dlevel=$OPTARG;; esac done +if [ $quiet -eq 0 ] +then + if [ $dlevel -ne 524288 ] + then + dlevel=`expr $dlevel + 524288` + fi +fi if [ 1 = `expr $archivedir : "\/"` ] then @@ -37,9 +54,4 @@ else fi cd {{SERVERBIN-DIR}} -if [ "$#" -eq 2 ] -then - ./ns-slapd archive2db -D {{CONFIG-DIR}} -a $archivedir -n $bename -else - ./ns-slapd archive2db -D {{CONFIG-DIR}} -a $archivedir -fi +./ns-slapd archive2db -D {{CONFIG-DIR}} -a $archivedir $benameopt -d $dlevel diff --git a/ldap/admin/src/scripts/template-db2bak.in b/ldap/admin/src/scripts/template-db2bak.in index a0fe1f59b..5ff5d0783 100755 --- a/ldap/admin/src/scripts/template-db2bak.in +++ b/ldap/admin/src/scripts/template-db2bak.in @@ -12,13 +12,41 @@ export LD_LIBRARY_PATH SHLIB_PATH=$LD_LIBRARY_PATH export SHLIB_PATH +if [ $# -gt 4 ] +then + echo "Usage: db2bak [archivedir] [-q] [-h]" + exit 1 +fi + +bak_dir={{BAK-DIR}}/{{SERV-ID}}-`date +%Y_%m_%d_%H_%M_%S` +dlevel=0 +quiet=0 cd {{SERVERBIN-DIR}} -if [ "$#" -eq 1 ] +if [ "$#" -gt 0 ] +then + if [ "$1" != "-q" ] && [ "$1" != "-d" ] && [ "$1" != "-h" ] + then + bak_dir=$1 + shift + fi + while getopts "hqd:" flag + do + case $flag in + h) echo "Usage: db2bak [archivedir] [-q] [-h]" + exit 0;; + q) quiet=1;; + d) dlevel=$OPTARG;; + esac + done +fi +# If not quiet, set LDAP_DEBUG_BACKLDBM to debug level +if [ $quiet -eq 0 ] then - bak_dir=$1 -else - bak_dir={{BAK-DIR}}/{{SERV-ID}}-`date +%Y_%m_%d_%H_%M_%S` + if [ $dlevel -ne 524288 ] + then + dlevel=`expr $dlevel + 524288` + fi fi echo "Back up directory: $bak_dir" -./ns-slapd db2archive -D {{CONFIG-DIR}} -a $bak_dir +./ns-slapd db2archive -D {{CONFIG-DIR}} -a $bak_dir -d $dlevel diff --git a/ldap/include/ldaplog.h b/ldap/include/ldaplog.h index ef3559e48..567672630 100644 --- a/ldap/include/ldaplog.h +++ b/ldap/include/ldaplog.h @@ -66,6 +66,7 @@ extern "C" { #define LDAP_DEBUG_PLUGIN 0x10000 /* 65536 */ #define LDAP_DEBUG_TIMING 0x20000 /*131072 */ #define LDAP_DEBUG_ACLSUMMARY 0x40000 /*262144 */ +#define 
LDAP_DEBUG_BACKLDBM 0x80000 /*524288 */ #define LDAP_DEBUG_ALL_LEVELS 0xFFFFF diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c index 36fbe933c..daa116ae1 100644 --- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c +++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c @@ -245,7 +245,7 @@ entryrdn_index_entry(backend *be, rc = LDAP_INVALID_DN_SYNTAX; goto bail; } else if (rc > 0) { - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "entryrdn_index_entry: %s does not belong to " "the db\n", slapi_sdn_get_dn(sdn)); rc = DB_NOTFOUND; @@ -335,7 +335,7 @@ entryrdn_index_read(backend *be, rc = LDAP_INVALID_DN_SYNTAX; goto bail; } else if (rc > 0) { - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "entryrdn_index_read: %s does not belong to the db\n", slapi_sdn_get_dn(sdn)); rc = DB_NOTFOUND; @@ -346,7 +346,7 @@ entryrdn_index_read(backend *be, rc = _entryrdn_open_index(be, &ai, &db); if (rc) { slapi_log_error(SLAPI_LOG_FATAL, ENTRYRDN_TAG, - "entryrdn_index_read:: Opening the index failed: " + "entryrdn_index_read: Opening the index failed: " "%s(%d)\n", rc<0?dblayer_strerror(rc):"Invalid parameter", rc); goto bail; @@ -455,8 +455,8 @@ entryrdn_rename_subtree(backend *be, rc = LDAP_INVALID_DN_SYNTAX; goto bail; } else if (rc > 0) { - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, - "entryrdn_rename_subtree:: %s does not belong to " + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, + "entryrdn_rename_subtree: %s does not belong to " "the db\n", slapi_sdn_get_dn(oldsdn)); rc = DB_NOTFOUND; goto bail; @@ -497,7 +497,7 @@ entryrdn_rename_subtree(backend *be, goto bail; } else { /* newsupsdn == NULL, so newsrdn is not */ - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "entryrdn_rename_subtree: Renaming suffix %s to %s\n", nrdn, slapi_rdn_get_nrdn((Slapi_RDN *)mynewsrdn)); } @@ -546,8 +546,8 @@ entryrdn_rename_subtree(backend *be, rc = LDAP_INVALID_DN_SYNTAX; goto bail; } else if (rc > 0) { - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, - "entryrdn_rename_subtree:: %s does not belong " + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, + "entryrdn_rename_subtree: %s does not belong " "to the db\n", slapi_sdn_get_dn(mynewsupsdn)); rc = DB_NOTFOUND; goto bail; @@ -894,8 +894,8 @@ entryrdn_get_subordinates(backend *be, "\"%s\" to Slapi_RDN\n", slapi_sdn_get_dn(sdn)); rc = LDAP_INVALID_DN_SYNTAX; } else if (rc > 0) { - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, - "entryrdn_get_subordinates:: %s does not belong to " + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, + "entryrdn_get_subordinates: %s does not belong to " "the db\n", slapi_sdn_get_dn(sdn)); rc = DB_NOTFOUND; } @@ -1601,7 +1601,7 @@ _entryrdn_put_data(DBC *cursor, DBT *key, DBT *data, char type) if (rc) { if (DB_KEYEXIST == rc) { /* this is okay */ - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "_entryrdn_put_data: The same key (%s) and the " "data exists in index\n", (char *)key->data); @@ -2026,7 +2026,7 @@ _entryrdn_insert_key(backend *be, } /* if (TMPID == tmpid) */ rc = 0; } /* if (DB_KEYEXIST == rc) */ - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "_entryrdn_insert_key: Suffix %s added: %d\n", nrdn, rc); goto bail; /* succeeded or failed, it's done */ @@ -2099,7 +2099,7 @@ _entryrdn_insert_key(backend *be, adddata.flags 
= DB_DBT_USERMEM; rc = _entryrdn_put_data(cursor, &key, &adddata, RDN_INDEX_SELF); - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "_entryrdn_insert_key: Suffix %s added: %d\n", slapi_rdn_get_rdn(tmpsrdn), rc); } else { @@ -2201,7 +2201,7 @@ _entryrdn_insert_key(backend *be, /* already in the file */ /* do nothing and return. */ rc = 0; - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "_entryrdn_insert_key: ID %d is already " "in the index. NOOP.\n", currid); } else { /* different id, error return */ @@ -2409,7 +2409,7 @@ retry_get0: } if (rc) { if (DB_NOTFOUND == rc) { - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "_entryrdn_delete_key: No parent link %s\n", keybuf); goto bail; } else { @@ -2567,7 +2567,7 @@ _entryrdn_index_read(backend *be, /* getting the suffix element */ rc = _entryrdn_get_elem(cursor, &key, &data, nrdn, elem); if (rc || NULL == *elem) { - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "_entryrdn_index_read: Suffix \"%s\" not found: " "%s(%d)\n", nrdn, dblayer_strerror(rc), rc); rc = DB_NOTFOUND; @@ -2644,7 +2644,7 @@ _entryrdn_index_read(backend *be, rc = _entryrdn_get_elem(cursor, &key, &data, childnrdn, &tmpelem); if (rc) { slapi_ch_free((void **)&tmpelem); - slapi_log_error(SLAPI_LOG_TRACE, ENTRYRDN_TAG, + slapi_log_error(SLAPI_LOG_BACKLDBM, ENTRYRDN_TAG, "_entryrdn_index_read: Child link \"%s\" of key " "\"%s\" not found: %s(%d)\n", childnrdn, keybuf, dblayer_strerror(rc), rc); diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c index 328002dab..8ca7297d9 100644 --- a/ldap/servers/slapd/log.c +++ b/ldap/servers/slapd/log.c @@ -102,6 +102,7 @@ static int slapi_log_map[] = { LDAP_DEBUG_PLUGIN, /* SLAPI_LOG_PLUGIN */ LDAP_DEBUG_TIMING, /* SLAPI_LOG_TIMING */ LDAP_DEBUG_ACLSUMMARY, /* SLAPI_LOG_ACLSUMMARY */ + LDAP_DEBUG_BACKLDBM, /* SLAPI_LOG_BACKLDBM */ }; #define SLAPI_LOG_MIN SLAPI_LOG_FATAL /* from slapi-plugin.h */ diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c index 322f1b8fa..d118a04b6 100644 --- a/ldap/servers/slapd/main.c +++ b/ldap/servers/slapd/main.c @@ -2579,6 +2579,9 @@ slapd_exemode_db2archive() LDAPDebug(LDAP_DEBUG_ANY, "Initialization Failed 0 %d\n",return_value,0,0); return 1; } + if (!(slapd_ldap_debug & LDAP_DEBUG_BACKLDBM)) { + g_set_detached(1); + } memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; @@ -2630,6 +2633,10 @@ slapd_exemode_archive2db() return 1; } + if (!(slapd_ldap_debug & LDAP_DEBUG_BACKLDBM)) { + g_set_detached(1); + } + memset( &pb, '\0', sizeof(pb) ); pb.pb_backend = NULL; pb.pb_plugin = backend_plugin; @@ -2928,6 +2935,7 @@ static struct slapd_debug_level_entry { { LDAP_DEBUG_PLUGIN, "plugin", 1 }, { LDAP_DEBUG_TIMING, "timing", 0 }, { LDAP_DEBUG_ACLSUMMARY,"accesscontrolsummary", 0 }, + { LDAP_DEBUG_BACKLDBM, "backend", 0 }, { LDAP_DEBUG_ALL_LEVELS,"ALL", 0 }, { 0, NULL, 0 } }; diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h index df3380b0f..b5f4d199b 100644 --- a/ldap/servers/slapd/slapi-plugin.h +++ b/ldap/servers/slapd/slapi-plugin.h @@ -4990,7 +4990,8 @@ int slapi_log_error( int severity, char *subsystem, char *fmt, ... 
) #define SLAPI_LOG_CACHE 13 #define SLAPI_LOG_PLUGIN 14 #define SLAPI_LOG_TIMING 15 -#define SLAPI_LOG_ACLSUMMARY 16 +#define SLAPI_LOG_BACKLDBM 16 +#define SLAPI_LOG_ACLSUMMARY 17 int slapi_is_loglevel_set( const int loglevel );
0
304502cb42c00edf8e5177aaa9dceeb51f8ea078
389ds/389-ds-base
Ticket #514 - investigate connection locking https://fedorahosted.org/389/ticket/514 Reviewed by: mreynolds,nhosoi (Thanks!) Branch: master Fix Description: There were two locks involved for every operation - a lock to protect the pblock queue and a lock and condition variable to protect the counter variable. This fix consolidates them into a single lock/cv for the pblock queue and gets rid of the separate lock for the counter. The queue structures have been cleaned up and renamed work_q and work_q_size. The worker threads wait for new work if not shutdown and the work_q is empty. In addition, the timeout interval for the wait for the work_q cv has been changed to "infinite" instead of 10 seconds. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: Yes
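The fix boils down to the standard single-mutex work queue: one lock protects the queue head and tail, the size counter, and the shutdown flag, while one condition variable wakes workers when work arrives or shutdown begins. A pthreads sketch of that pattern follows, with hypothetical names; it is not the NSPR-based slapd code, just the shape of the consolidation.

```c
#include <pthread.h>
#include <stdlib.h>

struct work_item { struct work_item *next; };

static struct work_item *head, *tail;
static int q_size;
static int shutting_down;
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;

/* Producer: append an item and signal one waiting worker. */
void add_work(struct work_item *w)
{
    pthread_mutex_lock(&q_lock);
    w->next = NULL;
    if (tail) tail->next = w; else head = w;
    tail = w;
    q_size++;                    /* the same lock covers the counter */
    pthread_cond_signal(&q_cv);
    pthread_mutex_unlock(&q_lock);
}

/* Worker: wait (with no timeout) until there is work or a shutdown request. */
struct work_item *wait_for_work(void)
{
    struct work_item *w = NULL;

    pthread_mutex_lock(&q_lock);
    while (!shutting_down && q_size == 0) {
        pthread_cond_wait(&q_cv, &q_lock);   /* analogous to PR_INTERVAL_NO_TIMEOUT */
    }
    if (!shutting_down) {
        w = head;
        head = w->next;
        if (head == NULL) tail = NULL;
        q_size--;
    }
    pthread_mutex_unlock(&q_lock);
    return w;                    /* NULL means shutdown */
}

/* Shutdown: set the flag under the same lock and wake every worker. */
void stop_workers(void)
{
    pthread_mutex_lock(&q_lock);
    shutting_down = 1;
    pthread_cond_broadcast(&q_cv);
    pthread_mutex_unlock(&q_lock);
}
```

Waiting without a timeout is safe in this shape because stop_workers() broadcasts on the same condition variable, which is exactly why the periodic 10-second wakeup could be dropped.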
commit 304502cb42c00edf8e5177aaa9dceeb51f8ea078 Author: Rich Megginson <[email protected]> Date: Tue Feb 19 19:32:56 2013 -0700 Ticket #514 - investigate connection locking https://fedorahosted.org/389/ticket/514 Reviewed by: mreynolds,nhosoi (Thanks!) Branch: master Fix Description: There were two locks involved for every operation - a lock to protect the pblock queue and a lock and condition variable to protect the counter variable. This fix consolidates them into a single lock/cv for the pblock queue and gets rid of the separate lock for the counter. The queue structures have been cleaned up and renamed work_q and work_q_size. The worker threads wait for new work if not shutdown and the work_q is empty. In addition, the timeout interval for the wait for the work_q cv has been changed to "infinite" instead of 10 seconds. Platforms tested: RHEL6 x86_64 Flag Day: no Doc impact: Yes diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c index 31fc54334..c68f56b42 100644 --- a/ldap/servers/slapd/connection.c +++ b/ldap/servers/slapd/connection.c @@ -84,10 +84,11 @@ struct Slapi_PBlock_q static struct Slapi_PBlock_q *first_pb= NULL; /* global work queue head */ static struct Slapi_PBlock_q *last_pb= NULL; /* global work queue tail */ static PRLock *pb_q_lock=NULL; /* protects first_pb & last_pb */ - -static PRCondVar *op_thread_cv; /* used by operation threads to wait for work */ -static PRLock *op_thread_lock; /* associated with op_thread_cv */ -static int op_shutdown= 0; /* if non-zero, server is shutting down */ +static PRCondVar *pb_q_cv; /* used by operation threads to wait for work - when there is a op pblock in the queue waiting to be processed */ +static PRInt32 pb_q_size; /* size of pb_q */ +static PRInt32 pb_q_size_max; /* high water mark of pb_q_size */ +#define PB_Q_EMPTY (pb_q_size == 0) +static PRInt32 op_shutdown= 0; /* if non-zero, server is shutting down */ #define LDAP_SOCKET_IO_BUFFER_SIZE 512 /* Size of the buffer we give to the I/O system for reads */ @@ -403,38 +404,30 @@ init_op_threads() int max_threads = config_get_threadnumber(); /* Initialize the locks and cv */ - if ((pb_q_lock = PR_NewLock()) == NULL ) { - errorCode = PR_GetError(); - LDAPDebug( LDAP_DEBUG_ANY, - "init_op_threads: PR_NewLock failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", - errorCode, slapd_pr_strerror(errorCode), 0 ); - exit(-1); - } - - if ((op_thread_lock = PR_NewLock()) == NULL ) { - errorCode = PR_GetError(); - LDAPDebug( LDAP_DEBUG_ANY, - "init_op_threads: PR_NewLock failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", - errorCode, slapd_pr_strerror(errorCode), 0 ); - exit(-1); - } - - if ((op_thread_cv = PR_NewCondVar( op_thread_lock )) == NULL) { - errorCode = PR_GetError(); - LDAPDebug( LDAP_DEBUG_ANY, "init_op_threads: PR_NewCondVar failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", - errorCode, slapd_pr_strerror(errorCode), 0 ); - exit(-1); - } + if ((pb_q_lock = PR_NewLock()) == NULL ) { + errorCode = PR_GetError(); + LDAPDebug( LDAP_DEBUG_ANY, + "init_op_threads: PR_NewLock failed for pb_q_lock, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", + errorCode, slapd_pr_strerror(errorCode), 0 ); + exit(-1); + } + + if ((pb_q_cv = PR_NewCondVar( pb_q_lock )) == NULL) { + errorCode = PR_GetError(); + LDAPDebug( LDAP_DEBUG_ANY, "init_op_threads: PR_NewCondVar failed for pb_q_cv, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", + errorCode, slapd_pr_strerror(errorCode), 0 ); + exit(-1); + } /* start the operation threads */ for (i=0; i < max_threads; i++) 
{ PR_SetConcurrency(4); if (PR_CreateThread (PR_USER_THREAD, - (VFP) (void *) connection_threadmain, NULL, - PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, - PR_UNJOINABLE_THREAD, - SLAPD_DEFAULT_THREAD_STACKSIZE - ) == NULL ) { + (VFP) (void *) connection_threadmain, NULL, + PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, + PR_UNJOINABLE_THREAD, + SLAPD_DEFAULT_THREAD_STACKSIZE + ) == NULL ) { int prerr = PR_GetError(); LDAPDebug( LDAP_DEBUG_ANY, "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n", prerr, slapd_pr_strerror( prerr ), 0 ); @@ -1535,8 +1528,6 @@ static int finished_chomping(Connection *conn) * IO Completion Ports are not available on this platform. */ -static int counter= 0; /* JCM Dumb Name */ - /* The connection private structure for UNIX turbo mode */ struct Conn_private { @@ -1700,37 +1691,26 @@ connection_free_private_buffer(Connection *conn) #define CONN_TURBO_PERCENTILE 50 /* proportion of threads allowed to be in turbo mode */ #define CONN_TURBO_HYSTERESIS 0 /* avoid flip flopping in and out of turbo mode */ -int connection_wait_for_new_pb(Slapi_PBlock **ppb, PRIntervalTime interval) +int connection_wait_for_new_pb(Slapi_PBlock **ppb, PRIntervalTime interval) { int ret = CONN_FOUND_WORK_TO_DO; - - PR_Lock( op_thread_lock ); - - /* While there is no operation to do... */ - while( counter < 1) { - /* Check if we should shutdown. */ - if (op_shutdown) { - PR_Unlock( op_thread_lock ); - return CONN_SHUTDOWN; - } - PR_WaitCondVar( op_thread_cv, interval); - } - /* There is some work to do. */ - - counter--; - PR_Unlock( op_thread_lock ); + PR_Lock( pb_q_lock ); - /* Get the next operation from the work queue. */ + while( !op_shutdown && PB_Q_EMPTY ) { + PR_WaitCondVar( pb_q_cv, interval ); + } - *ppb = get_pb(); - if (*ppb == NULL) { - LDAPDebug( LDAP_DEBUG_ANY, "pb is null \n", 0, 0, 0 ); - PR_Lock( op_thread_lock ); - counter++; - PR_Unlock( op_thread_lock ); + if ( op_shutdown ) { + LDAPDebug0Args( LDAP_DEBUG_ANY, "connection_wait_for_new_pb: shutdown\n" ); + ret = CONN_SHUTDOWN; + } else if ( NULL == ( *ppb = get_pb() ) ) { + /* not sure how this can happen */ + LDAPDebug0Args( LDAP_DEBUG_ANY, "connection_wait_for_new_pb: pb is null\n" ); ret = CONN_NOWORK; } + + PR_Unlock( pb_q_lock ); return ret; } @@ -2166,12 +2146,12 @@ static void connection_threadmain() { Slapi_PBlock *pb = NULL; - PRIntervalTime interval = PR_SecondsToInterval(10); + /* wait forever for new pb until one is available or shutdown */ + PRIntervalTime interval = PR_INTERVAL_NO_TIMEOUT; /* PR_SecondsToInterval(10); */ Connection *conn = NULL; Operation *op; ber_tag_t tag = 0; int need_wakeup = 0; - int need_conn_release = 0; int thread_turbo_flag = 0; int ret = 0; int more_data = 0; @@ -2202,6 +2182,7 @@ connection_threadmain() ret = connection_wait_for_new_pb(&pb,interval); switch (ret) { case CONN_NOWORK: + PR_ASSERT(interval != PR_INTERVAL_NO_TIMEOUT); /* this should never happen with PR_INTERVAL_NO_TIMEOUT */ continue; case CONN_SHUTDOWN: LDAPDebug( LDAP_DEBUG_TRACE, @@ -2264,9 +2245,9 @@ connection_threadmain() } /* turn off turbo mode immediately if any pb waiting in global queue */ - if (thread_turbo_flag && (counter > 0)) { + if (thread_turbo_flag && !PB_Q_EMPTY) { thread_turbo_flag = 0; - LDAPDebug(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " leaving turbo mode\n",conn->c_connid,0,0); + LDAPDebug2Args(LDAP_DEBUG_CONNS,"conn %" NSPRIu64 " leaving turbo mode - pb_q is not empty %d\n",conn->c_connid,pb_q_size); } #endif @@ -2313,13 +2294,16 @@ connection_threadmain() */ replication_connection = 
conn->c_isreplication_session; if ((tag != LDAP_REQ_UNBIND) && !thread_turbo_flag && !more_data && !replication_connection) { - connection_make_readable(conn); + connection_make_readable_nolock(conn); + /* once the connection is readable, another thread may access conn, + * so need locking from here on */ + signal_listner(); } /* are we in referral-only mode? */ if (config_check_referral_mode() && tag != LDAP_REQ_UNBIND) { - referral_mode_reply(pb); - goto done; + referral_mode_reply(pb); + goto done; } /* check if new password is required */ @@ -2347,6 +2331,21 @@ connection_threadmain() connection_dispatch_operation(conn, op, pb); done: + if (doshutdown) { + PR_Lock(conn->c_mutex); + connection_remove_operation( conn, op ); + /* destroying the pblock will cause destruction of the operation + * so this must happend before releasing the connection + */ + slapi_pblock_destroy( pb ); + pb = NULL; + connection_make_readable_nolock(conn); + conn->c_threadnumber--; + connection_release_nolock(conn); + PR_Unlock(conn->c_mutex); + signal_listner(); + return; + } /* * done with this operation. delete it from the op * queue for this connection, delete the number of @@ -2359,75 +2358,51 @@ done: /* total number of ops for the server */ slapi_counter_increment(ops_completed); /* If this op isn't a persistent search, remove it */ - need_conn_release = 0; - if ( !( pb->pb_op->o_flags & OP_FLAG_PS )) { - /* delete from connection operation queue & decr refcnt */ - PR_Lock( conn->c_mutex ); - connection_remove_operation( conn, op ); - /* destroying the pblock will cause destruction of the operation - * so this must happend before releasing the connection - */ - slapi_pblock_destroy( pb ); - - /* If we're in turbo mode, we keep our reference to the connection - alive */ - if (!thread_turbo_flag && !more_data) { - /* - * Don't release the connection now. - * But note down what to do. 
- */ - need_conn_release = 1; - } - PR_Unlock( conn->c_mutex ); - } else { /* the ps code acquires a ref to the conn - we need to release ours here */ - PR_Lock( conn->c_mutex ); - connection_release_nolock (conn); - PR_Unlock( conn->c_mutex ); - } - pb = NULL; - if (doshutdown) { - PR_Lock(conn->c_mutex); - connection_make_readable_nolock(conn); - conn->c_threadnumber--; - connection_release_nolock(conn); - PR_Unlock(conn->c_mutex); - signal_listner(); - return; - } - - if (!more_data) { /* no more data in the buffer */ - if (!thread_turbo_flag) { /* Don't do this in turbo mode */ - /* Since we didn't do so earlier, we need to make a - * replication connection readable again here */ - PR_Lock( conn->c_mutex ); - if (replication_connection || (1 == is_timedout)) { - connection_make_readable_nolock(conn); - need_wakeup = 1; - } - /* if the threadnumber of now below the maximum, wakeup - * the listener thread so that we start polling on this - * connection again - */ - if (!need_wakeup) { - if (conn->c_threadnumber == config_get_maxthreadsperconn()) + if ( pb->pb_op->o_flags & OP_FLAG_PS ) { + PR_Lock( conn->c_mutex ); + connection_release_nolock (conn); /* psearch acquires ref to conn - release this one now */ + PR_Unlock( conn->c_mutex ); + } else { + /* delete from connection operation queue & decr refcnt */ + PR_Lock( conn->c_mutex ); + connection_remove_operation( conn, op ); + /* destroying the pblock will cause destruction of the operation + * so this must happend before releasing the connection + */ + slapi_pblock_destroy( pb ); + + /* If we're in turbo mode, we keep our reference to the connection alive */ + if (!more_data) { + if (!thread_turbo_flag) { + /* + * Don't release the connection now. + * But note down what to do. + */ + if (replication_connection || (1 == is_timedout)) { + connection_make_readable_nolock(conn); need_wakeup = 1; - else - need_wakeup = 0; - } - conn->c_threadnumber--; - if (need_conn_release) { + } + if (!need_wakeup) { + if (conn->c_threadnumber == config_get_maxthreadsperconn()) + need_wakeup = 1; + else + need_wakeup = 0; + } + conn->c_threadnumber--; connection_release_nolock(conn); - } - PR_Unlock( conn->c_mutex ); - /* Call signal_listner after releasing the - * connection if required. */ - if (need_wakeup) { + /* Call signal_listner after releasing the + * connection if required. */ + if (need_wakeup) { + signal_listner(); + } + } else if (1 == is_timedout) { + connection_make_readable_nolock(conn); signal_listner(); } - } else if (1 == is_timedout) { - connection_make_readable(conn); } + PR_Unlock( conn->c_mutex ); } + pb = NULL; } /* while (1) */ } @@ -2437,33 +2412,27 @@ connection_activity(Connection *conn) { Slapi_PBlock *pb; - connection_make_new_pb(&pb, conn); - - /* Add pb to the end of the work queue. */ - add_pb( pb ); - - /* Check if exceed the max thread per connection. If so, increment - c_pbwait. Otherwise increment the counter and notify the cond. var. - there is work to do. */ - if (connection_acquire_nolock (conn) == -1) { - LDAPDebug(LDAP_DEBUG_CONNS, - "could not acquire lock in connection_activity as conn %" NSPRIu64 " closing fd=%d\n", - conn->c_connid,conn->c_sd,0); - /* XXX how to handle this error? */ - /* MAB: 25 Jan 01: let's return on error and pray this won't leak */ - return (-1); + LDAPDebug(LDAP_DEBUG_CONNS, + "could not acquire lock in connection_activity as conn %" NSPRIu64 " closing fd=%d\n", + conn->c_connid,conn->c_sd,0); + /* XXX how to handle this error? 
*/ + /* MAB: 25 Jan 01: let's return on error and pray this won't leak */ + return (-1); } + + connection_make_new_pb(&pb, conn); + + /* set these here so setup_pr_read_pds will not add this conn back to the poll array */ conn->c_gettingber = 1; conn->c_threadnumber++; - PR_Lock( op_thread_lock ); - counter++; - PR_NotifyCondVar( op_thread_cv ); - PR_Unlock( op_thread_lock ); - + /* Add pb to the end of the work queue. */ + /* have to do this last - add_pb will signal waiters in connection_wait_for_new_pb */ + add_pb( pb ); + if (! config_check_referral_mode()) { - slapi_counter_increment(ops_initiated); - slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsInOps); + slapi_counter_increment(ops_initiated); + slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsInOps); } return 0; } @@ -2474,7 +2443,6 @@ connection_activity(Connection *conn) static void add_pb( Slapi_PBlock *pb) { - struct Slapi_PBlock_q *new_pb=NULL; LDAPDebug( LDAP_DEBUG_TRACE, "add_pb \n", 0, 0, 0 ); @@ -2492,25 +2460,27 @@ add_pb( Slapi_PBlock *pb) last_pb->next_pb = new_pb; last_pb = new_pb; } + PR_AtomicIncrement( &pb_q_size ); /* increment q size */ + if ( pb_q_size > pb_q_size_max ) { + pb_q_size_max = pb_q_size; + } + PR_NotifyCondVar( pb_q_cv ); /* notify waiters in connection_wait_for_new_pb */ PR_Unlock( pb_q_lock ); } -/* get_pb(): will get a pb from the begining of the work queue, return NULL if - the queue is empty.*/ +/* get_pb(): will get a pb from the beginning of the work queue, return NULL if + the queue is empty. This should only be called from connection_wait_for_new_pb + with the pb_q_lock held */ static Slapi_PBlock * get_pb() { - struct Slapi_PBlock_q *tmp = NULL; Slapi_PBlock *pb; - LDAPDebug( LDAP_DEBUG_TRACE, "get_pb \n", 0, 0, 0 ); - PR_Lock( pb_q_lock ); + LDAPDebug0Args( LDAP_DEBUG_TRACE, "get_pb \n" ); if (first_pb == NULL) { - PR_Unlock( pb_q_lock ); - LDAPDebug( LDAP_DEBUG_ANY, "get_pb: the work queue is empty.\n", - 0, 0, 0 ); + LDAPDebug0Args( LDAP_DEBUG_TRACE, "get_pb: the work queue is empty.\n" ); return NULL; } @@ -2519,11 +2489,11 @@ get_pb() last_pb = NULL; } first_pb = tmp->next_pb; - PR_Unlock( pb_q_lock ); pb = tmp->pb; /* Free the memory used by the pb found. 
*/ slapi_ch_free ((void **)&tmp); + PR_AtomicDecrement( &pb_q_size ); /* decrement q size */ return (pb); } @@ -2540,28 +2510,28 @@ void op_thread_cleanup() { #ifdef _WIN32 - int i; - PRIntervalTime interval; - int max_threads = config_get_threadnumber(); - interval = PR_SecondsToInterval(3); + int i; + PRIntervalTime interval; + int max_threads = config_get_threadnumber(); + interval = PR_SecondsToInterval(3); #endif - LDAPDebug( LDAP_DEBUG_ANY, - "slapd shutting down - signaling operation threads\n", 0, 0, 0); - - PR_Lock( op_thread_lock ); - op_shutdown = 1; - PR_NotifyAllCondVar ( op_thread_cv ); - PR_Unlock( op_thread_lock ); + LDAPDebug( LDAP_DEBUG_ANY, + "slapd shutting down - signaling operation threads\n", 0, 0, 0); + + PR_AtomicIncrement(&op_shutdown); + PR_Lock( pb_q_lock ); + PR_NotifyAllCondVar ( pb_q_cv ); /* tell any thread waiting in connection_wait_for_new_pb to shutdown */ + PR_Unlock( pb_q_lock ); #ifdef _WIN32 - LDAPDebug( LDAP_DEBUG_ANY, - "slapd shutting down - waiting for %d threads to terminate\n", - g_get_active_threadcnt(), 0, 0 ); - /* kill off each worker waiting on GetQueuedCompletionStatus */ - for ( i = 0; i < max_threads; ++ i ) - { - PostQueuedCompletionStatus( completion_port, 0, COMPKEY_DIE ,0); - } - /* don't sleep: there's no reason to do so here DS_Sleep(interval); */ /* sleep 3 seconds */ + LDAPDebug( LDAP_DEBUG_ANY, + "slapd shutting down - waiting for %d threads to terminate\n", + g_get_active_threadcnt(), 0, 0 ); + /* kill off each worker waiting on GetQueuedCompletionStatus */ + for ( i = 0; i < max_threads; ++ i ) + { + PostQueuedCompletionStatus( completion_port, 0, COMPKEY_DIE ,0); + } + /* don't sleep: there's no reason to do so here DS_Sleep(interval); */ /* sleep 3 seconds */ #endif }
0
59c1881e7324259c407619815222c2b25aea8b74
389ds/389-ds-base
Resolves: bug 476891 Bug Description: Replication: Server to Server Connection Error: SASL(-1): generic failure: All-whitespace username. Reviewed by: nkinder (Thanks!) Fix Description: My earlier fix for this bug broke GSSAPI - it would cause the username and authid to only be freed under certain conditions e.g. if the krb creds were still valid, the code would not free the username and authid, so they would be passed via SASL instead of the principal name. This fix just makes sure username and authid are always freed, under all circumstances. Platforms tested: RHEL5, Fedora 9 Flag Day: no Doc impact: no
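The underlying bug pattern is easy to reproduce: when the frees sit before a cleanup: label, any early goto cleanup skips them and the stale strings get handed to later code. Moving the frees after the label makes them run on every path. A small C sketch of the corrected shape, with a hypothetical sasl_vals structure standing in for the real credential state:

```c
#include <stdlib.h>

struct sasl_vals {
    char *username;
    char *authid;
};

/* Always clear username/authid, no matter which path reaches cleanup,
 * so stale values are never passed to the SASL layer. */
int prepare_gssapi_creds(struct sasl_vals *vals, int creds_still_valid)
{
    int rc = 0;

    if (creds_still_valid) {
        /* nothing to renew, but username/authid must still be cleared */
        goto cleanup;
    }

    /* ... acquire new credentials here; on failure set rc and goto cleanup ... */

cleanup:
    /* unconditional: GSSAPI supplies the principal name instead */
    free(vals->username);
    vals->username = NULL;
    free(vals->authid);
    vals->authid = NULL;
    return rc;
}
```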
commit 59c1881e7324259c407619815222c2b25aea8b74 Author: Rich Megginson <[email protected]> Date: Fri Dec 19 19:26:01 2008 +0000 Resolves: bug 476891 Bug Description: Replication: Server to Server Connection Error: SASL(-1): generic failure: All-whitespace username. Reviewed by: nkinder (Thanks!) Fix Description: My earlier fix for this bug broke GSSAPI - it would cause the username and authid to only be freed under certain conditions e.g. if the krb creds were still valid, the code would not free the username and authid, so they would be passed via SASL instead of the principal name. This fix just makes sure username and authid are always freed, under all circumstances. Platforms tested: RHEL5, Fedora 9 Flag Day: no Doc impact: no diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c index 2b6ac7d62..b6aeb1a13 100644 --- a/ldap/servers/slapd/util.c +++ b/ldap/servers/slapd/util.c @@ -1946,11 +1946,11 @@ set_krb5_creds( cc_env_name); } +cleanup: /* use NULL as username and authid */ slapi_ch_free_string(&vals->username); slapi_ch_free_string(&vals->authid); -cleanup: krb5_free_unparsed_name(ctx, princ_name); if (kt) { /* NULL not allowed */ krb5_kt_close(ctx, kt);
0
8189e94b3f4f196a9be70a59b810f7c28bf5e04e
389ds/389-ds-base
add support for ldif files with changetype: add IPA config files use changetype: add to add new files. This was not supported by the DS ConfigFile directive or other LDIF processing. Now, if we see that the entry has changetype: add, we process it as if it had no changetype at all, which is to add the entry. Remove the changetype: from the entry before adding. Reviewed by: nkinder (Thanks!)
commit 8189e94b3f4f196a9be70a59b810f7c28bf5e04e Author: Rich Megginson <[email protected]> Date: Mon Jun 27 10:50:00 2011 -0600 add support for ldif files with changetype: add IPA config files use changetype: add to add new files. This was not supported by the DS ConfigFile directive or other LDIF processing. Now, if we see that the entry has changetype: add, we process it as if it had no changetype at all, which is to add the entry. Remove the changetype: from the entry before adding. Reviewed by: nkinder (Thanks!) diff --git a/ldap/admin/src/scripts/DSUtil.pm.in b/ldap/admin/src/scripts/DSUtil.pm.in index d330d0ba5..16f46b6f9 100644 --- a/ldap/admin/src/scripts/DSUtil.pm.in +++ b/ldap/admin/src/scripts/DSUtil.pm.in @@ -306,7 +306,7 @@ sub check_and_add_entry } else { debug(3, "check_and_add_entry: Entry not found " . $aentry->{dn} . " error " . $conn->getErrorString() . "\n"); - if (@ctypes) { # uh oh - attempt to del/mod an entry that doesn't exist + if (@ctypes and !("add" eq lc($ctypes[0]))) { # uh oh - attempt to del/mod an entry that doesn't exist debug(3, "check_and_add_entry: attepting to @ctypes the entry " . $aentry->{dn} . " that does not exist\n"); return 1; # ignore - return success @@ -323,7 +323,7 @@ sub check_and_add_entry my $OP_DEL = 3; # $op stores either of the above $OP_ values my $op = $OP_NONE; - if ( 0 > $#ctypes ) # aentry: complete entry + if ( (0 > $#ctypes) or ("add" eq lc($ctypes[0])) ) # aentry: complete entry { $op = $OP_ADD; # just add the entry } @@ -351,6 +351,10 @@ sub check_and_add_entry if ( $OP_ADD == $op ) { + if ("add" eq lc($ctypes[0])) { + # remove the changetype: add from the entry + $aentry->remove('changetype'); + } $conn->add($aentry); my $rc = $conn->getErrorCode(); if ( $rc != 0 )
0
6ca3fb977ac78d77f4e09d774f88b1f1196e5631
389ds/389-ds-base
Issue 4770 - Lower FIPS logging severity Description: If FIPS is not available on a system we log error messages with the severity level of ERR, but it's not really an error, so it should be changed to NOTICE. relates: https://github.com/389ds/389-ds-base/issues/4770 Reviewed by: mreynolds (one line commit rule)
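On Linux the check is simply whether /proc/sys/crypto/fips_enabled exists, is readable, and contains '1'; any failure just means FIPS is off, which is why a notice-level message is sufficient. A standalone C sketch of that logic, using plain stdio instead of the NSPR/slapi calls in the patch:

```c
#include <stdio.h>

#define FIPS_ENABLED_FILE "/proc/sys/crypto/fips_enabled"

/* Return 1 if the kernel reports FIPS mode, 0 otherwise.
 * A missing or unreadable file is informational, not an error. */
static int system_is_fips(void)
{
    FILE *fp = fopen(FIPS_ENABLED_FILE, "r");
    int ch;

    if (fp == NULL) {
        fprintf(stderr, "NOTICE: cannot open %s - assuming FIPS is OFF\n",
                FIPS_ENABLED_FILE);
        return 0;
    }
    ch = fgetc(fp);
    fclose(fp);
    if (ch == EOF) {
        fprintf(stderr, "NOTICE: cannot read %s - assuming FIPS is OFF\n",
                FIPS_ENABLED_FILE);
        return 0;
    }
    return ch == '1';
}

int main(void)
{
    printf("FIPS mode: %s\n", system_is_fips() ? "on" : "off");
    return 0;
}
```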
commit 6ca3fb977ac78d77f4e09d774f88b1f1196e5631 Author: Mark Reynolds <[email protected]> Date: Mon May 17 09:21:49 2021 -0400 Issue 4770 - Lower FIPS logging severity Description: If FIPS is not available on a system we log errors messages with the severity level of ERR, but it's not really an error so it should be changed to NOTICE. relates: https://github.com/389ds/389-ds-base/issues/4770 Reviewed by: mreynolds (one line commit rule) diff --git a/ldap/servers/slapd/security_wrappers.c b/ldap/servers/slapd/security_wrappers.c index 27cdf2f1a..d978f7c33 100644 --- a/ldap/servers/slapd/security_wrappers.c +++ b/ldap/servers/slapd/security_wrappers.c @@ -239,15 +239,15 @@ slapd_system_isFIPS(void) char buf[sizeof (PRIu64)]; int val; if (PR_SUCCESS != PR_Access(FIPS_ENABLED, PR_ACCESS_READ_OK)) { - slapi_log_err(SLAPI_LOG_ERR, "slapd_system_isFIPS", "Can not access %s - assuming FIPS is OFF\n", FIPS_ENABLED); + slapi_log_err(SLAPI_LOG_NOTICE, "slapd_system_isFIPS", "Can not access %s - assuming FIPS is OFF\n", FIPS_ENABLED); goto done; } if ((prfd = PR_Open(FIPS_ENABLED, PR_RDONLY, SLAPD_DEFAULT_FILE_MODE)) == NULL) { - slapi_log_err(SLAPI_LOG_ERR, "slapd_system_isFIPS", "Can not open %s - assuming FIPS is OFF\n", FIPS_ENABLED); + slapi_log_err(SLAPI_LOG_NOTICE, "slapd_system_isFIPS", "Can not open %s - assuming FIPS is OFF\n", FIPS_ENABLED); goto done; } if (PR_Read(prfd, buf, sizeof (buf)) < 0) { - slapi_log_err(SLAPI_LOG_ERR, "slapd_system_isFIPS", "Can not read %s - assuming FIPS is OFF\n", FIPS_ENABLED); + slapi_log_err(SLAPI_LOG_NOTICE, "slapd_system_isFIPS", "Can not read %s - assuming FIPS is OFF\n", FIPS_ENABLED); PR_Close(prfd); goto done; }
0
465e1059493a8dacf3caf7776824e8149e878b93
389ds/389-ds-base
Ticket 48154 - abort cleanAllRUV tasks should not certify-all by default Bug Description: The current default for an abort task is to certify that all the replicas receive and process the abort task. The problem is that if we are trying to abort a cleanallruv task because it is "hanging" due to some replicas not being reachable, the abort task will also "hang". Fix Description: The default for certify-all should be "no". https://fedorahosted.org/389/ticket/48154 Reviewed by: nhosoi(Thanks!)
commit 465e1059493a8dacf3caf7776824e8149e878b93 Author: Mark Reynolds <[email protected]> Date: Tue Apr 21 12:23:47 2015 -0400 Ticket 48154 - abort cleanAllRUV tasks should not certify-all by default Bug Description: The current default for an abort task is to certify that all the replicas receive and process the abort task. The problem is that if we are trying to abort a cleanallruv task because it is "hanging" due to some replicas not being reachable, the abort task will also "hang". Fix Description: The default for certify-all should be "no". https://fedorahosted.org/389/ticket/48154 Reviewed by: nhosoi(Thanks!) diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c index 39783b2ed..0be97c2cd 100644 --- a/ldap/servers/plugins/replication/repl5_replica_config.c +++ b/ldap/servers/plugins/replication/repl5_replica_config.c @@ -2905,7 +2905,13 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter goto out; } } else { - certify_all = "yes"; + /* + * The default should be not to certify all the replicas, because + * we might be trying to abort a clean task that is "hanging" due + * to unreachable replicas. If the default is "yes" then the abort + * task will run into the same issue. + */ + certify_all = "no"; } /* * Create payload
0
eac244055c32d18d15e0b2a923aebcb8587acdb0
389ds/389-ds-base
Issue 91 - Fix replication topology Bug Description: When you use m1h1c1 topologies the replica type is incorrectly set for consumers (maybe the others too), and it adds repl agreements to consumers. Also there is circular repl initialization going on which causes everybody to have different db generation IDs which then breaks replication. Fix Description: Refactor create_topology(), make it more readable and deprecate the cascading replication scenario. Let's create it when we need it. Fix an issue with creating agreements on consumers. Fix an issue with agreements initialization. Remade topology_m1h1c1 so it wouldn't use the create_topology() func. Fix small replica.py issues. Reviewed by: wibrown, mreynolds (Thanks!) https://pagure.io/lib389/issue/91
commit eac244055c32d18d15e0b2a923aebcb8587acdb0 Author: Simon Pichugin <[email protected]> Date: Fri Aug 11 23:43:06 2017 +0200 Issue 91 - Fix replication topology Bug Description: When you use m1h1c1 topologies the replica type is incorrectly set for consumers(maybe the others too), and it adds repl agreements to consumers. Also there is circular repl initialization going on which causes everybody to have different db generation IDs which then breaks replication. Fix Description: Refactor create_topology(), make it more readable and depricate cascading replication scenarion. Let's create it when we need it. Fix an issue with creating agreements on consumers. Fix an issue with agreements initialization. Remade topology_m1h1c1 so it wouldn't use create_topology() func. Fix small replica.py issues. Reviewed by: wibrown, mreynolds (Thanks!) https://pagure.io/lib389/issue/91 diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py index 9251ecd71..a0283962d 100644 --- a/src/lib389/lib389/replica.py +++ b/src/lib389/lib389/replica.py @@ -1055,7 +1055,7 @@ class Replica(DSLdapObject): entry = self._instance.getEntry( agmtdn, ldap.SCOPE_BASE, "(objectclass=*)", attrlist) except NoSuchEntryError: - self.log.exception("Error reading status from agreement {}".format(agmtdn)) + self._log.exception("Error reading status from agreement {}".format(agmtdn)) hasError = 1 else: refresh = entry.nsds5BeginReplicaRefresh @@ -1121,7 +1121,7 @@ class Replica(DSLdapObject): THIS SHOULD BE IN THE NEW AGREEMENT CLASS """ - self.log.info("Starting async replication %s" % agmtdn) + self._log.info("Starting async replication %s" % agmtdn) mod = [(ldap.MOD_ADD, 'nsds5BeginReplicaRefresh', 'start')] self._instance.modify_s(agmtdn, mod) diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py index 225e1388a..1ffc09e2f 100644 --- a/src/lib389/lib389/topologies.py +++ b/src/lib389/lib389/topologies.py @@ -27,22 +27,25 @@ log = logging.getLogger(__name__) def create_topology(topo_dict): - """Create a requested topology + """Create a requested topology. Cascading replication scenario isn't supported - @param topo_dict - dictionary {REPLICAROLE: number_of_insts} - @return - dictionary {serverid: topology_instance} + @param topo_dict - dictionary {ReplicaRole.STANDALONE: num, ReplicaRole.MASTER: num, + ReplicaRole.CONSUMER: num} + @return - TopologyMain object """ if not topo_dict: ValueError("You need to specify the dict. For instance: {ReplicaRole.STANDALONE: 1}") + if ReplicaRole.HUB in topo_dict.keys(): + NotImplementedError("Cascading replication scenario isn't supported." 
+ "Please, use existing topology or create your own.") + instances = {} ms = {} - hs = {} cs = {} ins = {} replica_dict = {} - agmts = {} # Create instances for role in topo_dict.keys(): @@ -73,48 +76,48 @@ def create_topology(topo_dict): if role == ReplicaRole.MASTER: ms[instance.serverid] = instance instances.update(ms) - if role == ReplicaRole.HUB: - hs[instance.serverid] = instance - instances.update(hs) if role == ReplicaRole.CONSUMER: cs[instance.serverid] = instance instances.update(cs) log.info("Instance with parameters {} was created.".format(args_copied)) # Set up replication - if role in (ReplicaRole.MASTER, ReplicaRole.HUB, ReplicaRole.CONSUMER): + if role in (ReplicaRole.MASTER, ReplicaRole.CONSUMER): replicas = Replicas(instance) replica = replicas.enable(DEFAULT_SUFFIX, role, instance_data[REPLICA_ID]) replica_dict[replica] = instance - # Create agreements for role_from in topo_dict.keys(): + # Do not create agreements on consumer + if role_from == ReplicaRole.CONSUMER: + continue + + # Create agreements: master -> masters, consumers for inst_num_from in range(1, topo_dict[role]+1): - roles_to = [ReplicaRole.HUB, ReplicaRole.CONSUMER] - if role == ReplicaRole.MASTER: - roles_to.append(ReplicaRole.MASTER) + roles_to = [ReplicaRole.MASTER, ReplicaRole.CONSUMER] for role_to in [role for role in topo_dict if role in roles_to]: for inst_num_to in range(1, topo_dict[role]+1): - # Exclude our instance + # Exclude the instance we created it from if role_from != role_to or inst_num_from != inst_num_to: inst_from_id = "{}{}".format(role_from.name.lower(), inst_num_from) inst_to_id = "{}{}".format(role_to.name.lower(), inst_num_to) inst_from = instances[inst_from_id] inst_to = instances[inst_to_id] - agmt = inst_from.agreement.create(suffix=DEFAULT_SUFFIX, - host=inst_to.host, - port=inst_to.port) - agmts[agmt] = (inst_from, inst_to) + inst_from.agreement.create(suffix=DEFAULT_SUFFIX, + host=inst_to.host, + port=inst_to.port) # Allow the replicas to get situated with the new agreements - if agmts: + if replica_dict: time.sleep(5) - # Initialize all the agreements - for agmt, insts in agmts.items(): - insts[0].agreement.init(DEFAULT_SUFFIX, insts[1].host, insts[1].port) - insts[0].waitForReplInit(agmt) + # Initialize all agreements of one master (consumers) + for replica_from, inst_from in replica_dict.items(): + if replica_from.get_role() == ReplicaRole.MASTER: + agmts = inst_from.agreement.list(DEFAULT_SUFFIX) + map(lambda agmt: replica_from.start_and_wait(agmt.dn), agmts) + break # Clear out the tmp dir for instance in instances.values(): @@ -123,7 +126,7 @@ def create_topology(topo_dict): if "standalone1" in instances and len(instances) == 1: return TopologyMain(standalones=instances["standalone1"]) else: - return TopologyMain(standalones=ins, masters=ms, hubs=hs, consumers=cs) + return TopologyMain(standalones=ins, masters=ms, consumers=cs) class TopologyMain(object): @@ -227,26 +230,6 @@ def topology_m1c1(request): return topology [email protected](scope="module") -def topology_m1h1c1(request): - """Create Replication Deployment with one master, one consumer and one hub""" - - topology = create_topology({ReplicaRole.MASTER: 1, - ReplicaRole.HUB: 1, - ReplicaRole.CONSUMER: 1}) - replicas = Replicas(topology.ms["master1"]) - replicas.test(DEFAULT_SUFFIX, topology.cs["consumer1"]) - - def fin(): - if DEBUGGING: - map(lambda inst: inst.stop(), topology.all_insts.values()) - else: - map(lambda inst: inst.delete(), topology.all_insts.values()) - request.addfinalizer(fin) - - return 
topology - - @pytest.fixture(scope="module") def topology_m2(request): """Create Replication Deployment with two masters""" @@ -318,3 +301,82 @@ def topology_m2c2(request): request.addfinalizer(fin) return topology + + [email protected](scope="module") +def topology_m1h1c1(request): + """Create Replication Deployment with one master, one consumer and one hub""" + + roles = (ReplicaRole.MASTER, ReplicaRole.HUB, ReplicaRole.CONSUMER) + instances = [] + replica_dict = {} + + # Create instances + for role in roles: + instance_data = generate_ds_params(1, role) + if DEBUGGING: + instance = DirSrv(verbose=True) + else: + instance = DirSrv(verbose=False) + args_instance[SER_HOST] = instance_data[SER_HOST] + args_instance[SER_PORT] = instance_data[SER_PORT] + args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP] + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_copied = args_instance.copy() + instance.allocate(args_copied) + instance_exists = instance.exists() + if instance_exists: + instance.delete() + instance.create() + instance.open() + log.info("Instance with parameters {} was created.".format(args_copied)) + + # Set up replication + replicas = Replicas(instance) + replica = replicas.enable(DEFAULT_SUFFIX, role, instance_data[REPLICA_ID]) + + if role == ReplicaRole.MASTER: + master = instance + replica_master = replica + instances.append(master) + if role == ReplicaRole.HUB: + hub = instance + replica_hub = replica + instances.append(hub) + if role == ReplicaRole.CONSUMER: + consumer = instance + instances.append(consumer) + + # Create all the agreements + # Creating agreement from master to hub + master.agreement.create(suffix=DEFAULT_SUFFIX, host=hub.host, port=hub.port) + + # Creating agreement from hub to consumer + hub.agreement.create(suffix=DEFAULT_SUFFIX, host=consumer.host, port=consumer.port) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # Initialize all the agreements + agmt = master.agreement.list(DEFAULT_SUFFIX)[0].dn + replica_master.start_and_wait(agmt) + + agmt = hub.agreement.list(DEFAULT_SUFFIX)[0].dn + replica_hub.start_and_wait(agmt) + + # Check replication is working... + replicas = Replicas(master) + replicas.test(DEFAULT_SUFFIX, consumer) + + # Clear out the tmp dir + master.clearTmpDir(__file__) + + def fin(): + if DEBUGGING: + map(lambda inst: inst.stop(), instances) + else: + map(lambda inst: inst.delete(), instances) + request.addfinalizer(fin) + + return TopologyMain(masters={"master1": master}, hubs={"hub1": hub}, consumers={"consumer1": consumer}) +
0
b8c90a5a757099f466326c1ffc68cb89c74c0b14
389ds/389-ds-base
Issue 3604 - Create a private key/CSR with dsconf/Cockpit (#5584) RFE description: There's no way to create a private key and a CSR with dsconf/Cockpit. However, a feature for importing a certificate exists, but DS also requires the private key in the NSS database to use the certificate. Fix Description: Modify dsconf/UI to allow creation of a CSR. relates: https://github.com/389ds/389-ds-base/issues/3604 Reviewed by: @mreynolds389, @droideck (Thank you)
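Behind the form, the UI only assembles a certutil-style subject string from whichever RDN fields the user filled in (CN first, then O, OU, L, ST, C, E), skipping empty ones. A short C sketch of that assembly with hypothetical field values; the real patch does this in React state, not C:

```c
#include <stdio.h>
#include <string.h>

/* Append "KEY=value" to the subject (prefixed with ',' if the buffer is
 * non-empty); silently skip empty fields. */
static void append_rdn(char *subject, size_t cap, const char *key, const char *value)
{
    if (value == NULL || *value == '\0') {
        return;
    }
    if (*subject != '\0') {
        strncat(subject, ",", cap - strlen(subject) - 1);
    }
    strncat(subject, key, cap - strlen(subject) - 1);
    strncat(subject, "=", cap - strlen(subject) - 1);
    strncat(subject, value, cap - strlen(subject) - 1);
}

int main(void)
{
    char subject[512] = "";

    /* hypothetical user input; only non-empty fields appear in the result */
    append_rdn(subject, sizeof(subject), "CN", "ldap.example.com");
    append_rdn(subject, sizeof(subject), "O",  "Example Corp");
    append_rdn(subject, sizeof(subject), "OU", "");
    append_rdn(subject, sizeof(subject), "L",  "Brno");
    append_rdn(subject, sizeof(subject), "C",  "CZ");

    printf("%s\n", subject);   /* CN=ldap.example.com,O=Example Corp,L=Brno,C=CZ */
    return 0;
}
```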
commit b8c90a5a757099f466326c1ffc68cb89c74c0b14 Author: James Chapman <[email protected]> Date: Tue Jan 17 11:42:56 2023 +0000 Issue 3604 - Create a private key/CSR with dsconf/Cockpit (#5584) RFE description: There's no way to create a private key and a CSR with dsconf/Cockpit. However, features for importing a certificate exists, but DS also requires the private key in the NSS database to use the certificate Fix Description: Modify dsconf/UI to allow creation of a CSR. relates: https://github.com/389ds/389-ds-base/issues/3604 Reviewed by: @mreynolds389, @droideck (Thank you) diff --git a/src/cockpit/389-console/src/lib/security/certificateManagement.jsx b/src/cockpit/389-console/src/lib/security/certificateManagement.jsx index aff70fea0..3bfbb3674 100644 --- a/src/cockpit/389-console/src/lib/security/certificateManagement.jsx +++ b/src/cockpit/389-console/src/lib/security/certificateManagement.jsx @@ -12,12 +12,16 @@ import { } from "@patternfly/react-core"; import { DoubleConfirmModal } from "../../lib/notifications.jsx"; import { - CertTable + CertTable, + CSRTable, + KeyTable, } from "./securityTables.jsx"; import { EditCertModal, SecurityAddCertModal, SecurityAddCACertModal, + SecurityAddCSRModal, + SecurityViewCSRModal, } from "./securityModals.jsx"; import PropTypes from "prop-types"; import { log_cmd } from "../../lib/tools.jsx"; @@ -29,13 +33,30 @@ export class CertificateManagement extends React.Component { activeTabKey: 0, ServerCerts: this.props.ServerCerts, CACerts: this.props.CACerts, + ServerCSRs: this.props.ServerCSRs, + ServerKeys: this.props.ServerKeys, tableKey: 0, showEditModal: false, showAddModal: false, + showAddCSRModal: false, + showViewCSRModal: false, modalSpinning: false, showConfirmDelete: false, + showCSRConfirmDelete: false, + showKeyConfirmDelete: false, certName: "", certFile: "", + csrContent: "", + csrName: "", + csrSubject: "", + csrSubjectCommonName: "", + csrSubjectOrg: "", + csrSubjectOrgUnit: "", + csrSubjectLocality: "", + csrSubjectState: "", + csrSubjectCountry: "", + csrSubjectEmail: "", + keyID: "", flags: "", _flags: "", errObj: {}, @@ -60,11 +81,16 @@ export class CertificateManagement extends React.Component { this.addCACert = this.addCACert.bind(this); this.handleChange = this.handleChange.bind(this); + this.handleCSRChange = this.handleCSRChange.bind(this); this.addCert = this.addCert.bind(this); this.showAddModal = this.showAddModal.bind(this); this.closeAddModal = this.closeAddModal.bind(this); this.showAddCAModal = this.showAddCAModal.bind(this); this.closeAddCAModal = this.closeAddCAModal.bind(this); + this.showAddCSRModal = this.showAddCSRModal.bind(this); + this.closeAddCSRModal = this.closeAddCSRModal.bind(this); + this.showViewCSRModal = this.showViewCSRModal.bind(this); + this.closeViewCSRModal = this.closeViewCSRModal.bind(this); this.showEditModal = this.showEditModal.bind(this); this.closeEditModal = this.closeEditModal.bind(this); this.showEditCAModal = this.showEditCAModal.bind(this); @@ -73,10 +99,21 @@ export class CertificateManagement extends React.Component { this.doEditCert = this.doEditCert.bind(this); this.closeConfirmCAChange = this.closeConfirmCAChange.bind(this); this.showDeleteConfirm = this.showDeleteConfirm.bind(this); + this.showCSRDeleteConfirm = this.showCSRDeleteConfirm.bind(this); + this.showKeyDeleteConfirm = this.showKeyDeleteConfirm.bind(this); this.delCert = this.delCert.bind(this); + this.addCSR = this.addCSR.bind(this); + this.delCSR = this.delCSR.bind(this); + this.showCSR = this.showCSR.bind(this); 
+ this.delKey = this.delKey.bind(this); this.closeConfirmDelete = this.closeConfirmDelete.bind(this); + this.closeCSRConfirmDelete = this.closeCSRConfirmDelete.bind(this); + this.closeKeyConfirmDelete = this.closeKeyConfirmDelete.bind(this); this.reloadCerts = this.reloadCerts.bind(this); this.reloadCACerts = this.reloadCACerts.bind(this); + this.reloadCSRs = this.reloadCSRs.bind(this); + this.reloadOrphanKeys = this.reloadOrphanKeys.bind(this); + this.buildSubject = this.buildSubject.bind(this); } showAddModal () { @@ -109,6 +146,52 @@ export class CertificateManagement extends React.Component { }); } + showAddCSRModal () { + this.setState({ + showAddCSRModal: true, + csrSubject: "", + csrName: "", + csrSubjectCommonName: "", + csrSubjectOrg: "", + csrSubjectOrgUnit: "", + csrSubjectLocality: "", + csrSubjectState: "", + csrSubjectCountry: "", + csrSubjectEmail: "", + errObj: { csrName: true, csrSubjectCommonName: true}, + }); + } + + closeAddCSRModal () { + this.setState({ + showAddCSRModal: false, + csrSubject: "", + csrName: "", + csrSubjectCommonName: "", + csrSubjectOrg: "", + csrSubjectOrgUnit: "", + csrSubjectLocality: "", + csrSubjectState: "", + csrSubjectCountry: "", + csrSubjectEmail: "", + }); + } + + showViewCSRModal (name) { + this.showCSR(name) + this.setState({ + showViewCSRModal: true, + csrName: name, + errObj: { csrName: true}, + }); + } + + closeViewCSRModal () { + this.setState({ + showViewCSRModal: false, + }); + } + addCert () { if (this.state.certName == "") { this.props.addNotification( @@ -143,6 +226,7 @@ export class CertificateManagement extends React.Component { certName: '', modalSpinning: false }); + this.reloadOrphanKeys(); this.props.addNotification( "success", `Successfully added certificate` @@ -165,6 +249,98 @@ export class CertificateManagement extends React.Component { }); } + addCSR () { + if (this.state.csrName == "") { + this.props.addNotification( + "warning", + `Missing CSR Name` + ); + return; + } else if (this.state.csrSubjectCommonName == "") { + this.props.addNotification( + "warning", + `Missing CSR Subject Common Name` + ); + return; + } + + this.setState({ + modalSpinning: true, + loading: true, + }); + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "security", "csr", "req", "--name=" + this.state.csrName, "--subject=" + this.state.csrSubject + ]; + log_cmd("addCSR", "Creating CSR", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(() => { + this.reloadCSRs(); + this.setState({ + showAddCSRModal: false, + csrSubject: '', + csrName: '', + modalSpinning: false, + }); + this.props.addNotification( + "success", + `Successfully created CSR` + ); + }) + .fail(err => { + const errMsg = JSON.parse(err); + if (errMsg.desc.includes('certutil -s: improperly formatted name:')) { + this.props.addNotification( + "error", + `Error Improperly formatted subject` + ); + } else { + this.props.addNotification( + "error", + `Error creating CSR - ${errMsg.desc}` + ); + } + this.setState({ + modalSpinning: false, + loading: false, + }); + + }); + } + + showCSR (name) { + if (name == "") { + this.props.addNotification( + "warning", + `Missing CSR Name` + ); + return; + } + + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "security", "csr", "get", name + ]; + + log_cmd("showCSR", "Displaying CSR", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message"}) + .done(content => { + this.setState({ + csrContent: content, + 
showViewCSRModal: true, + }); + }) + .fail(err => { + const errMsg = JSON.parse(err); + this.props.addNotification( + "error", + `Error displaying CSR - ${errMsg.desc}` + ); + }); + } + addCACert () { if (this.state.certName == "") { this.props.addNotification( @@ -230,6 +406,24 @@ export class CertificateManagement extends React.Component { }); } + showCSRDeleteConfirm(name) { + this.setState({ + showCSRConfirmDelete: true, + csrName: name, + modalSpinning: false, + modalChecked: false, + }); + } + + showKeyDeleteConfirm(key_id) { + this.setState({ + showKeyConfirmDelete: true, + keyID: key_id, + modalSpinning: false, + modalChecked: false, + }); + } + delCert () { this.setState({ modalSpinning: true, @@ -272,6 +466,92 @@ export class CertificateManagement extends React.Component { }); } + delCSR (name) { + this.setState({ + modalSpinning: true, + loading: true + }); + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "security", "csr", "del", this.state.csrName + ]; + log_cmd("delCSR", "Deleting CSR", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(() => { + this.reloadCSRs(); + this.setState({ + csrName: '', + csrSubject: '', + modalSpinning: false, + showCSRConfirmDelete: false, + }); + this.props.addNotification( + "success", + `Successfully deleted CSR` + ); + }) + .fail(err => { + const errMsg = JSON.parse(err); + let msg = errMsg.desc; + if ('info' in errMsg) { + msg = errMsg.desc + " - " + errMsg.info; + } + this.setState({ + csrName: '', + csrSubject: '', + modalSpinning: false, + loading: false, + }); + this.props.addNotification( + "error", + `Error deleting CSR - ${msg}` + ); + }); + } + + delKey (name) { + this.setState({ + modalSpinning: true, + loading: true + }); + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "security", "key", "del", this.state.keyID + ]; + log_cmd("delKey", "Deleting key", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(() => { + this.reloadOrphanKeys(); + this.setState({ + keyID: '', + modalSpinning: false, + showKeyConfirmDelete: false, + }); + this.props.addNotification( + "success", + `Successfully deleted key` + ); + }) + .fail(err => { + const errMsg = JSON.parse(err); + let msg = errMsg.desc; + if ('info' in errMsg) { + msg = errMsg.desc + " - " + errMsg.info; + } + this.setState({ + keyID: '', + modalSpinning: false, + loading: false, + }); + this.props.addNotification( + "error", + `Error deleting key - ${msg}` + ); + }); + } + showEditModal (name, flags) { this.setState({ showEditModal: true, @@ -384,6 +664,61 @@ export class CertificateManagement extends React.Component { }); } + handleCSRChange (e) { + const value = e.target.value; + const errObj = this.state.errObj; + let valueErr = false; + + if (value == "") { + valueErr = true; + } + errObj[e.target.id] = valueErr; + this.setState({ + [e.target.id]: value, + errObj: errObj + }, this.buildSubject); + } + + buildSubject () { + let subject = "" + const csrSubjectCN = this.state.csrSubjectCommonName; + const csrSubjectO = this.state.csrSubjectOrg; + const csrSubjectOU = this.state.csrSubjectOrgUnit; + const csrSubjectL = this.state.csrSubjectLocality; + const csrSubjectST = this.state.csrSubjectState; + const csrSubjectC = this.state.csrSubjectCountry; + const csrSubjectE = this.state.csrSubjectEmail; + + // Construct CSR subject string from state fields + if (csrSubjectCN.length != 0) { + subject = 'CN=' + csrSubjectCN; + } + if 
(csrSubjectO.length != 0) { + subject += ',O=' + csrSubjectO; + } + if (csrSubjectOU.length != 0) { + subject += ',OU=' + csrSubjectOU; + } + if (csrSubjectL.length != 0) { + subject += ',L=' + csrSubjectL; + } + if (csrSubjectST.length != 0) { + subject += ',ST=' + csrSubjectST; + } + // It would be nice to validate country code, certutil will complain if it isnt valid... + if (csrSubjectC.length != 0) { + subject += ',C=' + csrSubjectC; + } + if (csrSubjectE.length != 0) { + subject += ',E=' + csrSubjectE; + } + + // Update subject state + this.setState({ + csrSubject: subject + }); + } + handleFlagChange (e) { const checked = e.target.checked; const id = e.target.id; @@ -432,7 +767,7 @@ export class CertificateManagement extends React.Component { } newFlags = SSLFlags + "," + EmailFlags + "," + OSFlags; - console.log("MARK flags: ", newFlags, this.state._flags); + if (newFlags != this.state._flags) { disableSaveBtn = false; } @@ -450,6 +785,22 @@ export class CertificateManagement extends React.Component { }); } + closeCSRConfirmDelete () { + this.setState({ + showCSRConfirmDelete: false, + modalSpinning: false, + modalChecked: false, + }); + } + + closeKeyConfirmDelete () { + this.setState({ + showKeyConfirmDelete: false, + modalSpinning: false, + modalChecked: false, + }); + } + reloadCerts () { const cmd = [ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", @@ -485,6 +836,72 @@ export class CertificateManagement extends React.Component { }); } + reloadCSRs () { + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "security", "csr", "list", + ]; + log_cmd("reloadCSRs", "Reload CSRs", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(content => { + const csrs = JSON.parse(content); + const key = this.state.tableKey + 1; + this.setState({ + ServerCSRs: csrs, + loading: false, + tableKey: key, + showConfirmCSRChange: false + }), this.reloadOrphanKeys(); + }) + .fail(err => { + const errMsg = JSON.parse(err); + let msg = errMsg.desc; + if ('info' in errMsg) { + msg = errMsg.desc + " - " + errMsg.info; + } + this.props.addNotification( + "error", + `Error loading CSRs - ${msg}` + ); + }); + } + + reloadOrphanKeys () { + // Set loaded: true + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "security", "key", "list", "--orphan" + ]; + log_cmd("reloadOrphanKeys", "Reload Orphan Keys", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(content => { + const keys = JSON.parse(content); + const key = this.state.tableKey + 1; + this.setState(() => ( + { + ServerKeys: keys, + loading: false, + tableKey: key, + }) + ); + }) + .fail(err => { + const errMsg = JSON.parse(err); + if (!errMsg.desc.includes('certutil: no keys found')) { + this.props.addNotification( + "error", + `Error loading Orphan Keys - ${errMsg.desc}` + ); + } + this.setState({ + loading: false, + ServerKeys: [] + }); + }); + } + reloadCACerts () { const cmd = [ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", @@ -566,6 +983,34 @@ export class CertificateManagement extends React.Component { </Button> </div> </Tab> + <Tab eventKey={2} title={<TabTitleText>Certificate Sigining Requests <font size="2">({this.state.ServerCSRs.length})</font></TabTitleText>}> + <div className="ds-margin-top-lg ds-left-indent"> + <CSRTable + ServerCSRs={this.state.ServerCSRs} + key={this.state.tableKey} + delCSR={this.showCSRDeleteConfirm} + 
viewCSR={this.showViewCSRModal} + /> + <Button + variant="primary" + className="ds-margin-top-med" + onClick={() => { + this.showAddCSRModal(); + }} + > + Create Certificate Sigining Request + </Button> + </div> + </Tab> + <Tab eventKey={3} title={<TabTitleText> Orphan Keys <font size="2">({this.state.ServerKeys.length})</font></TabTitleText>}> + <div className="ds-margin-top-lg ds-left-indent"> + <KeyTable + ServerKeys={this.state.ServerKeys} + key={this.state.tableKey} + delKey={this.showKeyDeleteConfirm} + /> + </div> + </Tab> </Tabs>; } return ( @@ -596,6 +1041,22 @@ export class CertificateManagement extends React.Component { spinning={this.state.modalSpinning} error={this.state.errObj} /> + <SecurityAddCSRModal + showModal={this.state.showAddCSRModal} + closeHandler={this.closeAddCSRModal} + handleChange={this.handleCSRChange} + saveHandler={this.addCSR} + previewValue={this.state.csrSubject} + spinning={this.state.modalSpinning} + error={this.state.errObj} + /> + <SecurityViewCSRModal + showModal={this.state.showViewCSRModal} + closeHandler={this.closeViewCSRModal} + name={this.state.csrName} + item={this.state.csrContent} + error={this.state.errObj} + /> <DoubleConfirmModal showModal={this.state.showConfirmDelete} closeHandler={this.closeConfirmDelete} @@ -609,6 +1070,32 @@ export class CertificateManagement extends React.Component { mSpinningMsg="Deleting Certificate ..." mBtnName="Delete Certificate" /> + <DoubleConfirmModal + showModal={this.state.showCSRConfirmDelete} + closeHandler={this.closeCSRConfirmDelete} + handleChange={this.handleChange} + actionHandler={this.delCSR} + spinning={this.state.modalSpinning} + item={this.state.csrName} + checked={this.state.modalChecked} + mTitle="Delete CSR" + mMsg="Are you sure you want to delete this CSR?" + mSpinningMsg="Deleting CSR ..." + mBtnName="Delete CSR" + /> + <DoubleConfirmModal + showModal={this.state.showKeyConfirmDelete} + closeHandler={this.closeKeyConfirmDelete} + handleChange={this.handleChange} + actionHandler={this.delKey} + spinning={this.state.modalSpinning} + item={this.state.keyID} + checked={this.state.modalChecked} + mTitle="Delete Key" + mMsg="Are you sure you want to delete this Key?" + mSpinningMsg="Deleting Key ..." 
+ mBtnName="Delete Key" + /> <DoubleConfirmModal showModal={this.state.showConfirmCAChange} closeHandler={this.closeConfirmCAChange} @@ -633,6 +1120,8 @@ CertificateManagement.propTypes = { serverId: PropTypes.string, CACerts: PropTypes.array, ServerCerts: PropTypes.array, + ServerCSRs: PropTypes.array, + ServerKeys: PropTypes.array, addNotification: PropTypes.func, }; @@ -640,6 +1129,8 @@ CertificateManagement.defaultProps = { serverId: "", CACerts: [], ServerCerts: [], + ServerCSRs: [], + ServerKeys: [], }; export default CertificateManagement; diff --git a/src/cockpit/389-console/src/lib/security/securityModals.jsx b/src/cockpit/389-console/src/lib/security/securityModals.jsx index 36dc84155..79cc93489 100644 --- a/src/cockpit/389-console/src/lib/security/securityModals.jsx +++ b/src/cockpit/389-console/src/lib/security/securityModals.jsx @@ -2,6 +2,7 @@ import React from "react"; import { Button, Checkbox, + ClipboardCopy, ClipboardCopyVariant, Card, CardBody, CardFooter, CardTitle, Form, FormSelect, FormSelectOption, @@ -200,6 +201,261 @@ export class SecurityAddCertModal extends React.Component { } } +export class SecurityAddCSRModal extends React.Component { + render() { + const { + showModal, + closeHandler, + handleChange, + saveHandler, + previewValue, + spinning, + error + } = this.props; + + let saveBtnName = "Create Certificate Signing Request"; + const extraPrimaryProps = {}; + if (spinning) { + saveBtnName = "Creating Certificate Signing Request ..."; + extraPrimaryProps.spinnerAriaValueText = "Saving"; + } + + return ( + <Modal + variant={ModalVariant.medium} + aria-labelledby="ds-modal" + title="Create Certificate Signing Request" + isOpen={showModal} + onClose={closeHandler} + actions={[ + <Button + key="confirm" + variant="primary" + onClick={saveHandler} + isLoading={spinning} + spinnerAriaValueText={spinning ? "Saving" : undefined} + {...extraPrimaryProps} + isDisabled={error.csrName || error.csrSubjectCommonName || spinning} + > + {saveBtnName} + </Button>, + <Button key="cancel" variant="link" onClick={closeHandler}> + Cancel + </Button> + ]} + > + <Form className="ds-margin-top" isHorizontal autoComplete="off"> + <Grid title="CSR Name"> + <GridItem className="ds-label" span={3}> + Name + </GridItem> + <GridItem span={9}> + <TextInput + title="Name used to identify a CSR" + type="text" + id="csrName" + aria-describedby="horizontal-form-name-helper" + name="csrName" + onChange={(str, e) => { + handleChange(e); + }} + validated={error.csrName ? ValidatedOptions.error : ValidatedOptions.default} + /> + </GridItem> + </Grid> + <Grid + title="CSR Subject: Common Name" + className="ds-margin-top" + > + <GridItem className="ds-label" span={3}> + Common Name (CN) + </GridItem> + <GridItem span={9}> + <TextInput + title="The fully qualified domain name (FQDN) of your server" + type="text" + id="csrSubjectCommonName" + aria-describedby="horizontal-form-name-helper" + name="csrSubjectCommonName" + onChange={(str, e) => { + handleChange(e); + }} + validated={error.csrSubjectCommonName ? 
ValidatedOptions.error : ValidatedOptions.default} + /> + </GridItem> + </Grid> + <Grid + title="CSR Subject: Organisation" + className="ds-margin-top" + > + <GridItem className="ds-label" span={3}> + Organization (O) + </GridItem> + <GridItem span={9}> + <TextInput + title="The legal name of your organization" + type="text" + id="csrSubjectOrg" + aria-describedby="horizontal-form-name-helper" + name="csrSubjectOrg" + onChange={(str, e) => { + handleChange(e); + }} + /> + </GridItem> + </Grid> + <Grid + title="CSR Subject: Organisational Unit" + className="ds-margin-top" + > + <GridItem className="ds-label" span={3}> + Organizational Unit (OU) + </GridItem> + <GridItem span={9}> + <TextInput + title="The division of your organization handling the certificate" + type="text" + id="csrSubjectOrgUnit" + aria-describedby="horizontal-form-name-helper" + name="csrSubjectOrgUnit" + onChange={(str, e) => { + handleChange(e); + }} + /> + </GridItem> + </Grid> + <Grid + title="CSR Subject: City/Locality" + className="ds-margin-top" + > + <GridItem className="ds-label" span={3}> + City/Locality (L) + </GridItem> + <GridItem span={9}> + <TextInput + title="The city where your organization is located" + type="text" + id="csrSubjectLocality" + aria-describedby="horizontal-form-name-helper" + name="csrSubjectLocality" + onChange={(str, e) => { + handleChange(e); + }} + /> + </GridItem> + </Grid> + <Grid + title="CSR Subject: State/Region" + className="ds-margin-top" + > + <GridItem className="ds-label" span={3}> + State/County/Region (ST) + </GridItem> + <GridItem span={9}> + <TextInput + title="The state/region where your organization is located" + type="text" + id="csrSubjectState" + aria-describedby="horizontal-form-name-helper" + name="csrSubjectState" + onChange={(str, e) => { + handleChange(e); + }} + /> + </GridItem> + </Grid> + <Grid + title="CSR Subject: Country Code" + className="ds-margin-top" + > + <GridItem className="ds-label" span={3}> + Country Code (C) + </GridItem> + <GridItem span={9}> + <TextInput + title="Two-letter country code where organization is located" + type="text" + id="csrSubjectCountry" + aria-describedby="horizontal-form-name-helper" + name="csrSubjectCountry" + onChange={(str, e) => { + handleChange(e); + }} + /> + </GridItem> + </Grid> + <Grid + title="CSR Subject: Email Address" + className="ds-margin-top" + > + <GridItem className="ds-label" span={3}> + Email Address + </GridItem> + <GridItem span={9}> + <TextInput + title="Email address used to contact your organization" + type="text" + id="csrSubjectEmail" + aria-describedby="horizontal-form-name-helper" + name="csrSubjectEmail" + onChange={(str, e) => { + handleChange(e); + }} + /> + </GridItem> + </Grid> + <hr /> + <Grid> + <GridItem span={3}> + Computed Subject + </GridItem> + <GridItem span={9}> + <b>{previewValue}</b> + </GridItem> + </Grid> + <hr /> + </Form> + </Modal> + ); + } +} + +export class SecurityViewCSRModal extends React.Component { + render() { + const { + showModal, + closeHandler, + item, + name, + } = this.props; + + return ( + <Modal + variant={ModalVariant.medium} + aria-labelledby="ds-modal" + title={name + ".csr"} + isOpen={showModal} + onClose={closeHandler} + actions={[ + <Button key="cancel" variant="link" onClick={closeHandler}> + Cancel + </Button> + ]} + > + <TextContent title="CSR content"> + <Text component={TextVariants.pre}> + <Text component={TextVariants.small}> + <ClipboardCopy hoverTip="Copy to clipboard" clickTip="Copied" variant={ClipboardCopyVariant.expansion} isBlock> + 
{item ? item : "Nothing to display"} + </ClipboardCopy> + </Text> + </Text> + </TextContent> + </Modal> + ); + } +} + export class SecurityEnableModal extends React.Component { render() { const { @@ -668,3 +924,33 @@ SecurityAddCACertModal.defaultProps = { spinning: false, error: {}, }; + +SecurityAddCSRModal.propTypes = { + showModal: PropTypes.bool, + closeHandler: PropTypes.func, + handleChange: PropTypes.func, + saveHandler: PropTypes.func, + spinning: PropTypes.bool, + error: PropTypes.object, +}; + +SecurityAddCSRModal.defaultProps = { + showModal: false, + spinning: false, + error: {}, +}; + +SecurityViewCSRModal.propTypes = { + showModal: PropTypes.bool, + closeHandler: PropTypes.func, + handleChange: PropTypes.func, + saveHandler: PropTypes.func, + spinning: PropTypes.bool, + error: PropTypes.object, +}; + +SecurityViewCSRModal.defaultProps = { + showModal: false, + spinning: false, + error: {}, +}; \ No newline at end of file diff --git a/src/cockpit/389-console/src/lib/security/securityTables.jsx b/src/cockpit/389-console/src/lib/security/securityTables.jsx index 0ce8facdd..725cf2c35 100644 --- a/src/cockpit/389-console/src/lib/security/securityTables.jsx +++ b/src/cockpit/389-console/src/lib/security/securityTables.jsx @@ -5,6 +5,7 @@ import { Pagination, PaginationVariant, SearchInput, + Tooltip, } from '@patternfly/react-core'; import { expandable, @@ -17,6 +18,284 @@ import { } from '@patternfly/react-table'; import PropTypes from "prop-types"; +class KeyTable extends React.Component { + constructor(props) { + super(props); + + this.state = { + page: 1, + perPage: 10, + value: '', + sortBy: {}, + rows: [], + columns: [ + { title: 'Cipher', transforms: [sortable] }, + { title: 'Key Identifier', transforms: [sortable] }, + { title: 'State', transforms: [sortable] }, + ], + }; + + this.onSetPage = (_event, pageNumber) => { + this.setState({ + page: pageNumber + }); + }; + + this.onPerPageSelect = (_event, perPage) => { + this.setState({ + perPage: perPage + }); + }; + } + + componentDidMount() { + let rows = []; + let columns = this.state.columns; + + for (const ServerKey of this.props.ServerKeys) { + rows.push( + { + isOpen: false, + cells: [ServerKey.attrs.cipher, ServerKey.attrs.key_id, ServerKey.attrs.state], + }, + ); + } + + if (rows.length == 0) { + rows = [{ cells: ['No Orphan keys'] }]; + columns = [{ title: 'Orphan keys' }]; + } + this.setState({ + rows: rows, + columns: columns + }); + } + + actions() { + return [ + { + title: 'Delete Key', + onClick: (event, rowId, rowData, extra) => { + if (rowData.cells[1]) { + this.props.delKey(rowData.cells[1]) + } + } + } + ]; + } + + render() { + const { perPage, page, sortBy, rows, columns } = this.state; + + return ( + <div className="ds-margin-top-lg"> + <Tooltip + content={ + <div> + <p align="justify"> + An orphan key is a private key in the NSS DB for which there is NO cert + with the corresponding public key. An orphan key is created during CSR creation, + when the certificate associated with a CSR has been imported into the NSS DB its + orphan state will be removed. + <br /><br /> + Make sure an orphan key is not associated with a submitted CSR before you delete it. + </p> + </div> + } + > + <a className="ds-font-size-sm">What is an orphan key?</a> + </Tooltip> + <Table + className="ds-margin-top" + aria-label="orph key table" + cells={columns} + key={rows} + rows={rows} + variant={TableVariant.compact} + sortBy={sortBy} + onSort={this.onSort} + actions={rows.length > 0 ? 
this.actions() : null} + dropdownPosition="right" + dropdownDirection="bottom" + > + <TableHeader /> + <TableBody /> + </Table> + <Pagination + itemCount={this.state.rows.length} + widgetId="pagination-options-menu-bottom" + perPage={this.state.perPage} + page={page} + variant={PaginationVariant.bottom} + onSetPage={this.onSetPage} + onPerPageSelect={this.onPerPageSelect} + /> + </div> + ); + } +} + +class CSRTable extends React.Component { + constructor(props) { + super(props); + + this.state = { + page: 1, + perPage: 10, + value: '', + sortBy: {}, + rows: [], + columns: [ + { title: 'Name', transforms: [sortable] }, + { title: 'Subject DN', transforms: [sortable] }, + { title: 'Modification Date', transforms: [sortable] }, + ], + }; + + this.onSetPage = (_event, pageNumber) => { + this.setState({ + page: pageNumber + }); + }; + + this.onPerPageSelect = (_event, perPage) => { + this.setState({ + perPage: perPage, + page: 1 + }); + }; + + this.onSort = this.onSort.bind(this); + this.onSearchChange = this.onSearchChange.bind(this); + } + + + onSort(_event, index, direction) { + const sortedRows = this.state.rows.sort((a, b) => (a[index] < b[index] ? -1 : a[index] > b[index] ? 1 : 0)); + this.setState({ + sortBy: { + index, + direction + }, + rows: direction === SortByDirection.asc ? sortedRows : sortedRows.reverse() + }); + } + + componentDidMount() { + let rows = []; + let columns = this.state.columns; + + for (const ServerCSR of this.props.ServerCSRs) { + rows.push( + { + isOpen: false, + cells: [ServerCSR.attrs.name, ServerCSR.attrs.subject, ServerCSR.attrs.modified], + }, + ); + } + if (rows.length == 0) { + rows = [{ cells: ['No Certificate Signing Requests'] }]; + columns = [{ title: 'Certificate Signing Requests' }]; + } + this.setState({ + rows: rows, + columns: columns, + }); + } + + onSearchChange(value, event) { + const rows = []; + let count = 0; + + for (const cert of this.props.ServerCSRs) { + const val = value.toLowerCase(); + + // Check for matches of all the parts + if (val != "" && cert.attrs.name.toLowerCase().indexOf(val) == -1 && + cert.attrs.subject.toLowerCase().indexOf(val) == -1 && + cert.attrs.modified.toLowerCase().indexOf(val) == -1) { + // Not a match + continue; + } + + rows.push( + { + isOpen: false, + cells: [cert.attrs.name, cert.attrs.subject, cert.attrs.modified], + + }, + ); + } + + this.setState({ + rows: rows, + value: value, + page: 1, + }); + } + + actions() { + return [ + { + title: 'Delete CSR', + onClick: (event, rowId, rowData, extra) => { + if (rowData.cells.length > 1) { + this.props.delCSR(rowData.cells[0]) + } + } + }, + { + title: 'View CSR', + onClick: (event, rowId, rowData, extra) => { + if (rowData.cells.length > 1) { + this.props.viewCSR(rowData.cells[0]) + } + } + } + ]; + } + + render() { + const { perPage, page, sortBy, rows, columns } = this.state; + + return ( + <div className="ds-margin-top-lg"> + <SearchInput + placeholder='Search CSRs' + value={this.state.value} + onChange={this.onSearchChange} + onClear={(evt) => this.onSearchChange('', evt)} + /> + <Table + className="ds-margin-top" + aria-label="csr table" + cells={columns} + key={rows} + rows={rows} + variant={TableVariant.compact} + sortBy={sortBy} + onSort={this.onSort} + actions={rows.length > 0 ? 
this.actions() : null} + dropdownPosition="right" + dropdownDirection="bottom" + > + <TableHeader /> + <TableBody /> + </Table> + <Pagination + itemCount={this.state.rows.length} + widgetId="pagination-options-menu-bottom" + perPage={perPage} + page={page} + variant={PaginationVariant.bottom} + onSetPage={this.onSetPage} + onPerPageSelect={this.onPerPageSelect} + /> + </div> + ); + } +} + class CertTable extends React.Component { constructor(props) { super(props); @@ -446,7 +725,27 @@ CertTable.defaultProps = { certs: [], }; +CSRTable.propTypes = { + ServerCSRs: PropTypes.array, + delCSR: PropTypes.func, + viewCSR: PropTypes.func, +}; + +CSRTable.defaultProps = { + ServerCSRs: [], +}; + +KeyTable.propTypes = { + ServerKeys: PropTypes.array, + delKey: PropTypes.func, +}; + +KeyTable.defaultProps = { + ServerKeys: [], +}; export { CertTable, - CRLTable + CRLTable, + CSRTable, + KeyTable, }; diff --git a/src/cockpit/389-console/src/security.jsx b/src/cockpit/389-console/src/security.jsx index cefe6eb0b..e5c4f9021 100644 --- a/src/cockpit/389-console/src/security.jsx +++ b/src/cockpit/389-console/src/security.jsx @@ -74,6 +74,10 @@ export class Security extends React.Component { // Ciphers supportedCiphers: [], enabledCiphers: [], + // Certificate Signing Requests + serverCSRs: [], + // Orphan keys + serverOrphanKeys: [], // Config settings securityEnabled: false, requireSecureBinds: false, @@ -226,6 +230,7 @@ export class Security extends React.Component { this.loadSupportedCiphers = this.loadSupportedCiphers.bind(this); this.loadCerts = this.loadCerts.bind(this); this.loadCACerts = this.loadCACerts.bind(this); + this.loadCSRs = this.loadCSRs.bind(this); this.closeConfirmDisable = this.closeConfirmDisable.bind(this); this.enableSecurity = this.enableSecurity.bind(this); this.disableSecurity = this.disableSecurity.bind(this); @@ -318,7 +323,7 @@ export class Security extends React.Component { "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", "security", "ca-certificate", "list", ]; - log_cmd("loadCACerts", "Load certificates", cmd); + log_cmd("loadCACerts", "Load CA certificates", cmd); cockpit .spawn(cmd, { superuser: true, err: "message" }) .done(content => { @@ -326,8 +331,7 @@ export class Security extends React.Component { this.setState(() => ( { CACerts: certs, - loaded: true - }), this.props.enableTree() + }), this.loadCSRs ); }) .fail(err => { @@ -378,6 +382,67 @@ export class Security extends React.Component { }); } + loadCSRs () { + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "security", "csr", "list", + ]; + log_cmd("loadCSRs", "Load CSRs", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(content => { + const csrs = JSON.parse(content); + this.setState(() => ( + { + serverCSRs: csrs, + }), this.loadOrphanKeys + ); + }) + .fail(err => { + const errMsg = JSON.parse(err); + let msg = errMsg.desc; + if ('info' in errMsg) { + msg = errMsg.desc + " - " + errMsg.info; + } + this.props.addNotification( + "error", + `Error loading CSRs - ${msg}` + ); + }); + } + + loadOrphanKeys () { + const cmd = [ + "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", + "security", "key", "list", "--orphan" + ]; + log_cmd("loadOrphanKeys", "Load Orphan Keys", cmd); + cockpit + .spawn(cmd, { superuser: true, err: "message" }) + .done(content => { + const keys = JSON.parse(content); + this.setState(() => ( + { + serverOrphanKeys: keys, + loaded: true + }), 
this.props.enableTree() + ); + }) + .fail(err => { + const errMsg = JSON.parse(err); + if (!errMsg.desc.includes('certutil: no keys found')) { + this.props.addNotification( + "error", + `Error loading Orphan Keys - ${errMsg.desc}` + ); + } + this.setState({ + loaded: true, + serverOrphanKeys: [] + }, this.props.enableTree()); + }); + } + loadRSAConfig() { const cmd = [ "dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket", @@ -1071,6 +1136,8 @@ export class Security extends React.Component { serverId={this.props.serverId} CACerts={this.state.CACerts} ServerCerts={this.state.serverCerts} + ServerCSRs={this.state.serverCSRs} + ServerKeys={this.state.serverOrphanKeys} addNotification={this.props.addNotification} /> </Tab> diff --git a/src/lib389/lib389/cli_conf/security.py b/src/lib389/lib389/cli_conf/security.py index d06eee05c..733a0dd4e 100644 --- a/src/lib389/lib389/cli_conf/security.py +++ b/src/lib389/lib389/cli_conf/security.py @@ -344,6 +344,68 @@ def cert_get(inst, basedn, log, args): log.info('Trust Flags: {}'.format(details[4])) +def csr_list(inst, basedn, log, args): + """ + List all files with .csr extension in instance config dir + """ + csr_list = [] + tlsdb = NssSsl(dirsrv=inst) + details = tlsdb._csr_list(args.path) + for detail in details: + if args.json: + csr_list.append( + { + "type": "csr", + "attrs": { + 'modified': detail[0], + 'subject': detail[1], + 'name': detail[2], + } + } + ) + else: + log.info('Modified: {}'.format(detail[0])) + log.info('Subject: {}\n'.format(detail[1])) + log.info('Name: {}'.format(detail[2])) + + if args.json: + log.info(json.dumps(csr_list, indent=4)) + + +def csr_get(inst, basedn, log, args): + """ + Show PEM format of a csr + """ + tlsdb = NssSsl(dirsrv=inst) + details = tlsdb._csr_show(args.name) + log.info(f"{details}") + + +def csr_gen(inst, basedn, log, args): + """ + Generate a .csr file in instance config dir + """ + tlsdb = NssSsl(dirsrv=inst) + alt_names = args.alt_names + subject = args.subject + name = args.name + out_path = tlsdb.create_rsa_key_and_csr(alt_names, subject, name) + log.info(out_path) + + +def csr_del(inst, basedn, log, args): + """ + Delete a .csr file from instance config dir + """ + csr_dir = inst.get_cert_dir() + file_path = os.path.join(csr_dir, args.name + ".csr") + try: + os.remove(file_path) + except FileNotFoundError: + raise ValueError(file_path + " not found") + log.info(f"Successfully deleted: " + file_path) + + def cert_edit(inst, basedn, log, args): """Edit cert """ @@ -360,6 +422,44 @@ def cert_del(inst, basedn, log, args): log.info(f"Successfully deleted certificate") +def key_list(inst, basedn, log, args): + """ + List keys in the NSS DB + """ + key_list = [] + tls = NssSsl(dirsrv=inst) + keys = tls.list_keys(args.orphan) + + for key in keys: + if args.json: + key_list.append( + { + "type": "key", + "attrs": { + 'cipher': key[0], + 'key_id': key[1], + 'state': key[2], + } + } + ) + else: + log.info('Cipher: {}'.format(key[0])) + log.info('Key Id: {}'.format(key[1])) + log.info('State: {}\n'.format(key[2])) + + if args.json: + log.info(json.dumps(key_list, indent=4)) + + +def key_del(inst, basedn, log, args): + """ + Delete a key from NSS DB + """ + tls = NssSsl(dirsrv=inst) + keys = tls.del_key(args.key_id) + log.info(keys) + + def create_parser(subparsers): security = subparsers.add_parser('security', help='Manage security settings') security_sub = security.add_subparsers(help='security') @@ -495,3 +595,45 @@ def create_parser(subparsers): help='Lists only supported 
ciphers') ciphers_list_group.add_argument('--disabled', action='store_true', help='Lists only supported ciphers but without enabled ciphers') + + # Certificate Signing Request Management + csr = security_sub.add_parser('csr', help='Manage certificate signing requests') + csr_sub = csr.add_subparsers(help='csr') + + csr_list_parser = csr_sub.add_parser('list', help='List CSRs', description=('List all CSR files in instance' + ' configuration directiory')) + csr_list_parser.add_argument('--path', '-p', default=None, help="Directory contanining CSR file") + csr_list_parser.set_defaults(func=csr_list) + + csr_get_parser = csr_sub.add_parser('get', help='Display CSR content', description=('Displays the contents of a CSR, ' + ' which can be used for submittal to CA')) + csr_get_parser.add_argument('name', help="Name of the CSR file to display") + csr_get_parser.set_defaults(func=csr_get) + + csr_req_parser = csr_sub.add_parser('req', help='Generate a Certificate Signing Request', + description=('Generate a CSR that can be submitted to a CA for verification')) + csr_req_parser.add_argument('--subject', '-s', required=True, help="Subject field") + csr_req_parser.add_argument('--name', '-n', required=True, help="Name") + csr_req_parser.add_argument('alt_names', nargs='*', + help="CSR alternative names. These are auto-detected if not provided") + csr_req_parser.set_defaults(func=csr_gen) + + csr_delete_parser = csr_sub.add_parser('del', help='Delete a CSR file', description=('Delete a CSR file')) + csr_delete_parser.add_argument('name', help="Name of the CSR file to delete") + csr_delete_parser.set_defaults(func=csr_del) + + # Key Management + key = security_sub.add_parser('key', help='Manage keys in NSS DB') + key_sub = key.add_subparsers(help='key') + + key_list_parser = key_sub.add_parser('list', help='List all keys in NSS DB') + key_list_parser.add_argument('--orphan', action='store_true', help='List orphan keys (An orphan key is' + ' a private key in the NSS DB for which there is NO cert with the corresponding ' + ' public key). An orphan key is created during CSR generation, when the associated certificate is imported' + ' into the NSS DB, its orphan state will be removed.') + key_list_parser.set_defaults(func=key_list) + + key_del_parser = key_sub.add_parser('del', help='Delete a key from NSS DB', description=('Remove a' + ' key from the NSS DB. Make sure the key is not in use before you delete')) + key_del_parser.add_argument('key_id', help='This is the key ID displayed when listing keys') + key_del_parser.set_defaults(func=key_del) \ No newline at end of file diff --git a/src/lib389/lib389/nss_ssl.py b/src/lib389/lib389/nss_ssl.py index b4d913cf3..b0e4aec8c 100644 --- a/src/lib389/lib389/nss_ssl.py +++ b/src/lib389/lib389/nss_ssl.py @@ -132,7 +132,6 @@ class NssSsl(DSLint): :type alt_names: [str, ] :returns: String of the subject DN. """ - if self.dirsrv and len(alt_names) > 0: return SELF_ISSUER.format(GIVENNAME=self.dirsrv.get_uuid(), HOSTNAME=alt_names[0]) elif len(alt_names) > 0: @@ -461,6 +460,98 @@ only. 
cert_values.append(re.match(r'^(.+[^\s])[\s]+([^\s]+)$', line.rstrip()).groups()) return cert_values + def _openssl_get_csr_subject(self, csr_dir, csr_name): + cmd = [ + '/usr/bin/openssl', + 'req', + '-subject', + '-noout', + '-in', + '%s/%s'% (csr_dir, csr_name), + ] + self.log.debug("cmd: %s", format_cmd_list(cmd)) + try: + result = ensure_str(check_output(cmd, stderr=subprocess.STDOUT)) + except subprocess.CalledProcessError as e: + raise ValueError(e.output.decode('utf-8').rstrip()) + + # Parse the subject string from openssl output + result = result.replace("subject=", "") + result = result.replace(" ", "").strip() + result = result.split(',') + result = result[slice(None, None, -1)] + result = ','.join([str(elem) for elem in result]) + return result + + def _csr_show(self, name): + csr_dir = self.dirsrv.get_cert_dir() + result = "" + # Display PEM contents of a CSR file + if name and csr_dir: + if os.path.exists(csr_dir + "/" + name + ".csr"): + cmd = [ + "/usr/bin/sed", + "-n", + '/BEGIN NEW/,/END NEW/p', + csr_dir + "/" + name + ".csr" + ] + self.log.debug("cmd: %s", format_cmd_list(cmd)) + try: + result = ensure_str(check_output(cmd, stderr=subprocess.STDOUT)) + except subprocess.CalledProcessError as e: + raise ValueError(e.output.decode('utf-8').rstrip()) + + return result + + def _csr_list(self, csr_dir=None): + csr_list = [] + csr_dir = self.dirsrv.get_cert_dir() + # Search for .csr file extensions in instance config dir + cmd = [ + '/usr/bin/find', + csr_dir, + '-type', + 'f', + '-name', + '*.csr', + '-printf', + '%f\\n', + ] + self.log.debug("cmd: %s", format_cmd_list(cmd)) + try: + result = ensure_str(check_output(cmd, stderr=subprocess.STDOUT)) + except subprocess.CalledProcessError as e: + raise ValueError(e.output.decode('utf-8').rstrip()) + + # Bail out if we cant find any .csr files + if len(result) == 0: + return [] + + # For each .csr file, get last modified time and subject DN + for csr_file in result.splitlines(): + csr = [] + # Get last modified time stamp + cmd = [ + '/usr/bin/date', + '-r', + '%s/%s'% (csr_dir, csr_file), + '+%Y-%m-%d %H:%M:%S', + ] + try: + result = ensure_str(check_output(cmd, stderr=subprocess.STDOUT)) + except subprocess.CalledProcessError as e: + raise ValueError(e.output.decode('utf-8').rstrip()) + + # Add csr modified timestamp + csr.append(result.strip()) + # Use openssl to get the csr subject DN + csr.append(self._openssl_get_csr_subject(csr_dir, csr_file)) + # Add csr name, without extension + csr.append(csr_file.rsplit('.', 1)[0]) + csr_list.append(csr) + + return csr_list + def _rsa_cert_key_exists(self, cert_tuple): name = cert_tuple[0] cmd = [ @@ -587,18 +678,22 @@ only. self.log.debug("nss output: %s", result) return True - def create_rsa_key_and_csr(self, alt_names=[], subject=None): + def create_rsa_key_and_csr(self, alt_names=[], subject=None, name=None): """Create a new RSA key and the certificate signing request. This request can be submitted to a CA for signing. The returned certificate can be added with import_rsa_crt. """ - csr_path = os.path.join(self._certdb, '%s.csr' % CERT_NAME) + if name is None: + csr_path = os.path.join(self._certdb, '%s.csr' % CERT_NAME) + else: + csr_path = os.path.join(self._certdb, '%s.csr' % name) if len(alt_names) == 0: alt_names = self.detect_alt_names(alt_names) if subject is None: subject = self.generate_cert_subject(alt_names) + self.log.debug(f"CSR name -> {name}") self.log.debug(f"CSR subject -> {subject}") self.log.debug(f"CSR alt_names -> {alt_names}") @@ -856,6 +951,52 @@ only. 
crt_der_path = '%s/%s%s.der' % (self._certdb, USER_PREFIX, name) return {'ca': ca_path, 'key': key_path, 'crt': crt_path, 'crt_der_path': crt_der_path} + def list_keys(self, orphan=None): + key_list = [] + cmd = [ + '/usr/bin/certutil', + '-K', + '-d', + self._certdb, + '-f', + '%s/%s' % (self._certdb, PWD_TXT), + ] + try: + result = ensure_str(check_output(cmd, stderr=subprocess.STDOUT)) + except subprocess.CalledProcessError as e: + raise ValueError(e.output.decode('utf-8').rstrip()) + + # Ignore the first line of certutil output + for line in result.splitlines()[1:]: + # Normalise the output of certutil + line = re.sub(r"\<[^>]*\>","", line) + key = re.split(r'\s{2,}', line) + if orphan: + if 'orphan' in line: + key_list.append(key) + else: + key_list.append(key) + + return key_list + + def del_key(self, keyid): + cmd = [ + '/usr/bin/certutil', + '-F', + '-d', + self._certdb, + '-f', + '%s/%s' % (self._certdb, PWD_TXT), + '-k', + keyid, + ] + try: + result = ensure_str(check_output(cmd, stderr=subprocess.STDOUT)) + except subprocess.CalledProcessError as e: + raise ValueError(e.output.decode('utf-8').rstrip()) + + return result + # Certificate helper functions def del_cert(self, nickname): """Delete this certificate
0
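The diff above adds `dsconf security csr` and `dsconf security key` subcommands on top of new NssSsl helpers (`create_rsa_key_and_csr` with a `name` argument, `_csr_list`, `_csr_show`, `list_keys`, `del_key`). Below is a minimal sketch, not part of the commit, of how those helpers could be driven straight from lib389; it assumes a locally installed instance named `localhost`, and the subject and CSR name are made-up values. `_csr_list` is the private helper the CLI wraps, so the supported entry point remains `dsconf <inst> security csr list`.

```python
# Sketch only (not from the commit): drive the new NssSsl helpers from lib389.
# Assumes a local instance named "localhost"; subject and CSR name are made up.
from lib389 import DirSrv
from lib389.nss_ssl import NssSsl

inst = DirSrv()
inst.local_simple_allocate(serverid="localhost")

tls = NssSsl(dirsrv=inst)

# Generate a key pair and a named CSR file in the instance cert directory,
# mirroring "dsconf <inst> security csr req -s <subject> -n <name>".
csr_path = tls.create_rsa_key_and_csr(
    alt_names=[],                        # empty list => alt names auto-detected
    subject="CN=ldap.example.com,O=Example",
    name="Server-Cert-2024",
)
print("CSR written to:", csr_path)

# Each CSR entry is [modified, subject, name]; each key entry is parsed
# certutil -K output where the CLI reads index 0/1/2 as cipher/key id/state.
for modified, subject, name in tls._csr_list():
    print(name, subject, modified)
for key in tls.list_keys(orphan=True):
    print(key[0], key[1], key[2])
```

These calls map one-to-one onto the new CLI added in the same diff: `security csr req -s <subject> -n <name>`, `security csr list`, and `security key list --orphan`.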
c4c033621feccdb9860af47d498074adc332a2bc
389ds/389-ds-base
Ticket 48415 Bug Description: We commonly use encoded or other forms of our default suffix. We should make these variables in case we change them. Fix Description: Add default domain parameter https://fedorahosted.org/389/ticket/48415 Author: wibrown Review by: One line rule
commit c4c033621feccdb9860af47d498074adc332a2bc Author: William Brown <[email protected]> Date: Wed Jan 20 09:28:09 2016 +1000 Ticket 48415 Bug Description: We commonly use encoded or other forms of our default suffix. We should make these variables in case we change them. Fix Description: Add default domain parameter https://fedorahosted.org/389/ticket/48415 Author: wibrown Review by: One line rule diff --git a/src/lib389/lib389/_constants.py b/src/lib389/lib389/_constants.py index 38593be7c..a0476f8cf 100644 --- a/src/lib389/lib389/_constants.py +++ b/src/lib389/lib389/_constants.py @@ -85,6 +85,7 @@ DEFAULT_PORT = 389 DEFAULT_SECURE_PORT = 636 DEFAULT_SUFFIX = 'dc=example,dc=com' DEFAULT_SUFFIX_ESCAPED = 'dc\3Dexample\2Cdc\3Dcom' +DEFAULT_DOMAIN = 'example.com' DEFAULT_BENAME = 'userRoot' # warning it is case sensitive DEFAULT_BACKUPDIR = '/tmp' DEFAULT_INST_HEAD = 'slapd-'
0
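The commit above adds `DEFAULT_DOMAIN = 'example.com'` next to `DEFAULT_SUFFIX` in lib389._constants so callers stop hard-coding the domain form of the default suffix. A small illustrative use follows; the mail value is only an example and not something the commit introduces.

```python
# Illustrative only: build domain-shaped test values from the new constant
# instead of hard-coding "example.com" alongside DEFAULT_SUFFIX.
from lib389._constants import DEFAULT_SUFFIX, DEFAULT_DOMAIN

# "example.com" and "dc=example,dc=com" stay in sync if the defaults change.
derived_suffix = ','.join('dc=' + label for label in DEFAULT_DOMAIN.split('.'))
assert derived_suffix == DEFAULT_SUFFIX

test_mail = 'testuser@' + DEFAULT_DOMAIN   # e.g. a test account's mail value
print(derived_suffix, test_mail)
```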
f51305f5a6739c20d95bdbce98baa26f6cebf844
389ds/389-ds-base
Issue 47536 - Fix CI testcase Description: The test was failing because we were not generating server certs using the fqdn https://pagure.io/389-ds-base/issue/47536 Reviewed by: viktor (Thanks!)
commit f51305f5a6739c20d95bdbce98baa26f6cebf844 Author: Mark Reynolds <[email protected]> Date: Wed Mar 8 15:08:59 2017 -0500 Issue 47536 - Fix CI testcase Description: The test was failing because we were not generating server certs using the fqdn\ https://pagure.io/389-ds-base/issue/47536 Reviewed by: viktor(Thanks!) diff --git a/dirsrvtests/tests/tickets/ticket47536_test.py b/dirsrvtests/tests/tickets/ticket47536_test.py index b5ec020c0..ef9817f92 100644 --- a/dirsrvtests/tests/tickets/ticket47536_test.py +++ b/dirsrvtests/tests/tickets/ticket47536_test.py @@ -7,7 +7,7 @@ # --- END COPYRIGHT BLOCK --- # import base64 - +import os import pytest from lib389.tasks import * from lib389.utils import * @@ -28,8 +28,8 @@ M1SERVERCERT = 'Server-Cert1' M2SERVERCERT = 'Server-Cert2' M1LDAPSPORT = '41636' M2LDAPSPORT = '42636' -M1SUBJECT = 'CN={},OU=389 Directory Server'.format(HOST_MASTER_1) -M2SUBJECT = 'CN={},OU=390 Directory Server'.format(HOST_MASTER_2) +M1SUBJECT = 'CN=' + os.uname()[1] + ',OU=389 Directory Server' +M2SUBJECT = 'CN=' + os.uname()[1] + ',OU=390 Directory Server' def add_entry(server, name, rdntmpl, start, num):
0
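The fix above derives both certificate subjects from the local host name (`os.uname()[1]`) instead of the fixed HOST_MASTER_* constants, so the generated Server-Cert CNs match the name the TLS layer verifies. A short illustration of the same idea is sketched below; `socket.getfqdn()` is included only for comparison and can differ from `os.uname()[1]` depending on resolver configuration.

```python
# Illustration of the fix: derive the certificate subject from the local host
# name rather than a fixed HOST_MASTER_* constant.
import os
import socket

hostname = os.uname()[1]          # what the fixed test uses
fqdn = socket.getfqdn()           # often the same value, but resolver-dependent

M1SUBJECT = 'CN=' + hostname + ',OU=389 Directory Server'
M2SUBJECT = 'CN=' + hostname + ',OU=390 Directory Server'
print(M1SUBJECT, M2SUBJECT, fqdn)
```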
29043c5716a1bc8364689d518cb4e35722eaaf77
389ds/389-ds-base
Ticket #48224 - redux 2 - logconv.pl should handle *.tar.xz, *.txz, *.xz log files https://fedorahosted.org/389/ticket/48224 Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: Use $? instead of $! to get pipe errors. Platforms tested: Fedora 21, RHEL 7.2 candidate Flag Day: no Doc impact: no
commit 29043c5716a1bc8364689d518cb4e35722eaaf77 Author: Rich Megginson <[email protected]> Date: Mon Jul 20 10:31:46 2015 -0600 Ticket #48224 - redux 2 - logconv.pl should handle *.tar.xz, *.txz, *.xz log files https://fedorahosted.org/389/ticket/48224 Reviewed by: nhosoi (Thanks!) Branch: master Fix Description: Use $? instead of $! to get pipe errors. Platforms tested: Fedora 21, RHEL 7.2 candidate Flag Day: no Doc impact: no diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl index 0038a038d..3113f8ae8 100755 --- a/ldap/admin/src/logconv.pl +++ b/ldap/admin/src/logconv.pl @@ -437,9 +437,9 @@ sub doUncompress { # so use the xz command directly # NOTE: This doesn't work if the argument is a file handle e.g. from # Archive::Tar - $! = 0; # clear - if (!open($TARFH, "xz -dc $_ |") or $!) { - openFailed($!, $_); + $? = 0; # clear + if (!open($TARFH, "xz -dc $_ |") or $?) { + openFailed($?, $_); return; } } else {
0
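The one-line fix above relies on a general Perl distinction: `$!` holds the error from the last failed system call (for example, the pipe could not be created), while `$?` holds the status of the last child process, which is what reveals that `xz` itself failed. The sketch below is a rough Python analogy, not a translation of logconv.pl, and the archive name is illustrative.

```python
# Rough analogy to the Perl fix (not a translation of logconv.pl):
# "could the command be started?" is an errno-style error (Perl's $!),
# "did the started command fail?" is an exit-status error (Perl's $?).
import subprocess

try:
    result = subprocess.run(
        ["xz", "-dc", "access.log.xz"],   # archive name is illustrative only
        capture_output=True,
        check=True,                       # raise if xz exits non-zero
    )
    print(len(result.stdout), "bytes decompressed")
except FileNotFoundError as exc:
    # The xz binary is missing: an OS-level error, the $!-style failure.
    print("could not start xz:", exc)
except subprocess.CalledProcessError as exc:
    # xz started but exited non-zero (bad or missing archive): the $?-style failure.
    print("xz failed with status", exc.returncode, exc.stderr.decode(errors="replace"))
```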
efac90bad8f5e21c05d5ea1b77f5186c9393429d
389ds/389-ds-base
Rename add_pwd_control to slapi_add_pwd_control Rename pwpolicy_make_response_control to slapi_pwpolicy_make_response_control
commit efac90bad8f5e21c05d5ea1b77f5186c9393429d Author: Rich Megginson <[email protected]> Date: Mon Feb 14 21:42:54 2005 +0000 Rename add_pwd_control to slapi_add_pwd_control Rename pwpolicy_make_response_control slapi_pwpolicy_make_response_control diff --git a/ldap/servers/slapd/libslapd.def b/ldap/servers/slapd/libslapd.def index ce7da8654..94ca945a7 100644 --- a/ldap/servers/slapd/libslapd.def +++ b/ldap/servers/slapd/libslapd.def @@ -355,7 +355,7 @@ EXPORTS defbackend_init @352 defbackend_get_backend @353 log_set_logging @354 - pwpolicy_make_response_control @355 + slapi_pwpolicy_make_response_control @355 delete_passwdPolicy @356 g_get_user_backend @357 new_passwdPolicy @358 @@ -769,7 +769,7 @@ EXPORTS slapi_search_internal_callback_pb @780 plugin_build_operation_action_bitmap @781 plugin_get_server_plg @782 - add_pwd_control @783 + slapi_add_pwd_control @783 pw_mod_allowchange_aci @784 do_add @785 do_modify @786
0