commit_id
string | repo
string | commit_message
string | diff
string | label
int64 |
---|---|---|---|---|
c72f6ba1e991f6e1a4fef5fac04ee474ec846692
|
389ds/389-ds-base
|
Ticket 317 - RHDS fractional replication with excluded password policy attributes leads to wrong error messages.
https://fedorahosted.org/389/ticket/317
Resolves: Ticket 317
Bug Description: RHDS fractional replication with excluded password policy attributes leads to wrong error messages.
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: Fractional replication can remove _all_ mods in an add or
modify operation. The code should just skip add and modify operations
that contain no data.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
cherry picked and ported from 72401ea2588d3aec7622155fdf5dd9e5af7f8f95 on Directory_Server_8_2_Branch
|
commit c72f6ba1e991f6e1a4fef5fac04ee474ec846692
Author: Rich Megginson <[email protected]>
Date: Thu Mar 8 12:06:37 2012 -0700
Ticket 317 - RHDS fractional replication with excluded password policy attributes leads to wrong error messages.
https://fedorahosted.org/389/ticket/317
Resolves: Ticket 317
Bug Description: RHDS fractional replication with excluded password policy attributes leads to wrong error messages.
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: Fractional replication can remove _all_ mods in an add or
modify operation. The code should just skip add and modify operations
that contain no data.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
cherry picked and ported from 72401ea2588d3aec7622155fdf5dd9e5af7f8f95 on Directory_Server_8_2_Branch
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index c9ad6fc70..a615a2943 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -163,6 +163,9 @@ typedef struct result_data
*/
#define PROTOCOL_IS_SHUTDOWN(prp) (event_occurred(prp, EVENT_PROTOCOL_SHUTDOWN) || prp->terminate)
+/* mods should be LDAPMod **mods */
+#define MODS_ARE_EMPTY(mods) ((mods == NULL) || (mods[0] == NULL))
+
/* Forward declarations */
static PRUint32 event_occurred(Private_Repl_Protocol *prp, PRUint32 event);
static void reset_events (Private_Repl_Protocol *prp);
@@ -1385,6 +1388,12 @@ replay_update(Private_Repl_Protocol *prp, slapi_operation_parameters *op, int *m
LDAPMod **modrdn_mods = NULL;
char csn_str[CSN_STRSIZE]; /* For logging only */
+ if (message_id) {
+ /* if we get out of this function without setting message_id, it means
+ we didn't send an op, so no result needs to be processed */
+ *message_id = 0;
+ }
+
/* Construct the replication info control that accompanies the operation */
if (SLAPI_OPERATION_ADD == op->operation_type)
{
@@ -1445,8 +1454,21 @@ replay_update(Private_Repl_Protocol *prp, slapi_operation_parameters *op, int *m
{
repl5_strip_fractional_mods(prp->agmt,entryattrs);
}
- return_value = conn_send_add(prp->conn, REPL_GET_DN(&op->target_address),
- entryattrs, update_control, message_id);
+ if (MODS_ARE_EMPTY(entryattrs)) {
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: replay_update: %s operation (dn=\"%s\" csn=%s) "
+ "not sent - empty\n",
+ agmt_get_long_name(prp->agmt),
+ op2string(op->operation_type),
+ REPL_GET_DN(&op->target_address),
+ csn_as_string(op->csn, PR_FALSE, csn_str));
+ }
+ return_value = CONN_OPERATION_SUCCESS;
+ } else {
+ return_value = conn_send_add(prp->conn, REPL_GET_DN(&op->target_address),
+ entryattrs, update_control, message_id);
+ }
ldap_mods_free(entryattrs, 1);
}
break;
@@ -1457,8 +1479,21 @@ replay_update(Private_Repl_Protocol *prp, slapi_operation_parameters *op, int *m
{
repl5_strip_fractional_mods(prp->agmt,op->p.p_modify.modify_mods);
}
- return_value = conn_send_modify(prp->conn, REPL_GET_DN(&op->target_address),
- op->p.p_modify.modify_mods, update_control, message_id);
+ if (MODS_ARE_EMPTY(op->p.p_modify.modify_mods)) {
+ if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: replay_update: %ss operation (dn=\"%s\" csn=%s) "
+ "not sent - empty\n",
+ agmt_get_long_name(prp->agmt),
+ op2string(op->operation_type),
+ REPL_GET_DN(&op->target_address),
+ csn_as_string(op->csn, PR_FALSE, csn_str));
+ }
+ return_value = CONN_OPERATION_SUCCESS;
+ } else {
+ return_value = conn_send_modify(prp->conn, REPL_GET_DN(&op->target_address),
+ op->p.p_modify.modify_mods, update_control, message_id);
+ }
break;
case SLAPI_OPERATION_DELETE:
return_value = conn_send_delete(prp->conn, REPL_GET_DN(&op->target_address),
@@ -1869,7 +1904,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
replica_id = csn_get_replicaid(entry.op->csn);
uniqueid = entry.op->target_address.uniqueid;
- if (prp->repl50consumer)
+ if (prp->repl50consumer && message_id)
{
int operation, error = 0;
@@ -1881,7 +1916,7 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
csn_as_string(entry.op->csn, PR_FALSE, csn_str);
return_value = repl5_inc_update_from_op_result(prp, replay_crc, error, csn_str, uniqueid, replica_id, &finished, num_changes_sent);
}
- else {
+ else if (message_id) {
/* Queue the details for pickup later in the response thread */
repl5_inc_operation *sop = NULL;
sop = repl5_inc_operation_new();
@@ -1891,6 +1926,12 @@ send_updates(Private_Repl_Protocol *prp, RUV *remote_update_vector, PRUint32 *nu
sop->replica_id = replica_id;
PL_strncpyz(sop->uniqueid, uniqueid, sizeof(sop->uniqueid));
repl5_int_push_operation(rd,sop);
+ } else {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: Skipping update operation with no message_id (uniqueid %s, CSN %s):\n",
+ agmt_get_long_name(prp->agmt),
+ entry.op->target_address.uniqueid, csn_str);
+ agmt_inc_last_update_changecount (prp->agmt, csn_get_replicaid(entry.op->csn), 1 /*skipped*/);
}
}
break;
| 0 |
3e2262e1c0410bdf4f9b9211aef13deb28f174d1
|
389ds/389-ds-base
|
Ticket 47379 - DNA plugin failed to fetch replication agreement
Bug Description: When trying to get the next range of available values, if a shared config
server does not have a replication agreement that points to itself then
it can not obtain the bind credentials/info to successfully contact that
server when trying to get the next range.
Fix Description: Extend the shared config server to allow for bind information, such as bind
method, and connection protocol. For the bind DN and pw(for SIMPLE and DIGEST-MD5
bind methods), we store this in the plugin config entry. As each bind pw needs to
use DES pw storage scheme, and this must be local to each server (not replicated
with the shared config server entry).
Fixed infinite loop in dna_fix_maxval() - we did not advance the server list if
there was a error. Also improved config validation to make sure the shared config
entry is not within the scope & filter of the DNA config.
https://fedorahosted.org/389/ticket/47379
Reviewed by: richm & nkinder(Thanks!!)
|
commit 3e2262e1c0410bdf4f9b9211aef13deb28f174d1
Author: Mark Reynolds <[email protected]>
Date: Fri Jun 14 11:05:46 2013 -0400
Ticket 47379 - DNA plugin failed to fetch replication agreement
Bug Description: When trying to get the next range of available values, if a shared config
server does not have a replication agreement that points to itself then
it can not obtain the bind credentials/info to successfully contact that
server when trying to get the next range.
Fix Description: Extend the shared config server to allow for bind information, such as bind
method, and connection protocol. For the bind DN and pw(for SIMPLE and DIGEST-MD5
bind methods), we store this in the plugin config entry. As each bind pw needs to
use DES pw storage scheme, and this must be local to each server (not replicated
with the shared config server entry).
Fixed infinite loop in dna_fix_maxval() - we did not advance the server list if
there was a error. Also improved config validation to make sure the shared config
entry is not within the scope & filter of the DNA config.
https://fedorahosted.org/389/ticket/47379
Reviewed by: richm & nkinder(Thanks!!)
diff --git a/ldap/schema/10dna-plugin.ldif b/ldap/schema/10dna-plugin.ldif
index c89c6b529..ac9b5f489 100644
--- a/ldap/schema/10dna-plugin.ldif
+++ b/ldap/schema/10dna-plugin.ldif
@@ -170,6 +170,38 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2130 NAME 'dnaRangeRequestTimeout'
#
################################################################################
#
+attributeTypes: ( 2.16.840.1.113730.3.1.2157 NAME 'dnaRemoteBindCred'
+ DESC 'Remote bind credentials'
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ X-ORIGIN '389 Directory Server' )
+#
+################################################################################
+#
+attributeTypes: ( 2.16.840.1.113730.3.1.2158 NAME 'dnaRemoteBindDN'
+ DESC 'Remote bind DN'
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
+ SINGLE-VALUE
+ X-ORIGIN '389 Directory Server' )
+#
+################################################################################
+#
+attributeTypes: ( 2.16.840.1.113730.3.1.2159 NAME 'dnaRemoteConnProtocol'
+ DESC 'Connection protocol: LDAP, TLS, or SSL'
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ X-ORIGIN '389 Directory Server' )
+#
+################################################################################
+#
+attributeTypes: ( 2.16.840.1.113730.3.1.2160 NAME 'dnaRemoteBindMethod'
+ DESC 'Remote bind method: SIMPLE, SSL, SASL/DIGEST-MD5, or SASL/GSSAPI'
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ X-ORIGIN '389 Directory Server' )
+#
+################################################################################
+#
objectClasses: ( 2.16.840.1.113730.3.2.324 NAME 'dnaPluginConfig'
DESC 'DNA plugin configuration'
SUP top
@@ -185,7 +217,9 @@ objectClasses: ( 2.16.840.1.113730.3.2.324 NAME 'dnaPluginConfig'
dnaSharedCfgDN $
dnaThreshold $
dnaNextRange $
- dnaRangeRequestTimeout $
+ dnaRangeRequestTimeout $
+ dnaRemoteBindDN $
+ dnaRemoteBindCred $
cn
)
X-ORIGIN '389 Directory Server' )
@@ -199,6 +233,8 @@ objectClasses: ( 2.16.840.1.113730.3.2.325 NAME 'dnaSharedConfig'
MAY ( dnaHostname $
dnaPortNum $
dnaSecurePortNum $
+ dnaRemoteBindMethod $
+ dnaRemoteConnProtocol $
dnaRemainingValues
)
X-ORIGIN '389 Directory Server' )
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index ff49f32be..a552fb641 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -87,18 +87,31 @@
#define DNA_GENERATE "dnaMagicRegen"
#define DNA_FILTER "dnaFilter"
#define DNA_SCOPE "dnaScope"
+#define DNA_REMOTE_BIND_DN "dnaRemoteBindDN"
+#define DNA_REMOTE_BIND_PW "dnaRemoteBindCred"
/* since v2 */
#define DNA_MAXVAL "dnaMaxValue"
#define DNA_SHARED_CFG_DN "dnaSharedCfgDN"
/* Shared Config */
-#define DNA_SHAREDCONFIG "dnaSharedConfig"
-#define DNA_REMAINING "dnaRemainingValues"
-#define DNA_THRESHOLD "dnaThreshold"
-#define DNA_HOSTNAME "dnaHostname"
-#define DNA_PORTNUM "dnaPortNum"
-#define DNA_SECURE_PORTNUM "dnaSecurePortNum"
+#define DNA_SHAREDCONFIG "dnaSharedConfig"
+#define DNA_REMAINING "dnaRemainingValues"
+#define DNA_THRESHOLD "dnaThreshold"
+#define DNA_HOSTNAME "dnaHostname"
+#define DNA_PORTNUM "dnaPortNum"
+#define DNA_SECURE_PORTNUM "dnaSecurePortNum"
+#define DNA_REMOTE_BIND_METHOD "dnaRemoteBindMethod"
+#define DNA_REMOTE_CONN_PROT "dnaRemoteConnProtocol"
+
+/* Bind Methods & Protocols */
+#define DNA_METHOD_SIMPLE "SIMPLE"
+#define DNA_METHOD_SSL "SSL"
+#define DNA_METHOD_GSSAPI "SASL/GSSAPI"
+#define DNA_METHOD_DIGESTMD5 "SASL/DIGEST-MD5"
+#define DNA_PROT_LDAP "LDAP"
+#define DNA_PROT_TLS "TLS"
+#define DNA_PROT_SSL "SSL"
/* For transferred ranges */
#define DNA_NEXT_RANGE "dnaNextRange"
@@ -154,6 +167,8 @@ struct configEntry {
PRUint64 threshold;
char *shared_cfg_base;
char *shared_cfg_dn;
+ char *remote_binddn;
+ char *remote_bindpw;
PRUint64 timeout;
/* This lock protects the 5 members below. All
* of the above members are safe to read as long
@@ -195,6 +210,12 @@ struct dnaServer {
unsigned int port;
unsigned int secureport;
PRUint64 remaining;
+ /* Remote replica settings from config */
+ PRUint64 remote_defined;
+ char *remote_bind_method;
+ char *remote_conn_prot;
+ char *remote_binddn; /* contains pointer to main config binddn */
+ char *remote_bindpw; /* contains pointer to main config bindpw */
};
static char *dna_extend_exop_oid_list[] = {
@@ -220,8 +241,8 @@ static int dna_be_txn_preop_init(Slapi_PBlock *pb);
* Local operation functions
*
*/
-static int dna_load_plugin_config(int use_eventq);
-static int dna_parse_config_entry(Slapi_Entry * e, int apply);
+static int dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq);
+static int dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply);
static void dna_delete_config();
static void dna_free_config_entry(struct configEntry ** entry);
static int dna_load_host_port();
@@ -264,6 +285,8 @@ static void dna_list_remove_type(char **list, char *type);
static int dna_is_multitype_range(struct configEntry *config_entry);
static void dna_create_valcheck_filter(struct configEntry *config_entry, PRUint64 value, char **filter);
static int dna_isrepl(Slapi_PBlock *pb);
+static int dna_get_remote_config_info( struct dnaServer *server, char **bind_dn, char **bind_passwd,
+ char **bind_method, int *is_ssl, int *port);
/**
*
@@ -572,7 +595,7 @@ dna_start(Slapi_PBlock * pb)
slapi_ch_calloc(1, sizeof(struct configEntry));
PR_INIT_CLIST(dna_global_config);
- if (dna_load_plugin_config(1/* use eventq */) != DNA_SUCCESS) {
+ if (dna_load_plugin_config(pb, 1/* use eventq */) != DNA_SUCCESS) {
slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
"dna_start: unable to load plug-in configuration\n");
return DNA_FAILURE;
@@ -640,7 +663,7 @@ done:
* ------ cn=etc etc
*/
static int
-dna_load_plugin_config(int use_eventq)
+dna_load_plugin_config(Slapi_PBlock *pb, int use_eventq)
{
int status = DNA_SUCCESS;
int result;
@@ -682,7 +705,7 @@ dna_load_plugin_config(int use_eventq)
/* We don't care about the status here because we may have
* some invalid config entries, but we just want to continue
* looking for valid ones. */
- dna_parse_config_entry(entries[i], 1);
+ dna_parse_config_entry(pb, entries[i], 1);
}
dna_unlock();
@@ -719,7 +742,7 @@ cleanup:
* if it is invalid.
*/
static int
-dna_parse_config_entry(Slapi_Entry * e, int apply)
+dna_parse_config_entry(Slapi_PBlock *pb, Slapi_Entry * e, int apply)
{
char *value;
struct configEntry *entry = NULL;
@@ -883,6 +906,45 @@ dna_parse_config_entry(Slapi_Entry * e, int apply)
slapi_log_error(SLAPI_LOG_CONFIG, DNA_PLUGIN_SUBSYSTEM,
"----------> %s [%" NSPRIu64 "]\n", DNA_MAXVAL, entry->maxval);
+ /* get the global bind dn and password(if any) */
+ value = slapi_entry_attr_get_charptr(e, DNA_REMOTE_BIND_DN);
+ if (value) {
+ Slapi_DN *sdn = NULL;
+ char *normdn = NULL;
+
+ sdn = slapi_sdn_new_dn_passin(value);
+ if (!sdn) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_parse_config_entry: Unable to create "
+ "slapi_dn from dnaRemoteBindDN (%s)\n", value);
+ ret = DNA_FAILURE;
+ slapi_ch_free_string(&value);
+ goto bail;
+ }
+ normdn = (char *)slapi_sdn_get_dn(sdn);
+ if (NULL == normdn) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_parse_config_entry: failed to normalize dn: "
+ "%s\n", value);
+ ret = DNA_FAILURE;
+ slapi_sdn_free(&sdn);
+ goto bail;
+ }
+ entry->remote_binddn = slapi_ch_strdup(normdn);
+ slapi_sdn_free(&sdn);
+ }
+ /* now grab the password */
+ entry->remote_bindpw = slapi_entry_attr_get_charptr(e, DNA_REMOTE_BIND_PW);
+
+ /* validate that we have both a bind dn or password, or we have none */
+ if((entry->remote_bindpw != NULL && entry->remote_binddn == NULL) ||
+ (entry->remote_binddn != NULL && entry->remote_bindpw == NULL)){
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_parse_config_entry: Invalid remote bind DN and password settings.\n");
+ ret = DNA_FAILURE;
+ goto bail;
+ }
+
value = slapi_entry_attr_get_charptr(e, DNA_SHARED_CFG_DN);
if (value) {
Slapi_Entry *shared_e = NULL;
@@ -1057,6 +1119,21 @@ dna_parse_config_entry(Slapi_Entry * e, int apply)
goto bail;
}
+ /* Check if the shared config base matches the config scope and filter */
+ if (entry->scope && slapi_dn_issuffix(entry->shared_cfg_base, entry->scope)){
+ if (entry->slapi_filter) {
+ ret = slapi_vattr_filter_test(pb, e, entry->slapi_filter, 0);
+ if (LDAP_SUCCESS == ret) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM, "dna_parse_config_entry: "
+ "Error: shared config entry (%s) matches scope \"%s\", and filter \"%s\" "
+ "of the DNA config entry (%s).\n", entry->shared_cfg_base,
+ entry->scope, entry->filter, entry->dn);
+ ret = DNA_FAILURE;
+ goto bail;
+ }
+ }
+ }
+
/**
* Finally add the entry to the list.
* We sort by scope dn length with longer
@@ -1140,6 +1217,8 @@ dna_free_config_entry(struct configEntry ** entry)
slapi_ch_free_string(&e->scope);
slapi_ch_free_string(&e->shared_cfg_base);
slapi_ch_free_string(&e->shared_cfg_dn);
+ slapi_ch_free_string(&e->remote_binddn);
+ slapi_ch_free_string(&e->remote_bindpw);
slapi_destroy_mutex(e->lock);
@@ -1170,13 +1249,14 @@ static void
dna_free_shared_server(struct dnaServer **server)
{
struct dnaServer *s;
+
if ((NULL == server) || (NULL == *server)) {
return;
}
-
s = *server;
slapi_ch_free_string(&s->host);
-
+ slapi_ch_free_string(&s->remote_bind_method);
+ slapi_ch_free_string(&s->remote_conn_prot);
slapi_ch_free((void **)server);
}
@@ -1358,6 +1438,7 @@ static int dna_fix_maxval(struct configEntry *config_entry,
if ((ret = dna_update_next_range(config_entry, lower, upper)) == 0) {
break;
}
+ server = PR_NEXT_LINK(server);
}
}
@@ -1434,7 +1515,7 @@ dna_get_shared_servers(struct configEntry *config_entry, PRCList **servers)
int ret = LDAP_SUCCESS;
Slapi_PBlock *pb = NULL;
Slapi_Entry **entries = NULL;
- char *attrs[5];
+ char *attrs[7];
/* First do a search in the shared config area for this
* range to find other servers who are managing this range. */
@@ -1442,7 +1523,9 @@ dna_get_shared_servers(struct configEntry *config_entry, PRCList **servers)
attrs[1] = DNA_PORTNUM;
attrs[2] = DNA_SECURE_PORTNUM;
attrs[3] = DNA_REMAINING;
- attrs[4] = NULL;
+ attrs[4] = DNA_REMOTE_BIND_METHOD;
+ attrs[5] = DNA_REMOTE_CONN_PROT;
+ attrs[6] = NULL;
pb = slapi_pblock_new();
if (NULL == pb) {
@@ -1490,9 +1573,16 @@ dna_get_shared_servers(struct configEntry *config_entry, PRCList **servers)
server->secureport = slapi_entry_attr_get_uint(entries[i], DNA_SECURE_PORTNUM);
server->remaining = slapi_entry_attr_get_ulonglong(entries[i],
DNA_REMAINING);
+ server->remote_binddn = config_entry->remote_binddn;
+ server->remote_bindpw = config_entry->remote_bindpw;
+ server->remote_bind_method = slapi_entry_attr_get_charptr(entries[i],
+ DNA_REMOTE_BIND_METHOD);
+ server->remote_conn_prot = slapi_entry_attr_get_charptr(entries[i],
+ DNA_REMOTE_CONN_PROT);
/* validate the entry */
- if (!server->host || server->port == 0 || server->remaining == 0) {
+ if (!server->host || (server->port == 0 && server->secureport == 0) || server->remaining == 0)
+ {
/* free and skip this one */
slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM,
"dna_get_shared_servers: skipping invalid "
@@ -1500,6 +1590,40 @@ dna_get_shared_servers(struct configEntry *config_entry, PRCList **servers)
dna_free_shared_server(&server);
continue;
}
+ /* see if we defined a server manually */
+ if(server->remote_bind_method){
+ char *reason;
+ int err = 0;
+
+ if(strcasecmp(server->remote_bind_method, DNA_METHOD_DIGESTMD5) == 0 ||
+ strcasecmp(server->remote_bind_method, DNA_METHOD_SIMPLE) == 0){
+ /* requires a DN and password */
+ if(!server->remote_binddn || !server->remote_bindpw){
+ reason = "missing bind DN and/or password.";
+ err = 1;
+ }
+ }
+ if(strcasecmp(server->remote_bind_method, DNA_METHOD_SSL) == 0){
+ /* requires a bind DN */
+ if(strcasecmp(server->remote_conn_prot, DNA_PROT_SSL) != 0 &&
+ strcasecmp(server->remote_conn_prot, DNA_PROT_TLS) != 0 )
+ {
+ reason = "bind method (SSL) requires either SSL or TLS connection "
+ "protocol.";
+ err = 1;
+ }
+ }
+ if(err){
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM,
+ "dna_get_shared_servers: skipping invalid "
+ "shared config entry (%s). Reason: %s\n",
+ slapi_entry_get_dn(entries[i]), reason);
+ dna_free_shared_server(&server);
+ continue;
+ }
+ /* everything is ok */
+ server->remote_defined = 1;
+ }
/* add a server entry to the list */
if (*servers == NULL) {
@@ -2487,7 +2611,8 @@ static int dna_get_replica_bind_creds(char *range_dn, struct dnaServer *server,
slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
"dna_get_replica_bind_creds: Failed to fetch replica "
"bind credentials for range %s, server %s, port %u [error %d]\n",
- range_dn, server->host, server->port, ret);
+ range_dn, server->host,
+ server->port ? server->port : server->secureport, ret);
goto bail;
}
@@ -2495,10 +2620,18 @@ static int dna_get_replica_bind_creds(char *range_dn, struct dnaServer *server,
&entries);
if (NULL == entries || NULL == entries[0]) {
- slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ if(server->remote_defined){
+ /*
+ * Ok there are no replication agreements for this shared server, but we
+ * do have custom defined authentication settings we can use.
+ */
+ ret = dna_get_remote_config_info(server, bind_dn, bind_passwd, bind_method, is_ssl, port);
+ goto bail;
+ }
+ slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM,
"dna_get_replica_bind_creds: Failed to fetch replication "
"agreement for range %s, server %s, port %u\n", range_dn,
- server->host, server->port);
+ server->host, server->port ? server->port : server->secureport);
ret = LDAP_OPERATIONS_ERROR;
goto bail;
}
@@ -2577,6 +2710,92 @@ bail:
return ret;
}
+static int
+dna_get_remote_config_info( struct dnaServer *server, char **bind_dn, char **bind_passwd,
+ char **bind_method, int *is_ssl, int *port)
+{
+ int rc = 0;
+
+ /* populate the bind info */
+ slapi_ch_free_string(bind_dn);
+ slapi_ch_free_string(bind_method);
+ *bind_dn = slapi_ch_strdup(server->remote_binddn);
+ *bind_method = slapi_ch_strdup(server->remote_bind_method);
+ /* fix up the bind method */
+ if ((NULL == *bind_method) || (strcasecmp(*bind_method, DNA_METHOD_SIMPLE) == 0)) {
+ slapi_ch_free_string(bind_method);
+ *bind_method = slapi_ch_strdup(LDAP_SASL_SIMPLE);
+ } else if (strcasecmp(*bind_method, "SSLCLIENTAUTH") == 0) {
+ slapi_ch_free_string(bind_method);
+ *bind_method = slapi_ch_strdup(LDAP_SASL_EXTERNAL);
+ } else if (strcasecmp(*bind_method, DNA_METHOD_GSSAPI) == 0) {
+ slapi_ch_free_string(bind_method);
+ *bind_method = slapi_ch_strdup("GSSAPI");
+ } else if (strcasecmp(*bind_method, DNA_METHOD_DIGESTMD5) == 0) {
+ slapi_ch_free_string(bind_method);
+ *bind_method = slapi_ch_strdup("DIGEST-MD5");
+ } else { /* some other weird value */
+ ; /* just use it directly */
+ }
+
+ if(server->remote_conn_prot && strcasecmp(server->remote_conn_prot, DNA_PROT_SSL) == 0){
+ *is_ssl = 1;
+ } else if(server->remote_conn_prot && strcasecmp(server->remote_conn_prot, DNA_PROT_TLS) == 0){
+ *is_ssl = 2;
+ } else {
+ *is_ssl = 0;
+ }
+ if(*is_ssl == 1){ /* SSL(covers TLS over ssl) */
+ if (server->secureport){
+ *port = server->secureport;
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_get_remote_config_info: Using SSL protocol, but the secure "
+ "port is not defined.\n");
+ return -1;
+ }
+ } else { /* LDAP/TLS(non secure port) */
+ if(server->port){
+ *port = server->port;
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_get_remote_config_info: Using %s protocol, but the non-secure "
+ "port is not defined.\n", server->remote_conn_prot);
+ return -1;
+ }
+ }
+
+ /* Decode the password */
+ if (server->remote_bindpw) {
+ char *bind_cred = slapi_ch_strdup(server->remote_bindpw);
+ int pw_ret = 0;
+
+ slapi_ch_free_string(bind_passwd);
+ pw_ret = pw_rever_decode(bind_cred, bind_passwd, DNA_REPL_CREDS);
+
+ if (pw_ret == -1) {
+ slapi_log_error(SLAPI_LOG_FATAL, DNA_PLUGIN_SUBSYSTEM,
+ "dna_get_remote_config_info: Failed to decode "
+ "replica bind credentials for server %s, "
+ "port %u\n", server->host,
+ server->port ? server->port : server->secureport);
+ rc = -1;
+ } else if (pw_ret != 0) {
+ /*
+ * The password was already in clear text, so pw_rever_decode
+ * simply set bind_passwd to bind_cred. Set bind_cred to NULL
+ * to prevent a double free. The memory is now owned by
+ * bind_passwd, which is the callers responsibility to free.
+ */
+ bind_cred = NULL;
+ }
+ slapi_ch_free_string(&bind_cred);
+ }
+
+ return rc;
+}
+
+
/*
* dna_list_contains_type()
*
@@ -3313,7 +3532,7 @@ dna_pre_op(Slapi_PBlock * pb, int modtype)
* here at the pre-op stage. Applying the config
* needs to be done at the post-op stage. */
- if (dna_parse_config_entry(test_e, 0) != DNA_SUCCESS) {
+ if (dna_parse_config_entry(pb, test_e, 0) != DNA_SUCCESS) {
/* Refuse the operation if config parsing failed. */
ret = LDAP_UNWILLING_TO_PERFORM;
if (LDAP_CHANGETYPE_ADD == modtype) {
@@ -3683,7 +3902,7 @@ static int dna_config_check_post_op(Slapi_PBlock * pb)
if (!slapi_op_internal(pb)) { /* If internal, no need to check. */
if ((dn = dna_get_dn(pb))) {
if (dna_dn_is_config(dn)) {
- dna_load_plugin_config(0);
+ dna_load_plugin_config(pb, 0);
}
}
}
| 0 |
5fded8ec52bc6f8e6d381efe5268c4a174973b30
|
389ds/389-ds-base
|
Implement SASL I/O as an NSPR I/O layer
This is part of the port to OpenLDAP, to simplify the code that
interacts with the BER I/O layer. Ideally, since we only deal
with NSPR I/O, not raw I/O, in the directory server, we can push
any additional layers, such as SASL, as NSPR I/O layers. This
is how NSS works, to push the SSL codec layer on top of the regular
NSPR network I/O layer.
Only 3 functions are implemented - PR_Send (sasl_io_send), PR_Recv
(sasl_io_recv), and PR_Write (sasl_io_write).
This simplified the code in saslbind.c and connection.c, and removed
special handling for SASL connections - now they are just treated as
regular NSPR connections - the app has not nor does not need to know
the connection is a SASL connection.
In addition, this gives us the ability to use SASL and SSL at the same
time. The SASL I/O layer can be pushed on top of the SSL layer, so
that we can use SSL for connection encryption, and SASL for authentication,
without having to worry about mixing the two.
Reviewed by: nkinder (Thanks!)
Platforms tested: RHEL5 x86_64, Fedora 9 x86_64
|
commit 5fded8ec52bc6f8e6d381efe5268c4a174973b30
Author: Rich Megginson <[email protected]>
Date: Fri Jun 5 14:16:48 2009 -0600
Implement SASL I/O as an NSPR I/O layer
This is part of the port to OpenLDAP, to simplify the code that
interacts with the BER I/O layer. Ideally, since we only deal
with NSPR I/O, not raw I/O, in the directory server, we can push
any additional layers, such as SASL, as NSPR I/O layers. This
is how NSS works, to push the SSL codec layer on top of the regular
NSPR network I/O layer.
Only 3 functions are implemented - PR_Send (sasl_io_send), PR_Recv
(sasl_io_recv), and PR_Write (sasl_io_write).
This simplified the code in saslbind.c and connection.c, and removed
special handling for SASL connections - now they are just treated as
regular NSPR connections - the app has not nor does not need to know
the connection is a SASL connection.
In addition, this gives us the ability to use SASL and SSL at the same
time. The SASL I/O layer can be pushed on top of the SSL layer, so
that we can use SSL for connection encryption, and SASL for authentication,
without having to worry about mixing the two.
Reviewed by: nkinder (Thanks!)
Platforms tested: RHEL5 x86_64, Fedora 9 x86_64
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 421b3e52b..290c08d37 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1674,18 +1674,10 @@ static int
connection_read_ldap_data(Connection *conn, PRInt32 *err)
{
int ret = 0;
- /* Is SASL encryption enabled on this connection ? */
- if (conn->c_sasl_io) {
- /* If so, call the SASL I/O layer */
- ret = sasl_recv_connection(conn,conn->c_private->c_buffer, conn->c_private->c_buffer_size,err);
- } else
- {
- /* Otherwise, just call PRRecv() */
- ret = PR_Recv(conn->c_prfd,conn->c_private->c_buffer,conn->c_private->c_buffer_size,0,PR_INTERVAL_NO_WAIT);
- if (ret < 0) {
- *err = PR_GetError();
- }
- }
+ ret = PR_Recv(conn->c_prfd,conn->c_private->c_buffer,conn->c_private->c_buffer_size,0,PR_INTERVAL_NO_WAIT);
+ if (ret < 0) {
+ *err = PR_GetError();
+ }
return ret;
}
@@ -1718,17 +1710,6 @@ int connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, i
(conn->c_flags & CONN_FLAG_CLOSING) ) {
return CONN_DONE;
}
-
- /* See if we should enable SASL I/O for this connection */
- if (conn->c_enable_sasl_io) {
- ret = sasl_io_setup(conn);
- if (ret) {
- LDAPDebug( LDAP_DEBUG_ANY,
- "conn=%" NSPRIu64 " unable to enable SASL I/O\n", conn->c_connid, 0, 0 );
- disconnect_server( conn, conn->c_connid, -1, SLAPD_DISCONNECT_BAD_BER_TAG, EPROTO );
- return CONN_DONE;
- }
- }
*tag = LBER_DEFAULT;
/* First check to see if we have buffered data from "before" */
diff --git a/ldap/servers/slapd/fe.h b/ldap/servers/slapd/fe.h
index ddc85b66d..b932f4509 100644
--- a/ldap/servers/slapd/fe.h
+++ b/ldap/servers/slapd/fe.h
@@ -186,10 +186,7 @@ void configure_ns_socket( int * ns );
/*
* sasl_io.c
*/
-int sasl_read_function(int ignore, void *buffer, int count, struct lextiof_socket_private *handle );
-int sasl_write_function(int ignore, const void *buffer, int count, struct lextiof_socket_private *handle );
int sasl_io_enable(Connection *c);
-int sasl_recv_connection(Connection *c, char *buffer, size_t count,PRInt32 *err);
/*
* sasl_map.c
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index edd66d083..7955e86a6 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1301,7 +1301,6 @@ int slapd_ldap_sasl_interactive_bind(
/*
* sasl_io.c
*/
-/* This function should be called under the connection mutex */
int sasl_io_setup(Connection *c);
/*
diff --git a/ldap/servers/slapd/sasl_io.c b/ldap/servers/slapd/sasl_io.c
index e65e60b3d..c6ecf4b38 100644
--- a/ldap/servers/slapd/sasl_io.c
+++ b/ldap/servers/slapd/sasl_io.c
@@ -62,9 +62,7 @@
* a full packet.
*/
-struct _sasl_io_private {
- struct lextiof_socket_private *real_handle;
- struct lber_x_ext_io_fns *real_iofns;
+struct PRFilePrivate {
char *decrypted_buffer;
size_t decrypted_buffer_size;
size_t decrypted_buffer_count;
@@ -73,20 +71,69 @@ struct _sasl_io_private {
size_t encrypted_buffer_size;
size_t encrypted_buffer_count;
size_t encrypted_buffer_offset;
- Connection *conn;
+ Connection *conn; /* needed for connid and sasl_conn context */
+ PRBool send_encrypted; /* can only send encrypted data after the first read -
+ that is, we cannot send back an encrypted response
+ to the bind request that established the sasl io */
+
};
-int
-sasl_io_enable(Connection *c)
+typedef PRFilePrivate sasl_io_private;
+
+static PRInt32 PR_CALLBACK
+sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags,
+ PRIntervalTime timeout);
+
+static void
+debug_print_layers(PRFileDesc *fd)
{
- int ret = 0;
+#if 0
+ PR_ASSERT(fd->higher == NULL); /* this is the topmost layer */
+ while (fd) {
+ PRSocketOptionData sod;
+ PRInt32 err;
- LDAPDebug( LDAP_DEBUG_CONNS,
- "sasl_io_enable for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
- /* Flag that we should enable SASL I/O for the next read operation on this connection */
- c->c_enable_sasl_io = 1;
-
- return ret;
+ LDAPDebug2Args( LDAP_DEBUG_CONNS,
+ "debug_print_layers: fd %d sasl_io_recv = %p\n",
+ PR_FileDesc2NativeHandle(fd), sasl_io_recv );
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "debug_print_layers: fd name %s type = %d recv = %p\n",
+ PR_GetNameForIdentity(fd->identity),
+ PR_GetDescType(fd),
+ fd->methods->recv ? fd->methods->recv : NULL );
+ sod.option = PR_SockOpt_Nonblocking;
+ if (PR_FAILURE == PR_GetSocketOption(fd, &sod)) {
+ err = PR_GetError();
+ LDAPDebug2Args( LDAP_DEBUG_CONNS,
+ "debug_print_layers: error getting nonblocking option: %d %s\n",
+ err, slapd_pr_strerror(err) );
+ } else {
+ LDAPDebug1Arg( LDAP_DEBUG_CONNS,
+ "debug_print_layers: non blocking %d\n", sod.value.non_blocking );
+ }
+ sod.option = PR_SockOpt_Reuseaddr;
+ if (PR_FAILURE == PR_GetSocketOption(fd, &sod)) {
+ err = PR_GetError();
+ LDAPDebug2Args( LDAP_DEBUG_CONNS,
+ "debug_print_layers: error getting reuseaddr option: %d %s\n",
+ err, slapd_pr_strerror(err) );
+ } else {
+ LDAPDebug1Arg( LDAP_DEBUG_CONNS,
+ "debug_print_layers: reuseaddr %d\n", sod.value.reuse_addr );
+ }
+ sod.option = PR_SockOpt_RecvBufferSize;
+ if (PR_FAILURE == PR_GetSocketOption(fd, &sod)) {
+ err = PR_GetError();
+ LDAPDebug2Args( LDAP_DEBUG_CONNS,
+ "debug_print_layers: error getting recvbuffer option: %d %s\n",
+ err, slapd_pr_strerror(err) );
+ } else {
+ LDAPDebug1Arg( LDAP_DEBUG_CONNS,
+ "debug_print_layers: recvbuffer %d\n", sod.value.recv_buffer_size );
+ }
+ fd = fd->lower;
+ }
+#endif
}
static void
@@ -98,68 +145,6 @@ sasl_io_init_buffers(sasl_io_private *sp)
sp->encrypted_buffer_size = SASL_IO_BUFFER_SIZE;
}
-/* This function should be called under the connection mutex */
-int
-sasl_io_setup(Connection *c)
-{
- int ret = 0;
- struct lber_x_ext_io_fns func_pointers = {0};
- struct lber_x_ext_io_fns *real_iofns = (struct lber_x_ext_io_fns *) slapi_ch_malloc(LBER_X_EXTIO_FNS_SIZE);
- sasl_io_private *sp = (sasl_io_private*) slapi_ch_calloc(1, sizeof(sasl_io_private));
-
- LDAPDebug( LDAP_DEBUG_CONNS,
- "sasl_io_setup for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
- /* Get the current functions and store them for later */
- real_iofns->lbextiofn_size = LBER_X_EXTIO_FNS_SIZE;
- ber_sockbuf_get_option( c->c_sb, LBER_SOCKBUF_OPT_EXT_IO_FNS, real_iofns );
- sp->real_iofns = real_iofns; /* released in sasl_io_cleanup */
-
- /* Set up the private structure */
- sp->real_handle = (struct lextiof_socket_private*) c->c_prfd;
- sp->conn = c;
- /* Store the private structure in the connection */
- c->c_sasl_io_private = sp;
- /* Insert the sasl i/o functions into the ber layer */
- func_pointers.lbextiofn_size = LBER_X_EXTIO_FNS_SIZE;
- func_pointers.lbextiofn_read = sasl_read_function;
- func_pointers.lbextiofn_write = sasl_write_function;
- func_pointers.lbextiofn_writev = NULL;
- func_pointers.lbextiofn_socket_arg = (struct lextiof_socket_private *) sp;
- ret = ber_sockbuf_set_option( c->c_sb, LBER_SOCKBUF_OPT_EXT_IO_FNS, &func_pointers);
- /* Setup the data buffers for the fast read path */
- sasl_io_init_buffers(sp);
- /* Reset the enable flag, so we don't process it again */
- c->c_enable_sasl_io = 0;
- /* Mark the connection as having SASL I/O */
- c->c_sasl_io = 1;
- return ret;
-}
-
-int
-sasl_io_cleanup(Connection *c)
-{
- int ret = 0;
- sasl_io_private *sp = c->c_sasl_io_private;
- if (sp) {
- LDAPDebug( LDAP_DEBUG_CONNS,
- "sasl_io_cleanup for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
- /* Free the buffers */
- slapi_ch_free((void**)&(sp->encrypted_buffer));
- slapi_ch_free((void**)&(sp->decrypted_buffer));
- /* Put the I/O functions back how they were */
- if (NULL != sp->real_iofns) {
- ber_sockbuf_set_option( c->c_sb, LBER_SOCKBUF_OPT_EXT_IO_FNS, sp->real_iofns );
- slapi_ch_free((void**)&(sp->real_iofns));
- }
- slapi_ch_free((void**)&sp);
- c->c_sasl_io_private = NULL;
- c->c_enable_sasl_io = 0;
- c->c_sasl_io = 0;
- c->c_sasl_ssf = 0;
- }
- return ret;
-}
-
static void sasl_io_resize_encrypted_buffer(sasl_io_private *sp, size_t requested_size)
{
@@ -189,24 +174,59 @@ sasl_io_finished_packet(sasl_io_private *sp)
return (sp->encrypted_buffer_count && (sp->encrypted_buffer_offset == sp->encrypted_buffer_count) );
}
-static int
-sasl_io_start_packet(Connection *c, PRInt32 *err)
+static const char* const sasl_LayerName = "SASL";
+static PRDescIdentity sasl_LayerID;
+static PRIOMethods sasl_IoMethods;
+static PRCallOnceType sasl_callOnce = {0,0};
+
+static sasl_io_private *
+sasl_get_io_private(PRFileDesc *fd)
{
- int ret = 0;
+ sasl_io_private *sp;
+
+ PR_ASSERT(fd != NULL);
+ PR_ASSERT(fd->methods->file_type == PR_DESC_LAYERED);
+ PR_ASSERT(fd->identity == sasl_LayerID);
+
+ sp = (sasl_io_private *)fd->secret;
+ return sp;
+}
+
+static PRInt32
+sasl_io_start_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt32 *err)
+{
+ PRInt32 ret = 0;
unsigned char buffer[4];
size_t packet_length = 0;
size_t saslio_limit;
-
- ret = PR_Recv(c->c_prfd,buffer,sizeof(buffer),0,PR_INTERVAL_NO_WAIT);
- if (ret < 0) {
+ sasl_io_private *sp = sasl_get_io_private(fd);
+ Connection *c = sp->conn;
+
+ *err = 0;
+ debug_print_layers(fd);
+ /* first we need the length bytes */
+ ret = PR_Recv(fd->lower, buffer, sizeof(buffer), flags, timeout);
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "read sasl packet length returned %d on connection %" NSPRIu64 "\n", ret, c->c_connid, 0 );
+ if (ret <= 0) {
*err = PR_GetError();
- return -1;
+ LDAPDebug( LDAP_DEBUG_ANY,
+ "sasl_io_start_packet: error reading sasl packet length on connection %" NSPRIu64 " %d:%s\n", c->c_connid, *err, slapd_pr_strerror(*err) );
+ return PR_FAILURE;
}
+ /*
+ * NOTE: A better way to do this would be to read the bytes and add them to
+ * sp->encrypted_buffer - if offset < 4, tell caller we didn't read enough
+ * bytes yet - if offset >= 4, decode the length and proceed. However, it
+ * is highly unlikely that a request to read 4 bytes will return < 4 bytes,
+ * perhaps only in error conditions, in which case the ret < 0 case above
+ * will run
+ */
if (ret != 0 && ret < sizeof(buffer)) {
LDAPDebug( LDAP_DEBUG_ANY,
- "failed to read sasl packet length on connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
- return -1;
-
+ "sasl_io_start_packet: failed - read only %d bytes of sasl packet length on connection %" NSPRIu64 "\n", ret, c->c_connid, 0 );
+ PR_SetError(PR_IO_ERROR, 0);
+ return PR_FAILURE;
}
if (ret == sizeof(buffer)) {
/* Decode the length (could use ntohl here ??) */
@@ -215,7 +235,7 @@ sasl_io_start_packet(Connection *c, PRInt32 *err)
packet_length += 4;
LDAPDebug( LDAP_DEBUG_CONNS,
- "read sasl packet length %ld on connection %" NSPRIu64 "\n", packet_length, c->c_connid, 0 );
+ "read sasl packet length %ld on connection %" NSPRIu64 "\n", packet_length, c->c_connid, 0 );
/* Check if the packet length is larger than our max allowed. A
* setting of -1 means that we allow any size SASL IO packet. */
@@ -225,59 +245,66 @@ sasl_io_start_packet(Connection *c, PRInt32 *err)
"SASL encrypted packet length exceeds maximum allowed limit (length=%ld, limit=%ld)."
" Change the nsslapd-maxsasliosize attribute in cn=config to increase limit.\n",
packet_length, config_get_maxsasliosize(), 0);
- return -1;
+ PR_SetError(PR_BUFFER_OVERFLOW_ERROR, 0);
+ *err = PR_BUFFER_OVERFLOW_ERROR;
+ return PR_FAILURE;
}
- sasl_io_resize_encrypted_buffer(c->c_sasl_io_private, packet_length);
+ sasl_io_resize_encrypted_buffer(sp, packet_length);
/* Cyrus SASL implementation expects to have the length at the first
4 bytes */
- memcpy(c->c_sasl_io_private->encrypted_buffer, buffer, 4);
- c->c_sasl_io_private->encrypted_buffer_count = packet_length;
- c->c_sasl_io_private->encrypted_buffer_offset = 4;
+ memcpy(sp->encrypted_buffer, buffer, 4);
+ sp->encrypted_buffer_count = packet_length;
+ sp->encrypted_buffer_offset = 4;
}
- return 0;
+
+ return PR_SUCCESS;
}
-static int
-sasl_io_read_packet(Connection *c, PRInt32 *err)
+
+static PRInt32
+sasl_io_read_packet(PRFileDesc *fd, PRIntn flags, PRIntervalTime timeout, PRInt32 *err)
{
PRInt32 ret = 0;
- sasl_io_private *sp = c->c_sasl_io_private;
+ sasl_io_private *sp = sasl_get_io_private(fd);
+ Connection *c = sp->conn;
size_t bytes_remaining_to_read = sp->encrypted_buffer_count - sp->encrypted_buffer_offset;
- ret = PR_Recv(c->c_prfd,sp->encrypted_buffer + sp->encrypted_buffer_offset,bytes_remaining_to_read,0,PR_INTERVAL_NO_WAIT);
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_read_packet: reading %d bytes for connection %" NSPRIu64 "\n",
+ bytes_remaining_to_read,
+ c->c_connid, 0 );
+ ret = PR_Recv(fd->lower, sp->encrypted_buffer + sp->encrypted_buffer_offset, bytes_remaining_to_read, flags, timeout);
if (ret < 0) {
*err = PR_GetError();
- return -1;
- }
- if (ret > 0) {
- sp->encrypted_buffer_offset += ret;
+ LDAPDebug( LDAP_DEBUG_ANY,
+ "sasl_io_read_packet: error reading sasl packet on connection %" NSPRIu64 " %d:%s\n", c->c_connid, *err, slapd_pr_strerror(*err) );
+ return PR_FAILURE;
}
+ sp->encrypted_buffer_offset += ret;
return ret;
}
-/* Special recv function for the server connection code */
-/* Here, we return bytes to the caller, either the bytes
- remaining in the decrypted data buffer, from 'before',
- or the number of bytes we get decrypted from sasl,
- or the requested number of bytes whichever is lower.
- */
-int
-sasl_recv_connection(Connection *c, char *buffer, size_t count,PRInt32 *err)
+static PRInt32 PR_CALLBACK
+sasl_io_recv(PRFileDesc *fd, void *buf, PRInt32 len, PRIntn flags,
+ PRIntervalTime timeout)
{
- int ret = 0;
+ sasl_io_private *sp = sasl_get_io_private(fd);
+ Connection *c = sp->conn;
+ PRInt32 ret = 0;
size_t bytes_in_buffer = 0;
- sasl_io_private *sp = c->c_sasl_io_private;
+ PRInt32 err = 0;
- *err = 0;
- LDAPDebug( LDAP_DEBUG_CONNS,
- "sasl_recv_connection for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
/* Do we have decrypted data buffered from 'before' ? */
bytes_in_buffer = sp->decrypted_buffer_count - sp->decrypted_buffer_offset;
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_recv for connection %" NSPRIu64 " len %d bytes_in_buffer %d\n", c->c_connid, len, bytes_in_buffer );
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_recv for connection %" NSPRIu64 " len %d encrypted buffer count %d\n", c->c_connid, len, sp->encrypted_buffer_count );
if (0 == bytes_in_buffer) {
/* If there wasn't buffered decrypted data, we need to get some... */
if (!sasl_io_reading_packet(sp)) {
/* First read the packet length and so on */
- ret = sasl_io_start_packet(c, err);
+ ret = sasl_io_start_packet(fd, flags, timeout, &err);
if (0 != ret) {
/* Most likely the i/o timed out */
return ret;
@@ -286,23 +313,33 @@ sasl_recv_connection(Connection *c, char *buffer, size_t count,PRInt32 *err)
/* We now have the packet length
* we now must read more data off the wire until we have the complete packet
*/
- do {
- ret = sasl_io_read_packet(c,err);
- if (0 == ret || -1 == ret) {
- return ret;
- }
- } while (!sasl_io_finished_packet(sp));
- /* We are there. */
+ ret = sasl_io_read_packet(fd, flags, timeout, &err);
+ if (PR_FAILURE == ret) {
+ return ret; /* read packet will set pr error */
+ }
+ /* If we have not read the packet yet, we cannot return any decrypted data to the
+ * caller - so just tell the caller we don't have enough data yet
+ * this is equivalent to recv() returning EAGAIN on a non-blocking socket
+ * the caller must handle this condition and poll() or similar to know
+ * when more data arrives
+ */
+ if (!sasl_io_finished_packet(sp)) {
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_recv for connection %" NSPRIu64 " - not finished reading packet yet\n", c->c_connid, 0, 0 );
+ PR_SetError(PR_WOULD_BLOCK_ERROR, 0);
+ return PR_FAILURE;
+ }
+ /* We have the full encrypted buffer now - decrypt it */
{
const char *output_buffer = NULL;
unsigned int output_length = 0;
LDAPDebug( LDAP_DEBUG_CONNS,
- "sasl_recv_connection finished reading packet for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
+ "sasl_io_recv finished reading packet for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
/* Now decode it */
ret = sasl_decode(c->c_sasl_conn,sp->encrypted_buffer,sp->encrypted_buffer_count,&output_buffer,&output_length);
if (SASL_OK == ret) {
LDAPDebug( LDAP_DEBUG_CONNS,
- "sasl_recv_connection decoded packet length %d for connection %" NSPRIu64 "\n", output_length, c->c_connid, 0 );
+ "sasl_io_recv decoded packet length %d for connection %" NSPRIu64 "\n", output_length, c->c_connid, 0 );
if (output_length) {
sasl_io_resize_decrypted_buffer(sp,output_length);
memcpy(sp->decrypted_buffer,output_buffer,output_length);
@@ -310,88 +347,206 @@ sasl_recv_connection(Connection *c, char *buffer, size_t count,PRInt32 *err)
sp->decrypted_buffer_offset = 0;
sp->encrypted_buffer_offset = 0;
sp->encrypted_buffer_count = 0;
+ bytes_in_buffer = output_length;
}
} else {
LDAPDebug( LDAP_DEBUG_ANY,
- "sasl_recv_connection failed to decode packet for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
+ "sasl_io_recv failed to decode packet for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
+ PR_SetError(PR_IO_ERROR, 0);
+ return PR_FAILURE;
}
}
}
/* Finally, return data from the buffer to the caller */
{
size_t bytes_to_return = sp->decrypted_buffer_count - sp->decrypted_buffer_offset;
- if (bytes_to_return > count) {
- bytes_to_return = count;
+ if (bytes_to_return > len) {
+ bytes_to_return = len;
}
/* Copy data from the decrypted buffer starting at the offset */
- memcpy(buffer, sp->decrypted_buffer + sp->decrypted_buffer_offset, bytes_to_return);
+ memcpy(buf, sp->decrypted_buffer + sp->decrypted_buffer_offset, bytes_to_return);
if (bytes_in_buffer == bytes_to_return) {
sp->decrypted_buffer_offset = 0;
sp->decrypted_buffer_count = 0;
- } else {
- sp->decrypted_buffer_offset += bytes_to_return;
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_recv all decrypted data returned for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
+ } else {
+ sp->decrypted_buffer_offset += bytes_to_return;
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_recv returning %d bytes to caller %d bytes left to return for connection %" NSPRIu64 "\n",
+ bytes_to_return,
+ sp->decrypted_buffer_count - sp->decrypted_buffer_offset,
+ c->c_connid );
}
ret = bytes_to_return;
}
+ if (ret > 0) {
+ /* we actually read something - we can now send encrypted data */
+ sp->send_encrypted = PR_TRUE;
+ }
return ret;
}
-
-int
-sasl_read_function(int ignore, void *buffer, int count, struct lextiof_socket_private *handle )
+
+PRInt32
+sasl_io_send(PRFileDesc *fd, const void *buf, PRInt32 amount,
+ PRIntn flags, PRIntervalTime timeout)
{
- int ret = 0;
- sasl_io_private *sp = (sasl_io_private*) handle;
-
- /* First we look to see if we have buffered data that we can return to the caller */
- if ( (NULL == sp->decrypted_buffer) || ((sp->decrypted_buffer_count - sp->decrypted_buffer_offset) <= 0) ) {
- /* If we didn't have buffered data, we need to perform I/O and decrypt */
- PRUint32 buffer_length = 0;
- /* Read the packet length */
- ret = read_function(0, &buffer_length, sizeof(buffer_length), sp->real_handle);
- if (ret) {
+ PRInt32 ret = 0;
+ sasl_io_private *sp = sasl_get_io_private(fd);
+ Connection *c = sp->conn;
+ const char *crypt_buffer = NULL;
+ unsigned crypt_buffer_size = 0;
+
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_send writing %d bytes\n", amount, 0, 0 );
+ if (sp->send_encrypted) {
+ /* Get SASL to encrypt the buffer */
+ ret = sasl_encode(c->c_sasl_conn, buf, amount, &crypt_buffer, &crypt_buffer_size);
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_send encoded as %d bytes\n", crypt_buffer_size, 0, 0 );
+ ret = PR_Send(fd->lower, crypt_buffer, crypt_buffer_size, flags, timeout);
+ /* we need to return the amount of cleartext sent */
+ if (ret == crypt_buffer_size) {
+ ret = amount; /* sent amount of data requested by caller */
+ } else if (ret > 0) { /* could not send the entire encrypted buffer - error */
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_send error: only sent %d of %d encoded bytes\n", ret, crypt_buffer_size, 0 );
+ ret = PR_FAILURE;
+ PR_SetError(PR_IO_ERROR, 0);
}
- /* Read the payload */
- ret = read_function(0, sp->encrypted_buffer, buffer_length, sp->real_handle);
- if (ret) {
+ /* else - ret is error */
+ } else {
+ ret = PR_Send(fd->lower, buf, amount, flags, timeout);
+ }
+
+ return ret;
+}
+
+/*
+ * Need to handle cases where caller uses PR_Write instead of
+ * PR_Send on the network socket
+ */
+static PRInt32 PR_CALLBACK
+sasl_io_write(PRFileDesc *fd, const void *buf, PRInt32 amount)
+{
+ return sasl_io_send(fd, buf, amount, 0, PR_INTERVAL_NO_TIMEOUT);
+}
+
+static PRStatus PR_CALLBACK
+sasl_pop_IO_layer(PRFileDesc* stack)
+{
+ PRFileDesc* layer = PR_PopIOLayer(stack, sasl_LayerID);
+ sasl_io_private *sp = NULL;
+
+ if (!layer) {
+ LDAPDebug0Args( LDAP_DEBUG_CONNS,
+ "sasl_pop_IO_layer: error - no SASL IO layer\n" );
+ return PR_FAILURE;
+ }
+
+ sp = sasl_get_io_private(layer);
+
+ if (sp) {
+ LDAPDebug0Args( LDAP_DEBUG_CONNS,
+ "sasl_pop_IO_layer: removing SASL IO layer\n" );
+ /* Free the buffers */
+ slapi_ch_free_string(&sp->encrypted_buffer);
+ slapi_ch_free_string(&sp->decrypted_buffer);
+ slapi_ch_free((void**)&sp);
+ }
+ layer->secret = NULL;
+ if (layer->dtor) {
+ layer->dtor(layer);
+ }
+
+ return PR_SUCCESS;
+}
+
+static PRStatus PR_CALLBACK
+closeLayer(PRFileDesc* stack)
+{
+ LDAPDebug0Args( LDAP_DEBUG_CONNS,
+ "closeLayer: closing SASL IO layer\n" );
+ if (PR_FAILURE == sasl_pop_IO_layer(stack)) {
+ LDAPDebug0Args( LDAP_DEBUG_CONNS,
+ "closeLayer: error closing SASL IO layer\n" );
+ return PR_FAILURE;
+ }
+
+ LDAPDebug0Args( LDAP_DEBUG_CONNS,
+ "closeLayer: calling PR_Close to close other layers\n" );
+ return PR_Close(stack);
+}
+
+static PRStatus PR_CALLBACK
+initialize(void)
+{
+ sasl_LayerID = PR_GetUniqueIdentity(sasl_LayerName);
+ if (PR_INVALID_IO_LAYER == sasl_LayerID) {
+ return PR_FAILURE;
+ } else {
+ const PRIOMethods* defaults = PR_GetDefaultIOMethods();
+ if (!defaults) {
+ return PR_FAILURE;
+ } else {
+ memcpy(&sasl_IoMethods, defaults, sizeof(sasl_IoMethods));
}
- /* Now we can call sasl to decrypt */
- /* ret = sasl_decode(sp->conn->c_sasl_conn,sp->encrypted_buffer, buffer_length, sp->decrypted_buffer, &sp->decrypted_buffer_count ); */
}
- /* If things went well, copy the payload for the caller */
- if ( 0 == ret ) {
-/* size_t real_count = 0;
+ /* Customize methods: */
+ sasl_IoMethods.recv = sasl_io_recv;
+ sasl_IoMethods.send = sasl_io_send;
+ sasl_IoMethods.close = closeLayer;
+ sasl_IoMethods.write = sasl_io_write; /* some code uses PR_Write instead of PR_Send */
+ return PR_SUCCESS;
+}
+
+/*
+ * Push the SASL I/O layer on top of the current NSPR I/O layer of the prfd used
+ * by the connection.
+ */
+int
+sasl_io_enable(Connection *c)
+{
+ PRStatus rv = PR_CallOnce(&sasl_callOnce, initialize);
+ if (PR_SUCCESS == rv) {
+ PRFileDesc* layer = PR_CreateIOLayerStub(sasl_LayerID, &sasl_IoMethods);
+ sasl_io_private *sp = (sasl_io_private*) slapi_ch_calloc(1, sizeof(sasl_io_private));
+
+ sasl_io_init_buffers(sp);
+ layer->secret = sp;
- if (count >= (sp->buffer_count - sp->buffer_offset) ) {
- real_count = count;
+ PR_Lock( c->c_mutex );
+ sp->conn = c;
+ rv = PR_PushIOLayer(c->c_prfd, PR_TOP_IO_LAYER, layer);
+ PR_Unlock( c->c_mutex );
+ if (rv) {
+ LDAPDebug( LDAP_DEBUG_ANY,
+ "sasl_io_enable: error enabling sasl io on connection %" NSPRIu64 " %d:%s\n", c->c_connid, rv, slapd_pr_strerror(rv) );
} else {
- real_count = (sp->buffer_count - sp->buffer_offset);
+ LDAPDebug( LDAP_DEBUG_CONNS,
+ "sasl_io_enable: enabled sasl io on connection %" NSPRIu64 " \n", c->c_connid, 0, 0 );
+ debug_print_layers(c->c_prfd);
}
- memcpy(buffer, sp->buffer, real_count);
- sp->buffer_offset += real_count; */
}
-
- return ret;
+ return (int)rv;
}
+/*
+ * Remove the SASL I/O layer from the top of the current NSPR I/O layer of the prfd used
+ * by the connection. Must either be called within the connection lock, or be
+ * called while the connection is not being referenced by another thread.
+ */
int
-sasl_write_function(int ignore, const void *buffer, int count, struct lextiof_socket_private *handle)
+sasl_io_cleanup(Connection *c)
{
int ret = 0;
- sasl_io_private *sp = (sasl_io_private*) handle;
- const char *crypt_buffer = NULL;
- unsigned crypt_buffer_size = 0;
LDAPDebug( LDAP_DEBUG_CONNS,
- "sasl_write_function writing %d bytes\n", count, 0, 0 );
- /* Get SASL to encrypt the buffer */
- ret = sasl_encode(sp->conn->c_sasl_conn, buffer, count, &crypt_buffer, &crypt_buffer_size);
- LDAPDebug( LDAP_DEBUG_CONNS,
- "sasl_write_function encoded as %d bytes\n", crypt_buffer_size, 0, 0 );
+ "sasl_io_cleanup for connection %" NSPRIu64 "\n", c->c_connid, 0, 0 );
+
+ ret = sasl_pop_IO_layer(c->c_prfd);
+
+ c->c_sasl_ssf = 0;
- ret = write_function(0, crypt_buffer, crypt_buffer_size, sp->real_handle);
- if (ret) {
- }
-
return ret;
}
-
diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c
index fbd0b3dac..3bee6f8ca 100644
--- a/ldap/servers/slapd/saslbind.c
+++ b/ldap/servers/slapd/saslbind.c
@@ -910,24 +910,14 @@ void ids_sasl_check_bind(Slapi_PBlock *pb)
(const void**)&ssfp) == SASL_OK) && (*ssfp > 0)) {
LDAPDebug(LDAP_DEBUG_TRACE, "sasl ssf=%u\n", (unsigned)*ssfp, 0, 0);
- if (pb->pb_conn->c_flags & CONN_FLAG_SSL) {
+ /* Enable SASL I/O on the connection now */
+ if (0 != sasl_io_enable(pb->pb_conn) ) {
send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL,
- "sasl encryption not supported over ssl",
- 0, NULL);
- if ( bind_target_entry != NULL )
- slapi_entry_free(bind_target_entry);
- break;
- } else {
- /* Enable SASL I/O on the connection now */
- /* Note that this doesn't go into effect until the next _read_ operation is done */
- if (0 != sasl_io_enable(pb->pb_conn) ) {
- send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL,
"failed to enable sasl i/o",
0, NULL);
- }
+ }
/* Set the SSF in the connection */
pb->pb_conn->c_sasl_ssf = (unsigned)*ssfp;
- }
}
/* set the connection bind credentials */
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index ffcba46c5..fd2c7d734 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1229,8 +1229,6 @@ typedef struct op {
struct Conn_Private;
typedef struct Conn_private Conn_private;
-struct _sasl_io_private;
-typedef struct _sasl_io_private sasl_io_private;
typedef struct conn {
Sockbuf *c_sb; /* ber connection stuff */
@@ -1271,9 +1269,6 @@ typedef struct conn {
Slapi_Backend *c_bi_backend; /* which backend is doing the import */
void *c_extension; /* plugins are able to extend the Connection object */
void *c_sasl_conn; /* sasl library connection sasl_conn_t */
- sasl_io_private *c_sasl_io_private; /* Private data for SASL I/O Layer */
- int c_enable_sasl_io; /* Flag to tell us to enable SASL I/O on the next read */
- int c_sasl_io; /* Flag to tell us to enable SASL I/O on the next read */
int c_sasl_ssf; /* flag to tell us the SASL SSF */
int c_unix_local; /* flag true for LDAPI */
int c_local_valid; /* flag true if the uid/gid are valid */
| 0 |
29a517d5a5c6fa913d2fea619d990e9274f3f3de
|
389ds/389-ds-base
|
bump version to 1.2.9.a1 - console version to 1.2.4
bump version to 1.2.9.a1 - console version to 1.2.4
|
commit 29a517d5a5c6fa913d2fea619d990e9274f3f3de
Author: Rich Megginson <[email protected]>
Date: Tue Feb 22 13:12:31 2011 -0700
bump version to 1.2.9.a1 - console version to 1.2.4
bump version to 1.2.9.a1 - console version to 1.2.4
diff --git a/VERSION.sh b/VERSION.sh
index c1b0bcaf4..e725b61f4 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,11 +10,11 @@ vendor="389 Project"
# PACKAGE_VERSION is constructed from these
VERSION_MAJOR=1
VERSION_MINOR=2
-VERSION_MAINT=8
+VERSION_MAINT=9
# if this is a PRERELEASE, set VERSION_PREREL
# otherwise, comment it out
# be sure to include the dot prefix in the prerel
-VERSION_PREREL=.a2
+VERSION_PREREL=.a1
# NOTES on VERSION_PREREL
# use aN for an alpha release e.g. a1, a2, etc.
# use rcN for a release candidate e.g. rc1, rc2, etc.
@@ -50,4 +50,4 @@ PACKAGE_BUGREPORT="${PACKAGE_BUGREPORT}enter_bug.cgi?product=$brand"
PACKAGE_STRING="$PACKAGE_TARNAME $PACKAGE_VERSION"
# the version of the ds console package that this directory server
# is compatible with
-CONSOLE_VERSION=$VERSION_MAJOR.$VERSION_MINOR.3
+CONSOLE_VERSION=$VERSION_MAJOR.$VERSION_MINOR.4
| 0 |
bd148df8221f798463e060c66a2b7311c0778353
|
389ds/389-ds-base
|
Bug 687974 - (cov#10715) Fix Coverity uninitialized variables issues
The ldif2ldbm code can return an uninitialized integer if it runs
into a problem starting the dblayer to get the next USN. This block
of code should use the ret variable to capture the result of starting
the dblayer to ensure it is set before it gets returned.
|
commit bd148df8221f798463e060c66a2b7311c0778353
Author: Nathan Kinder <[email protected]>
Date: Tue Mar 15 14:35:58 2011 -0700
Bug 687974 - (cov#10715) Fix Coverity uninitialized variables issues
The ldif2ldbm code can return an uninitialized integer if it runs
into a problem starting the dblayer to get the next USN. This block
of code should use the ret variable to capture the result of starting
the dblayer to ensure it is set before it gets returned.
diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
index fd7934ae9..cf0f5395d 100644
--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
@@ -708,21 +708,21 @@ int ldbm_back_ldif2ldbm( Slapi_PBlock *pb )
* initialize the USN counter to get the next USN */
if (plugin_enabled("USN", li->li_identity)) {
/* close immediately; no need to run db threads */
- int rc = dblayer_start(li,
+ ret = dblayer_start(li,
DBLAYER_NORMAL_MODE|DBLAYER_NO_DBTHREADS_MODE);
- if (rc) {
+ if (ret) {
LDAPDebug2Args(LDAP_DEBUG_ANY,
"ldbm_back_ldif2ldbm: dblayer_start failed! %s (%d)\n",
- dblayer_strerror(rc), rc);
+ dblayer_strerror(ret), ret);
goto fail;
}
/* initialize the USN counter */
ldbm_usn_init(li);
- rc = dblayer_close(li, DBLAYER_NORMAL_MODE);
- if (rc) {
+ ret = dblayer_close(li, DBLAYER_NORMAL_MODE);
+ if (ret != 0) {
LDAPDebug2Args(LDAP_DEBUG_ANY,
"ldbm_back_ldif2ldbm: dblayer_close failed! %s (%d)\n",
- dblayer_strerror(rc), rc);
+ dblayer_strerror(ret), ret);
}
}
| 0 |
13184ae13da8594b85efc7fff5317bbe2791c848
|
389ds/389-ds-base
|
Ticket #48048 - Fix coverity issues - 2015/2/24
Coverity defect 13050 - Resource leak
Description: Free s_attrName when returning for "Invalid syntax".
modified: DS_LASGroupDnAttrEval in acllas.c
|
commit 13184ae13da8594b85efc7fff5317bbe2791c848
Author: Noriko Hosoi <[email protected]>
Date: Tue Feb 24 17:11:20 2015 -0800
Ticket #48048 - Fix coverity issues - 2015/2/24
Coverity defect 13050 - Resource leak
Description: Free s_attrName when returning for "Invalid syntax".
modified: DS_LASGroupDnAttrEval in acllas.c
diff --git a/ldap/servers/plugins/acl/acllas.c b/ldap/servers/plugins/acl/acllas.c
index e3c1778fb..935e5df35 100644
--- a/ldap/servers/plugins/acl/acllas.c
+++ b/ldap/servers/plugins/acl/acllas.c
@@ -2586,7 +2586,6 @@ DS_LASGroupDnAttrEval(NSErr_t *errp, char *attr_name, CmpOp_t comparator,
PList_t subject, PList_t resource, PList_t auth_info,
PList_t global_auth)
{
-
char *s_attrName = NULL;
char *attrName;
char *ptr;
@@ -2731,6 +2730,7 @@ DS_LASGroupDnAttrEval(NSErr_t *errp, char *attr_name, CmpOp_t comparator,
slapi_log_error( SLAPI_LOG_FATAL, plugin_name,
"DS_LASGroupDnAttrEval: Invalid syntax: %s\n",
attrVal->bv_val );
+ slapi_ch_free_string(&s_attrName);
return 0;
}
matched = acllas__user_ismember_of_group (
@@ -2804,37 +2804,36 @@ DS_LASGroupDnAttrEval(NSErr_t *errp, char *attr_name, CmpOp_t comparator,
lasinfo.clientDn, ACLLAS_CACHE_ALL_GROUPS,
lasinfo.aclpb->aclpb_clientcert);
if (matched == ACL_TRUE) {
- break;
- } else if ( matched == ACL_DONT_KNOW ) {
- /* record this but keep going--maybe another group will evaluate to TRUE */
- got_undefined = 1;
- }
+ break;
+ } else if ( matched == ACL_DONT_KNOW ) {
+ /* record this but keep going--maybe another group will evaluate to TRUE */
+ got_undefined = 1;
+ }
}
/* Deallocate the member array and the member struct */
for (j=0; j < info.numofGroups; j++)
slapi_ch_free ((void **) &info.member[j]);
slapi_ch_free ((void **) &info.member);
- }
- if (matched == ACL_TRUE) {
+ }
+ if (matched == ACL_TRUE) {
slapi_log_error( SLAPI_LOG_ACL, plugin_name,
"groupdnattr matches at level (%d)\n", levels[i]);
break;
} else if ( matched == ACL_DONT_KNOW ) {
- /* record this but keep going--maybe another group at another level
+ /* record this but keep going--maybe another group at another level
* will evaluate to TRUE.
- */
- got_undefined = 1;
- }
-
+ */
+ got_undefined = 1;
+ }
} /* NumofLevels */
}
- if (s_attrName) slapi_ch_free ((void**) &s_attrName );
+ slapi_ch_free_string(&s_attrName);
/*
* If no terms were undefined, then evaluate as normal.
* If there was an undefined term, but another one was TRUE, then we also evaluate
* as normal. Otherwise, the whole expression is UNDEFINED.
- */
+ */
if ( matched == ACL_TRUE || !got_undefined ) {
if (comparator == CMP_OP_EQ) {
rc = (matched == ACL_TRUE ? LAS_EVAL_TRUE : LAS_EVAL_FALSE);
| 0 |
2de4dc57dc0cdd0e0daaa16c0e385a703693155f
|
389ds/389-ds-base
|
146294 - changed dsgw to be brand agnostic - [email protected]
|
commit 2de4dc57dc0cdd0e0daaa16c0e385a703693155f
Author: Nathan Kinder <[email protected]>
Date: Wed Jan 26 21:19:47 2005 +0000
146294 - changed dsgw to be brand agnostic - [email protected]
diff --git a/ldap/clients/dsgw/html/Makefile b/ldap/clients/dsgw/html/Makefile
index 6fe5280b7..8ce1b4777 100644
--- a/ldap/clients/dsgw/html/Makefile
+++ b/ldap/clients/dsgw/html/Makefile
@@ -25,7 +25,7 @@ HTMLDEST = $(DSGW_HTML_RELDIR)
HTML= auth.html authroot.html authtitle.html csearchtitle.html \
emptyFrame.html greeting.html index.html maintitle.html \
newentrytitle.html searchtitle.html style.css \
- transparent.gif back1.gif content1.gif netscape.gif \
+ transparent.gif back1.gif content1.gif rolodex.gif \
country.gif exit1.gif forward1.gif group.gif index1.gif \
left_bottom.gif left_on.gif right_off.gif \
left_off.gif right_bottom.gif right_on.gif \
diff --git a/ldap/clients/dsgw/html/auth.html b/ldap/clients/dsgw/html/auth.html
index e3fabe5da..aa59ef28f 100644
--- a/ldap/clients/dsgw/html/auth.html
+++ b/ldap/clients/dsgw/html/auth.html
@@ -7,7 +7,7 @@
<HTML>
<HEAD>
<TITLE>
-Netscape Directory Server Gateway: Authenticate
+Directory Server Gateway: Authenticate
</TITLE>
</HEAD>
@@ -18,9 +18,9 @@ Netscape Directory Server Gateway: Authenticate
</FRAMESET>
<NOFRAMES>
<H3>Frames-capable browser required</H3>
-Sorry, but in order to use the Netscape Directory Server Gateway,
-you must use a browser which supports HTML forms and JavaScript, such
-as Netscape Navigator version 3 or later. To learn how to obtain
-Navigator, visit the <A HREF="http://home.netscape.com">Netscape Home Page</A>.
+Sorry, but in order to use the Directory Server Gateway, you
+must use a browser which supports HTML forms and JavaScript, such
+as Mozilla Firefox. To learn how to obtain Firefox, visit the
+<A HREF="http://www.mozilla.org">Mozilla Home Page</A>.
</NOFRAMES>
</HTML>
diff --git a/ldap/clients/dsgw/html/authroot.html b/ldap/clients/dsgw/html/authroot.html
index 0d32aa120..160edd171 100644
--- a/ldap/clients/dsgw/html/authroot.html
+++ b/ldap/clients/dsgw/html/authroot.html
@@ -7,7 +7,7 @@
<HTML>
<HEAD>
<TITLE>
-Netscape Directory Server Gateway: Authenticate
+Directory Server Gateway: Authenticate
</TITLE>
</HEAD>
<FRAMESET ROWS=130,* BORDER=0>
@@ -18,9 +18,9 @@ NORESIZE SCROLLING="auto" MARGINHEIGHT=8 MARGINWIDTH=8>
</FRAMESET>
<NOFRAMES>
<H3>Frames-capable browser required</H3>
-Sorry, but in order to use the Netscape Directory Server Gateway,
-you must use a browser which supports HTML forms and JavaScript, such
-as Netscape Navigator version 3 or later. To learn how to obtain
-Navigator, visit the <A HREF="http://home.netscape.com">Netscape Home Page</A>.
+Sorry, but in order to use the Directory Server Gateway, you
+must use a browser which supports HTML forms and JavaScript, such
+as Mozilla Firefox. To learn how to obtain Firefox, visit the
+<A HREF="http://www.mozilla.org">Mozilla Home Page</A>.
</NOFRAMES>
</HTML>
diff --git a/ldap/clients/dsgw/html/authtitle.html b/ldap/clients/dsgw/html/authtitle.html
index 1bdbd3f43..fbb5a7b5c 100644
--- a/ldap/clients/dsgw/html/authtitle.html
+++ b/ldap/clients/dsgw/html/authtitle.html
@@ -6,7 +6,7 @@
-->
<HTML>
<HEAD>
-<TITLE>Netscape Directory Server</TITLE>
+<TITLE>Directory Server</TITLE>
<LINK REL=stylesheet TYPE="text/css" HREF="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=style.css">
</HEAD>
@@ -22,9 +22,9 @@
</tr>
<tr>
<td><img border="0" height="1" width="15" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td><a href="http://www.netscape.com"><img border="0" height="19" width="19" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=netscape.gif"></a></td>
+ <td><img border="0" height="54" width="51" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=rolodex.gif"></td>
<td><img border="0" height="1" width="7" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td nowrap="true" align=left width="100%" class="appName">Netscape Directory Server Gateway</td>
+ <td nowrap="true" align=left width="100%" class="appName">Directory Server Gateway</td>
</tr>
<tr>
<td colspan="4"><img border="0" height="12" width="1" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif">
diff --git a/ldap/clients/dsgw/html/csearchtitle.html b/ldap/clients/dsgw/html/csearchtitle.html
index 4a1d4ddc2..d416efc6d 100644
--- a/ldap/clients/dsgw/html/csearchtitle.html
+++ b/ldap/clients/dsgw/html/csearchtitle.html
@@ -6,7 +6,7 @@
-->
<HTML>
<HEAD>
-<TITLE>Netscape Directory Server</TITLE>
+<TITLE>Directory Server</TITLE>
<LINK REL=stylesheet TYPE="text/css" HREF="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=style.css">
</HEAD>
@@ -22,9 +22,9 @@
</tr>
<tr>
<td><img border="0" height="1" width="15" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td><a href="http://www.netscape.com"><img border="0" height="19" width="19" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=netscape.gif"></a></td>
+ <td><img border="0" height="54" width="51" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=rolodex.gif"></td>
<td><img border="0" height="1" width="7" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td nowrap="true" align=left width="100%" class="appName">Netscape Directory Server Gateway</td>
+ <td nowrap="true" align=left width="100%" class="appName">Directory Server Gateway</td>
</tr>
<tr>
<td colspan="4"><img border="0" height="12" width="1" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif">
diff --git a/ldap/clients/dsgw/html/greeting.html b/ldap/clients/dsgw/html/greeting.html
index aa81b54d0..d57bdb11e 100644
--- a/ldap/clients/dsgw/html/greeting.html
+++ b/ldap/clients/dsgw/html/greeting.html
@@ -6,7 +6,7 @@
-->
<HTML>
<HEAD>
-<TITLE>Netscape Directory Server</TITLE>
+<TITLE>Directory Server</TITLE>
<SCRIPT LANGUAGE="JavaScript">
<!-- Hide from non-JavaScript browsers
function gotoURL(h) {
@@ -22,8 +22,8 @@ function gotoURL(h) {
<CENTER> <TABLE CELLPADDING=5 CELLSPACING=5 WIDTH="80%">
<TR> <TD COLSPAN=2>
-You are using the Netscape Directory Server Gateway. This interface can be
-used to search for, modify, and create entries that are stored in the Netscape Directory Server.
+You are using the Directory Server Gateway. This interface can be
+used to search for, modify, and create entries that are stored in the directory.
<P>
You are currently viewing the Standard Search screen, which provides an
easy and convenient way to search the directory. Standard Search
diff --git a/ldap/clients/dsgw/html/index.html b/ldap/clients/dsgw/html/index.html
index ee2012d00..7246311b7 100644
--- a/ldap/clients/dsgw/html/index.html
+++ b/ldap/clients/dsgw/html/index.html
@@ -6,7 +6,7 @@
-->
<HTML>
<HEAD>
-<TITLE>Netscape Directory Server Gateway</TITLE>
+<TITLE>Directory Server Gateway</TITLE>
</HEAD>
<FRAMESET ROWS=130,* BORDER=0>
<FRAME SRC="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=maintitle.html" NAME="buttonBarFrame" NORESIZE
@@ -16,9 +16,9 @@
</FRAMESET>
<NOFRAMES>
<H3>Frames-capable browser required</H3>
-Sorry, but in order to use the Netscape Directory Server Gateway,
-you must use a browser which supports HTML forms and JavaScript, such
-as Netscape Navigator version 3 or later. To learn how to obtain
-Navigator, visit the <A HREF="http://home.netscape.com">Netscape Home Page</A>.
+Sorry, but in order to use the Directory Server Gateway, you
+must use a browser which supports HTML forms and JavaScript, such
+as Mozilla Firefox. To learn how to obtain Firefox, visit the
+<A HREF="http://www.mozilla.org">Mozilla Home Page</A>.
</NOFRAMES>
</HTML>
diff --git a/ldap/clients/dsgw/html/left_bottom.gif b/ldap/clients/dsgw/html/left_bottom.gif
index bbd159ca0..fa1746c08 100644
Binary files a/ldap/clients/dsgw/html/left_bottom.gif and b/ldap/clients/dsgw/html/left_bottom.gif differ
diff --git a/ldap/clients/dsgw/html/left_off.gif b/ldap/clients/dsgw/html/left_off.gif
index 041754a7f..ff44de671 100644
Binary files a/ldap/clients/dsgw/html/left_off.gif and b/ldap/clients/dsgw/html/left_off.gif differ
diff --git a/ldap/clients/dsgw/html/left_on.gif b/ldap/clients/dsgw/html/left_on.gif
index 079675ca1..b5f0ae436 100644
Binary files a/ldap/clients/dsgw/html/left_on.gif and b/ldap/clients/dsgw/html/left_on.gif differ
diff --git a/ldap/clients/dsgw/html/maintitle.html b/ldap/clients/dsgw/html/maintitle.html
index 5b5fee782..479332330 100644
--- a/ldap/clients/dsgw/html/maintitle.html
+++ b/ldap/clients/dsgw/html/maintitle.html
@@ -6,7 +6,7 @@
-->
<HTML>
<HEAD>
-<TITLE>Netscape Directory Server</TITLE>
+<TITLE>Directory Server</TITLE>
<LINK REL=stylesheet TYPE="text/css" HREF="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=style.css">
</HEAD>
@@ -22,9 +22,9 @@
</tr>
<tr>
<td><img border="0" height="1" width="15" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td><a href="http://www.netscape.com"><img border="0" height="19" width="19" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=netscape.gif"></a></td>
+ <td><img border="0" height="54" width="51" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=rolodex.gif"></td>
<td><img border="0" height="1" width="7" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td nowrap="true" align=left width="100%" class="appName">Netscape Directory Server Gateway</td>
+ <td nowrap="true" align=left width="100%" class="appName">Directory Server Gateway</td>
</tr>
<tr>
<td colspan="4"><img border="0" height="12" width="1" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif">
diff --git a/ldap/clients/dsgw/html/netscape.gif b/ldap/clients/dsgw/html/netscape.gif
deleted file mode 100644
index 81a3e4a67..000000000
Binary files a/ldap/clients/dsgw/html/netscape.gif and /dev/null differ
diff --git a/ldap/clients/dsgw/html/newentrytitle.html b/ldap/clients/dsgw/html/newentrytitle.html
index 4c6683969..0b0a9d17c 100644
--- a/ldap/clients/dsgw/html/newentrytitle.html
+++ b/ldap/clients/dsgw/html/newentrytitle.html
@@ -6,7 +6,7 @@
-->
<HTML>
<HEAD>
-<TITLE>Netscape Directory Server</TITLE>
+<TITLE>Directory Server</TITLE>
<LINK REL=stylesheet TYPE="text/css" HREF="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=style.css">
</HEAD>
@@ -22,9 +22,9 @@
</tr>
<tr>
<td><img border="0" height="1" width="15" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td><a href="http://www.netscape.com"><img border="0" height="19" width="19" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=netscape.gif"></a></td>
+ <td><img border="0" height="54" width="51" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=rolodex.gif"></td>
<td><img border="0" height="1" width="7" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td nowrap="true" align=left width="100%" class="appName">Netscape Directory Server Gateway</td>
+ <td nowrap="true" align=left width="100%" class="appName">Directory Server Gateway</td>
</tr>
<tr>
<td colspan="4"><img border="0" height="12" width="1" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif">
diff --git a/ldap/clients/dsgw/html/right_bottom.gif b/ldap/clients/dsgw/html/right_bottom.gif
index b49e450e9..9bf54fa2f 100644
Binary files a/ldap/clients/dsgw/html/right_bottom.gif and b/ldap/clients/dsgw/html/right_bottom.gif differ
diff --git a/ldap/clients/dsgw/html/right_off.gif b/ldap/clients/dsgw/html/right_off.gif
index cf5b930bc..b360dbbd3 100644
Binary files a/ldap/clients/dsgw/html/right_off.gif and b/ldap/clients/dsgw/html/right_off.gif differ
diff --git a/ldap/clients/dsgw/html/right_on.gif b/ldap/clients/dsgw/html/right_on.gif
index 643b540b7..268a050db 100644
Binary files a/ldap/clients/dsgw/html/right_on.gif and b/ldap/clients/dsgw/html/right_on.gif differ
diff --git a/ldap/clients/dsgw/html/rolodex.gif b/ldap/clients/dsgw/html/rolodex.gif
new file mode 100644
index 000000000..515c0d25b
Binary files /dev/null and b/ldap/clients/dsgw/html/rolodex.gif differ
diff --git a/ldap/clients/dsgw/html/searchtitle.html b/ldap/clients/dsgw/html/searchtitle.html
index 417195125..95ea8223f 100644
--- a/ldap/clients/dsgw/html/searchtitle.html
+++ b/ldap/clients/dsgw/html/searchtitle.html
@@ -6,7 +6,7 @@
-->
<HTML>
<HEAD>
-<TITLE>Netscape Directory Server</TITLE>
+<TITLE>Directory Server</TITLE>
<LINK REL=stylesheet TYPE="text/css" HREF="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=style.css">
</HEAD>
@@ -22,9 +22,9 @@
</tr>
<tr>
<td><img border="0" height="1" width="15" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td><a href="http://www.netscape.com"><img border="0" height="19" width="19" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=netscape.gif"></a></td>
+ <td><img border="0" height="54" width="51" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=rolodex.gif"></td>
<td><img border="0" height="1" width="7" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif"></td>
- <td nowrap="true" align=left width="100%" class="appName">Netscape Directory Server Gateway</td>
+ <td nowrap="true" align=left width="100%" class="appName">Directory Server Gateway</td>
</tr>
<tr>
<td colspan="4"><img border="0" height="12" width="1" src="/clients/dsgw/bin/lang?<!-- GCONTEXT -->&file=clear.gif">
diff --git a/ldap/clients/dsgw/html/style.css b/ldap/clients/dsgw/html/style.css
index d6d59a0ea..7c63f233b 100644
--- a/ldap/clients/dsgw/html/style.css
+++ b/ldap/clients/dsgw/html/style.css
@@ -27,7 +27,7 @@ th {
font-weight: bold;
color: #ffffff;
vertical-align : middle;
- background-color: #336699;
+ background-color: #666666;
}
@@ -80,9 +80,9 @@ body.Search {
td.appName {
font-family: verdana, Arial, Helvetica, sans-serif;
- font-size: 12px;
+ font-size: 16px;
vertical-align : middle;
- color: #ccffff;
+ color: #ffffff;
font-weight: bold;
}
@@ -102,17 +102,17 @@ td.appName {
.link3:Visited {color: #ccffff; font-size: 11px;}
.link3:Hover {color: #ccffff; font-size: 11px;}
-.link6 {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link6:Link {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link6:Hover {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link6:Visited {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link6:Active {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link6 {color: #dddddd; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link6:Link {color: #dddddd; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link6:Hover {color: #dddddd; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link6:Visited {color: #dddddd; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link6:Active {color: #dddddd; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link7 {color: #003366; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link7:Link {color: #003366; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link7:Hover {color: #003366; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link7:Visited {color: #003366; font-size: 12px; font-weight: bold; text-decoration: none;}
-.link7:Active {color: #003366; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link7 {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link7:Link {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link7:Hover {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link7:Visited {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
+.link7:Active {color: #ffffff; font-size: 12px; font-weight: bold; text-decoration: none;}
.link14 {color: #ffffff; font-size: 11px;}
.link14:Link {color: #ffffff; font-size: 11px;}
@@ -127,16 +127,16 @@ td.appName {
.text31 {color: #000000; font-size: 14px; font-family: Verdana, Arial, Helvetica; font-weight: bold;}
.text22 {color: #000000; font-size: 11px; font-family: Verdana, Arial, Helvetica;}
-.bgColor1 {background-color: #003366;}
+.bgColor1 {background-color: #000000;}
.bgColor4 {background-color: #cccccc;}
.bgColor7 {background-color: #66ccff;}
-.bgColor9 {background-color: #336699;}
+.bgColor9 {background-color: #666666;}
.bgColor16 {background-color: #0033CC;}
/* background colors for tabs and tab area */
-.bgAtTab {background-color: #ccffff;}
+.bgAtTab {background-color: #999999;}
.bgAtTabHighlight {background-color: #ffffff;}
-.bgRegTab {background-color: #0099cc;}
-.bgRegTabHighlight {background-color: #66ccff;}
+.bgRegTab {background-color: #666666;}
+.bgRegTabHighlight {background-color: #999999;}
.bgInsideRule {background-color: #66ccff;}
.bgDarkRule {background-color: #000000;}
| 0 |
bd3daf13be804b87244e625279942c526ea9478a
|
389ds/389-ds-base
|
Bump version to 1.4.0.14
|
commit bd3daf13be804b87244e625279942c526ea9478a
Author: Mark Reynolds <[email protected]>
Date: Fri Aug 10 11:22:45 2018 -0400
Bump version to 1.4.0.14
diff --git a/VERSION.sh b/VERSION.sh
index 90e41a5b6..9d0332ae9 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,7 +10,7 @@ vendor="389 Project"
# PACKAGE_VERSION is constructed from these
VERSION_MAJOR=1
VERSION_MINOR=4
-VERSION_MAINT=0.13
+VERSION_MAINT=0.14
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
VERSION_DATE=$(date -u +%Y%m%d)
| 0 |
f75b5e24ee4ef366be1cca0b099871d3540b29fb
|
389ds/389-ds-base
|
Issue 6265 - lmdb - missing entries in range searches (#6266)
* Issue 6265 - lmdb - missing entries in range searches
Several issues seen after generating ldif with 2000 users and importing it in a replica:
1. The entryid attribute in missing in the suffix entry.
2. Access log shows that the internal search looking for "(parentid>=1)" is not returning all entries but one.
3. When initializing a replica through a replication agreement some entries are missing (because of 2)
4. Once 2. get fixed, the bulk import still fails because the default values for nsds5ReplicaFlowControlWindow and nsds5ReplicaFlowControlPause are not adapted to lmdb (supplier sent the entry faster than bdb and the target replica import them slower.
The fix is about:
1. Ensuring that the operational attribute are properly set when importing the suffix entry.
2. and 3. Avoid using database bulk operation when computing range unless we are sure that bdb is used. (rely instead on the generic dblayer database iterator - dblayer_cursor_iterate.
4. Change the default values for nsds5ReplicaFlowControlWindow and nsds5ReplicaFlowControlPause if agreement is on a lmdb backend.
Issue: #6265
Reviewed by: @vashirov, @droideck (Thanks!)
|
commit f75b5e24ee4ef366be1cca0b099871d3540b29fb
Author: progier389 <[email protected]>
Date: Thu Jul 25 09:40:15 2024 +0200
Issue 6265 - lmdb - missing entries in range searches (#6266)
* Issue 6265 - lmdb - missing entries in range searches
Several issues seen after generating ldif with 2000 users and importing it in a replica:
1. The entryid attribute in missing in the suffix entry.
2. Access log shows that the internal search looking for "(parentid>=1)" is not returning all entries but one.
3. When initializing a replica through a replication agreement some entries are missing (because of 2)
4. Once 2. get fixed, the bulk import still fails because the default values for nsds5ReplicaFlowControlWindow and nsds5ReplicaFlowControlPause are not adapted to lmdb (supplier sent the entry faster than bdb and the target replica import them slower.
The fix is about:
1. Ensuring that the operational attribute are properly set when importing the suffix entry.
2. and 3. Avoid using database bulk operation when computing range unless we are sure that bdb is used. (rely instead on the generic dblayer database iterator - dblayer_cursor_iterate.
4. Change the default values for nsds5ReplicaFlowControlWindow and nsds5ReplicaFlowControlPause if agreement is on a lmdb backend.
Issue: #6265
Reviewed by: @vashirov, @droideck (Thanks!)
diff --git a/dirsrvtests/tests/suites/replication/regression_m2_test.py b/dirsrvtests/tests/suites/replication/regression_m2_test.py
index aba72de17..21b296d18 100644
--- a/dirsrvtests/tests/suites/replication/regression_m2_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_m2_test.py
@@ -15,6 +15,10 @@ import ldap
import pytest
import subprocess
import time
+import random
+import string
+from shutil import rmtree
+from lib389.dbgen import dbgen_users
from lib389.idm.user import TEST_USER_PROPERTIES, UserAccount, UserAccounts
from lib389.pwpolicy import PwPolicyManager
from lib389.utils import *
@@ -25,7 +29,7 @@ from lib389.idm.group import Groups, Group
from lib389.idm.domain import Domain
from lib389.idm.directorymanager import DirectoryManager
from lib389.idm.services import ServiceAccounts, ServiceAccount
-from lib389.replica import Replicas, ReplicationManager, ReplicaRole
+from lib389.replica import Replicas, ReplicationManager, ReplicaRole, BootstrapReplicationManager
from lib389.agreement import Agreements
from lib389 import pid_from_file
from lib389.dseldif import *
@@ -258,6 +262,46 @@ def topo_with_sigkill(request):
return topology
[email protected](scope="function")
+def preserve_topo_m2(topo_m2, request):
+ """Backup the topology and restore it at the end."""
+
+ saves = []
+
+ def fin():
+ for inst,backup_dir in saves:
+ inst.tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True})
+ rmtree(backup_dir)
+
+ if not DEBUGGING:
+ request.addfinalizer(fin)
+
+ bindcn = "replication manager"
+ binddn = f"cn={bindcn},cn=config"
+ bindpw = ''.join(random.choices(string.ascii_letters + string.digits, k=15))
+ for inst in topo_m2:
+ backup_dir = f'{inst.ds_paths.backup_dir}/topo_bak'
+ try:
+ rmtree(backup_dir)
+ except FileNotFoundError:
+ pass
+ inst.tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True})
+ # Ensure that we are not using group bind dn
+ # because the test may delete credentials
+ replmgr = BootstrapReplicationManager(inst, dn=binddn)
+ if replmgr.exists():
+ replmgr.replace('userPassword', bindpw)
+ else:
+ replmgr.create(properties={'cn':bindcn, 'userPassword':bindpw})
+ replica = Replicas(inst).get(DEFAULT_SUFFIX)
+ replica.replace(REPL_BINDDN, binddn);
+ replica.remove_all(REPL_BIND_GROUP);
+ for agmt in replica.get_agreements().list():
+ agmt.replace_many((AGMT_CRED, bindpw), (REPL_BINDDN, binddn))
+ saves.append((inst, backup_dir))
+ return topo_m2
+
+
@pytest.fixture()
def create_entry(topo_m2, request):
"""Add test entry using UserAccounts"""
@@ -1021,6 +1065,85 @@ def test_repl_after_reindex(topo_m2):
repl.wait_for_replication(m2, m1)
+def test_suffix_entryid(preserve_topo_m2):
+ """Test that entryid attribute is present in suffix entry
+
+ :id: 9201f2c8-3eb8-11ef-80cc-482ae39447e5
+ :setup: Two suppliers replicated instances
+ :steps:
+ 1. Generate LDIF file
+ 2. Import the ldif file
+ 3. Check that identry is still present in suffix entry
+ 4. Check that parentid>=1 search returns all entries but one
+ :expectedresults:
+ 1. Operation successful
+ 2. Operation successful
+ 3. Suffix id2entry should be 1
+ 4. parentid>=1 search returns all entries but one
+ """
+ s1 = preserve_topo_m2.ms["supplier1"]
+ ldif_file = f'{s1.get_ldif_dir()}/db10.ldif'
+ dbgen_users(s1, 10, ldif_file, DEFAULT_SUFFIX)
+ s1.tasks.importLDIF(benamebase=DEFAULT_BENAME,
+ input_file=ldif_file,
+ args={TASK_WAIT: True})
+ dc = Domain(s1, dn=DEFAULT_SUFFIX)
+ assert dc.get_attr_val_utf8('entryid') == '1'
+ all_entries = s1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)", attrlist=('dn',), escapehatch='i am sure')
+ childrens = s1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(parentid>=1)", attrlist=('dn',), escapehatch='i am sure')
+ assert len(all_entries) == (len(childrens) + 1)
+
+
+def test_bulk_import(preserve_topo_m2):
+ """Test that bulk import is working properly
+
+ :id: 4e73254a-3ed6-11ef-aa44-482ae39447e5
+ :setup: Two suppliers replicated instances
+ :steps:
+ 1. Generate LDIF file
+ 2. Import the ldif file
+ 3. Add replication_managers group
+ 4. Perform bulk import
+ 5. Check that replication is still working
+ 6. Check that the replicas have the same number of users
+ :expectedresults:
+ 1. Operation successful
+ 2. Operation successful
+ 3. Operation successful
+ 4. Operation successful
+ 5. Replication should be in sync
+ 6. Replicas should have same number of user entries
+ """
+ s1 = preserve_topo_m2.ms["supplier1"]
+ s2 = preserve_topo_m2.ms["supplier2"]
+ ldif_file = f'{s1.get_ldif_dir()}/db2K.ldif'
+ dbgen_users(s1, 2000, ldif_file, DEFAULT_SUFFIX)
+ s1.tasks.importLDIF(benamebase=DEFAULT_BENAME,
+ input_file=ldif_file,
+ args={TASK_WAIT: True})
+
+ # Create replication_managers group so that we can use
+ # repl.test_replication_topology
+ repl = ReplicationManager(DEFAULT_SUFFIX)
+ repl._create_service_group(s1)
+ repl._create_service_account(s1, s2)
+
+ agmt = Agreements(s1).list()[0]
+ agmt.begin_reinit()
+ (done, error) = agmt.wait_reinit()
+ assert done is True
+ assert error is False
+
+ repl.test_replication_topology(preserve_topo_m2)
+
+ # Cannot use UserAccounts().list() because 'account' objectclass is missing
+ users_s1 = s1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(uid=*)", escapehatch='i am sure')
+ log.info(f"{len(users_s1)} user entries found on supplier1")
+ users_s2 = s2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(uid=*)", escapehatch='i am sure')
+ log.info(f"{len(users_s2)} user entries found on supplier2")
+ assert len(users_s1) == len(users_s2)
+
+
def test_online_reinit_may_hang(topo_with_sigkill):
"""Online reinitialization may hang when the first
entry of the DB is RUV entry instead of the suffix
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
index e93a1b111..fd5f23a77 100644
--- a/ldap/servers/plugins/replication/repl5_agmt.c
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
@@ -56,10 +56,14 @@
#include "repl5_prot_private.h"
#include "cl5_api.h"
#include "slapi-plugin.h"
+#include "../../slapd/back-ldbm/dbimpl.h" /* for dblayer_is_lmdb */
+
+#define DEFAULT_TIMEOUT 120 /* (seconds) default outbound LDAP connection */
+#define DEFAULT_FLOWCONTROL_WINDOW 1000 /* #entries sent without acknowledgment (bdb) */
+#define DEFAULT_FLOWCONTROL_PAUSE 2000 /* msec of pause when #entries sent witout acknowledgment (bdb) */
+#define LMDB_DEFAULT_FLOWCONTROL_WINDOW 50 /* #entries sent without acknowledgment (lmdb) */
+#define LMDB_DEFAULT_FLOWCONTROL_PAUSE 200 /* msec of pause when #entries sent witout acknowledgment (lmdb) */
-#define DEFAULT_TIMEOUT 120 /* (seconds) default outbound LDAP connection */
-#define DEFAULT_FLOWCONTROL_WINDOW 1000 /* #entries sent without acknowledgment */
-#define DEFAULT_FLOWCONTROL_PAUSE 2000 /* msec of pause when #entries sent witout acknowledgment */
#define STATUS_LEN 2048
#define STATUS_GOOD "green"
#define STATUS_WARNING "amber"
@@ -260,8 +264,10 @@ agmt_new_from_entry(Slapi_Entry *e)
char **denied_attrs = NULL;
const char *auto_initialize = NULL;
char *val_nsds5BeginReplicaRefresh = "start";
+ Slapi_Backend *be = NULL;
const char *val = NULL;
int64_t ptimeout = 0;
+ int use_lmdb = 0;
int rc = 0;
ra = (Repl_Agmt *)slapi_ch_calloc(1, sizeof(repl5agmt));
@@ -358,8 +364,33 @@ agmt_new_from_entry(Slapi_Entry *e)
ra->timeout = timeout;
}
+ /* DN of entry at root of replicated area */
+ tmpstr = slapi_entry_attr_get_charptr(e, type_nsds5ReplicaRoot);
+ if (NULL != tmpstr) {
+ ra->replarea = slapi_sdn_new_dn_passin(tmpstr);
+
+ /* now that we set the repl area, when can bump our agmt count */
+ if ((replica = replica_get_replica_from_dn(ra->replarea))) {
+ replica_incr_agmt_count(replica);
+ }
+ be = slapi_be_select(ra->replarea);
+ }
+ if (!be) {
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
+ "agmt_new_from_entry - Failed to get backend for agreement %s on replicated suffix %s).\n",
+ slapi_entry_get_dn(e), tmpstr ? tmpstr : "<NULL>");
+ goto loser;
+ }
+ if (!replica) {
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
+ "agmt_new_from_entry - Failed to get replica for agreement %s on replicated suffix %s).\n",
+ slapi_entry_get_dn(e), tmpstr ? tmpstr : "<NULL>");
+ goto loser;
+ }
+
/* flow control update window. */
- ra->flowControlWindow = DEFAULT_FLOWCONTROL_WINDOW;
+ use_lmdb = dblayer_is_lmdb(be);
+ ra->flowControlWindow = use_lmdb ? LMDB_DEFAULT_FLOWCONTROL_WINDOW : DEFAULT_FLOWCONTROL_WINDOW;
if ((val = slapi_entry_attr_get_ref(e, type_nsds5ReplicaFlowControlWindow))){
int64_t flow;
if (repl_config_valid_num(type_nsds5ReplicaFlowControlWindow, (char *)val, 0, INT_MAX, &rc, errormsg, &flow) != 0) {
@@ -369,7 +400,7 @@ agmt_new_from_entry(Slapi_Entry *e)
}
/* flow control update pause. */
- ra->flowControlPause = DEFAULT_FLOWCONTROL_PAUSE;
+ ra->flowControlPause = use_lmdb ? LMDB_DEFAULT_FLOWCONTROL_PAUSE : DEFAULT_FLOWCONTROL_PAUSE;
if ((val = slapi_entry_attr_get_ref(e, type_nsds5ReplicaFlowControlPause))){
int64_t pause;
if (repl_config_valid_num(type_nsds5ReplicaFlowControlPause, (char *)val, 0, INT_MAX, &rc, errormsg, &pause) != 0) {
@@ -391,23 +422,6 @@ agmt_new_from_entry(Slapi_Entry *e)
};
}
- /* DN of entry at root of replicated area */
- tmpstr = slapi_entry_attr_get_charptr(e, type_nsds5ReplicaRoot);
- if (NULL != tmpstr) {
- ra->replarea = slapi_sdn_new_dn_passin(tmpstr);
-
- /* now that we set the repl area, when can bump our agmt count */
- if ((replica = replica_get_replica_from_dn(ra->replarea))) {
- replica_incr_agmt_count(replica);
- }
- }
- if (!replica) {
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
- "agmt_new_from_entry - Failed to get replica for agreement %s on replicated suffix %s).\n",
- slapi_entry_get_dn(e), tmpstr ? tmpstr : "<NULL>");
- goto loser;
- }
-
/* If this agmt has its own timeout, grab it, otherwise use the replica's protocol timeout */
if ((val = slapi_entry_attr_get_ref(e, type_replicaProtocolTimeout))){
if (repl_config_valid_num(type_replicaProtocolTimeout, (char *)val, 0, INT_MAX, &rc, errormsg, &ptimeout) != 0) {
diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c
index 809ffeeac..bcc71834a 100644
--- a/ldap/servers/plugins/replication/repl5_connection.c
+++ b/ldap/servers/plugins/replication/repl5_connection.c
@@ -636,7 +636,7 @@ check_flow_control_tot_init(Repl_Connection *conn, int optype, const char *extop
* Log it at least once to inform administrator there is
* a potential configuration issue here
*/
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
+ slapi_log_err(SLAPI_LOG_WARNING, repl_plugin_name,
"check_flow_control_tot_init - %s - Total update flow control gives time (%d msec) to the consumer before sending more entries [ msgid sent: %d, rcv: %d])\n"
"If total update fails you can try to increase %s and/or decrease %s in the replica agreement configuration\n",
agmt_get_long_name(conn->agmt),
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
index 7b6e2167c..07e3c53b3 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c
@@ -3061,6 +3061,9 @@ process_entryrdn(backentry *ep, WorkerQueueData_t *wqelmnt)
prepare_ids(&wqd, pid, &id);
dbmdb_import_writeq_push(ctx, &wqd);
dbmdb_add_op_attrs(job, ep, pid); /* Before loosing the pid */
+ } else {
+ /* Update entryid */
+ add_update_entry_operational_attributes(ep, 0);
}
if (ctx->ancestorid && wqelmnt->entry_info) {
diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c
index 9ae13dc15..9c455f5df 100644
--- a/ldap/servers/slapd/back-ldbm/idl_new.c
+++ b/ldap/servers/slapd/back-ldbm/idl_new.c
@@ -45,6 +45,35 @@ struct idl_private
int dummy;
};
+/* Used to store leftover parentid and entry ids */
+typedef struct _range_id_pair
+{
+ ID key;
+ ID id;
+} idl_range_id_pair;
+
+/* lmdb iterator callback context */
+typedef struct {
+ backend *be;
+ dbi_val_t *upperkey;
+ struct attrinfo *ai;
+ int allidslimit;
+ int sizelimit;
+ struct timespec *expire_time;
+ int lookthrough_limit;
+ int operator;
+ idl_range_id_pair *leftover;
+ size_t leftoverlen;
+ size_t leftovercnt;
+ IDList *idl;
+ int flag_err;
+ ID lastid;
+ ID suffix;
+ uint64_t count;
+ char *index_id;
+} idl_range_ctx_t;
+
+
static int idl_tune = DEFAULT_IDL_TUNE; /* tuning parameters for IDL code */
/* Currently none for new IDL code */
@@ -349,15 +378,6 @@ keycmp(dbi_val_t *L, dbi_val_t *R, value_compare_fn_type cmp_fn)
return cmp_fn(&Lv, &Rv);
}
-
-
-
-
-typedef struct _range_id_pair
-{
- ID key;
- ID id;
-} idl_range_id_pair;
/*
* Perform the range search in the idl layer instead of the index layer
* to improve the performance.
@@ -438,6 +458,16 @@ idl_new_range_fetch(
if (NEW_IDL_NOOP == *flag_err) {
return NULL;
}
+ if (slapi_is_loglevel_set(SLAPI_LOG_FILTER)) {
+ char *included = ((operator & SLAPI_OP_RANGE) == SLAPI_OP_LESS) ? "not " : "";
+ const char *sorted = (operator & SLAPI_OP_RANGE_NO_IDL_SORT) ? "not " : "";
+ slapi_log_err(SLAPI_LOG_FILTER,
+ "idl_new_range_fetch", "Getting index %s range from keys %s to %s\n",
+ index_id, (char*)lowerkey->data, (char*)upperkey->data);
+ slapi_log_err(SLAPI_LOG_FILTER, "idl_new_range_fetch",
+ "Candidate list is %ssorted. lower key is %sincluded.\n",
+ sorted, included);
+ }
dblayer_txn_init(li, &s_txn);
if (txn) {
@@ -639,6 +669,9 @@ error:
}
}
if (ret) {
+ slapi_log_err(SLAPI_LOG_ERR, "idl_new_range_fetch",
+ "Failed to build range candidate list on %s index. Error is %d\n",
+ index_id, ret);
dblayer_read_txn_abort(be, &s_txn);
} else {
dblayer_read_txn_commit(be, &s_txn);
@@ -670,9 +703,294 @@ error:
}
slapi_ch_free((void **)&leftover);
}
+ slapi_log_err(SLAPI_LOG_FILTER, "idl_new_range_fetch",
+ "Found %d candidates; error code is: %d\n",
+ idl ? idl->b_nids : 0, *flag_err);
return idl;
}
+/*
+ * Callback used by idl_lmdb_range_fetch to add a new id in the id list
+ */
+static int
+idl_range_add_id_cb(dbi_val_t *key, dbi_val_t *data, void *ctx)
+{
+ idl_range_ctx_t *rctx = ctx;
+ int idl_rc = 0;
+ ID id = 0;
+
+ if (key->data == NULL) {
+ slapi_log_err(SLAPI_LOG_TRACE, "idl_range_add_id",
+ "Unexpected empty key while iterating on %s index cursor\n", rctx->index_id);
+ return DBI_RC_NOTFOUND;
+ }
+ /* Stop iterating when reaching the upperkey */
+ if ((rctx->upperkey != NULL) && (rctx->upperkey->data != NULL)) {
+ if ((rctx->operator & SLAPI_OP_RANGE) == SLAPI_OP_LESS) {
+ if (keycmp(key, rctx->upperkey, rctx->ai->ai_key_cmp_fn) >= 0) {
+ return DBI_RC_NOTFOUND;
+ }
+ } else { /* (rctx->operator & SLAPI_OP_RANGE) == SLAPI_OP_LESS_OR_EQUAL */
+ if (keycmp(key, rctx->upperkey, rctx->ai->ai_key_cmp_fn) > 0) {
+ return DBI_RC_NOTFOUND;
+ }
+ }
+ }
+ /* Check limits */
+ if ((rctx->lookthrough_limit != -1) &&
+ (rctx->idl->b_nids > (ID)rctx->lookthrough_limit)) {
+ idl_free(&rctx->idl);
+ rctx->idl = idl_allids(rctx->be);
+ slapi_log_err(SLAPI_LOG_TRACE, "idl_range_add_id", "lookthrough_limit exceeded\n");
+ rctx->flag_err = LDAP_ADMINLIMIT_EXCEEDED;
+ return DBI_RC_NOTFOUND;
+ }
+ if ((rctx->sizelimit > 0) && (rctx->idl->b_nids > (ID)rctx->sizelimit)) {
+ slapi_log_err(SLAPI_LOG_TRACE, "idl_range_add_id", "sizelimit exceeded\n");
+ rctx->flag_err = LDAP_SIZELIMIT_EXCEEDED;
+ return DBI_RC_NOTFOUND;
+ }
+ if ((rctx->idl->b_nids & 0xff) == 0 && /* Check time every 256 candidates */
+ slapi_timespec_expire_check(rctx->expire_time) == TIMER_EXPIRED) {
+ slapi_log_err(SLAPI_LOG_TRACE, "idl_range_add_id", "timelimit exceeded\n");
+ rctx->flag_err = LDAP_TIMELIMIT_EXCEEDED;
+ return DBI_RC_NOTFOUND;
+ }
+ if (data->size != sizeof(ID)) {
+ slapi_log_err(SLAPI_LOG_ERR, "idl_range_add_id",
+ "Database %s index is corrupt; key %s has a data item with the wrong size (%ld)\n",
+ rctx->index_id, (char *)key->data, data->size);
+ rctx->flag_err = LDAP_UNWILLING_TO_PERFORM;
+ return DBI_RC_NOTFOUND;
+ }
+ memcpy(&id, data->data, sizeof(ID));
+ if (id == rctx->lastid) {
+ slapi_log_err(SLAPI_LOG_TRACE, "idl_lmdb_range_fetch",
+ "Detected duplicate id %d due to DB_MULTIPLE error - skipping\n", id);
+ return DBI_RC_SUCCESS;
+ }
+ /* we got another ID, add it to our IDL */
+ if (rctx->operator & SLAPI_OP_RANGE_NO_IDL_SORT) {
+ ID keyval = (ID)strtol((char *)key->data + 1, (char **)NULL, 10);
+ if ((rctx->count == 0) && (rctx->suffix == 0)) {
+ /* First time. Keep the suffix ID.
+ * note that 'suffix==0' mean we did not retrieve the suffix entry id
+ * from the parentid index (key '=0'), so let assume the first
+ * found entry is the one from the suffix
+ */
+ rctx->suffix = keyval;
+ idl_rc = idl_append_extend(&rctx->idl, id);
+ } else if ((keyval == rctx->suffix) || idl_id_is_in_idlist(rctx->idl, keyval)) {
+ /* the parent is the suffix or already in idl. */
+ idl_rc = idl_append_extend(&rctx->idl, id);
+ } else {
+ /* Otherwise, keep the {keyval,id} in leftover array */
+ if (!rctx->leftover) {
+ rctx->leftover = (idl_range_id_pair *)slapi_ch_calloc(rctx->leftoverlen, sizeof(idl_range_id_pair));
+ } else if (rctx->leftovercnt == rctx->leftoverlen) {
+ rctx->leftover = (idl_range_id_pair *)slapi_ch_realloc((char *)rctx->leftover, 2 * rctx->leftoverlen * sizeof(idl_range_id_pair));
+ memset(rctx->leftover + rctx->leftovercnt, 0, rctx->leftoverlen * sizeof(idl_range_id_pair));
+ rctx->leftoverlen *= 2;
+ }
+ rctx->leftover[rctx->leftovercnt].key = keyval;
+ rctx->leftover[rctx->leftovercnt].id = id;
+ rctx->leftovercnt++;
+ }
+ } else {
+ idl_rc = idl_append_extend(&rctx->idl, id);
+ }
+ if (idl_rc) {
+ slapi_log_err(SLAPI_LOG_ERR, "idl_lmdb_range_fetch",
+ "Unable to extend id list (err=%d)\n", idl_rc);
+ idl_free(&rctx->idl);
+ rctx->flag_err = LDAP_UNWILLING_TO_PERFORM;
+ return DBI_RC_NOTFOUND;
+ }
+#if defined(DB_ALLIDS_ON_READ)
+ /* enforce the allids read limit */
+ if ((NEW_IDL_NO_ALLID != rctx->flag_err) && rctx->ai && (rctx->idl != NULL) &&
+ idl_new_exceeds_allidslimit(rctx->count, rctx->ai, rctx->allidslimit)) {
+ rctx->idl->b_nids = 1;
+ rctx->idl->b_ids[0] = ALLID;
+ return DBI_RC_NOTFOUND; /* fool the code below into thinking that we finished the dups */
+ }
+#endif
+
+ rctx->count++;
+ return DBI_RC_SUCCESS;
+}
+
+/*
+ * Same as idl_new_range_fetch but without using bulk operation
+ */
+IDList *
+idl_lmdb_range_fetch(
+ backend *be,
+ dbi_db_t *db,
+ dbi_val_t *lowerkey,
+ dbi_val_t *upperkey,
+ dbi_txn_t *txn,
+ struct attrinfo *ai,
+ int *flag_err,
+ int allidslimit,
+ int sizelimit,
+ struct timespec *expire_time,
+ int lookthrough_limit,
+ int
+ operator)
+{
+ int ret = 0;
+ int ret2 = 0;
+ int idl_rc = 0;
+ dbi_cursor_t cursor = {0};
+ back_txn s_txn;
+ struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+ idl_range_ctx_t idl_range_ctx = {0};
+ char *index_id = get_index_name(be, db, ai);
+
+ if ((NULL == flag_err) || (NEW_IDL_NOOP == *flag_err)) {
+ return NULL;
+ }
+ if (slapi_is_loglevel_set(SLAPI_LOG_FILTER)) {
+ char *included = ((operator & SLAPI_OP_RANGE) == SLAPI_OP_LESS) ? "not " : "";
+ const char *sorted = (operator & SLAPI_OP_RANGE_NO_IDL_SORT) ? "not " : "";
+ slapi_log_err(SLAPI_LOG_FILTER,
+ "idl_lmdb_range_fetch", "Getting index %s range from keys %s to %s\n",
+ index_id, (char*)lowerkey->data, (char*)upperkey->data);
+ slapi_log_err(SLAPI_LOG_FILTER, "idl_lmdb_range_fetch",
+ "Candidate list is %ssorted. lower key is %sincluded.\n",
+ sorted, included);
+ }
+
+ dblayer_txn_init(li, &s_txn);
+ if (txn) {
+ dblayer_read_txn_begin(be, txn, &s_txn);
+ }
+
+ /* Make a cursor */
+ ret = dblayer_new_cursor(be, db, s_txn.back_txn_txn, &cursor);
+ if (0 != ret) {
+ ldbm_nasty("idl_lmdb_range_fetch - idl_new.c", index_id, 1, ret);
+ goto error;
+ }
+
+ /* Initialize the callnack context */
+ idl_range_ctx.be = be;
+ idl_range_ctx.upperkey = upperkey;
+ idl_range_ctx.ai = ai;
+ idl_range_ctx.allidslimit = allidslimit;
+ idl_range_ctx.sizelimit = sizelimit;
+ idl_range_ctx.expire_time = expire_time;
+ idl_range_ctx.lookthrough_limit = lookthrough_limit;
+ idl_range_ctx.operator = operator;
+ idl_range_ctx.leftover = NULL;
+ idl_range_ctx.leftoverlen = 32;
+ idl_range_ctx.leftovercnt = 0;
+ idl_range_ctx.idl = idl_alloc(IDLIST_MIN_BLOCK_SIZE);
+ idl_range_ctx.flag_err = 0;
+ idl_range_ctx.lastid = 0;
+ idl_range_ctx.count = 0;
+ idl_range_ctx.index_id = index_id;
+ if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) {
+ struct _back_info_index_key bck_info;
+ /* We are doing a bulk import
+ * try to retrieve the suffix entry id from the index
+ */
+
+ bck_info.index = SLAPI_ATTR_PARENTID;
+ bck_info.key = "0";
+
+ if ((ret = slapi_back_get_info(be, BACK_INFO_INDEX_KEY, (void **)&bck_info))) {
+ slapi_log_err(SLAPI_LOG_WARNING, "idl_lmdb_range_fetch",
+ "Total update: fail to retrieve suffix entryID, continue assuming it is the first entry\n");
+ }
+ if (bck_info.key_found) {
+ idl_range_ctx.suffix = bck_info.id;
+ }
+ }
+
+ /*
+ * Iterate
+ */
+ ret = dblayer_cursor_iterate(&cursor, idl_range_add_id_cb, lowerkey, &idl_range_ctx);
+ if (DBI_RC_NOTFOUND == ret) {
+ ret = 0; /* normal case */
+ } else if (0 != ret) {
+ ldbm_nasty("idl_lmdb_range_fetch - idl_new.c", index_id, 2, ret);
+ idl_free(&idl_range_ctx.idl);
+ goto error;
+ }
+
+ slapi_log_err(SLAPI_LOG_TRACE, "idl_lmdb_range_fetch",
+ "Bulk fetch buffer nids=%" PRIu64 "\n", idl_range_ctx.count);
+
+ /* check for allids value */
+ if ((idl_range_ctx.idl->b_nids == 1) && (idl_range_ctx.idl->b_ids[0] == ALLID)) {
+ idl_free(&idl_range_ctx.idl);
+ idl_range_ctx.idl = idl_allids(be);
+ slapi_log_err(SLAPI_LOG_TRACE, "idl_lmdb_range_fetch", "%s returns allids\n",
+ index_id);
+ } else {
+ slapi_log_err(SLAPI_LOG_TRACE, "idl_lmdb_range_fetch", "%s returns nids=%lu\n",
+ index_id, (u_long)IDL_NIDS(idl_range_ctx.idl));
+ }
+
+error:
+ /* Close the cursor */
+ if (0 == idl_range_ctx.flag_err) {
+ idl_range_ctx.flag_err = ret;
+slapi_log_err(SLAPI_LOG_INFO, "idl_lmdb_range_fetch", "flag_err=%d\n", idl_range_ctx.flag_err);
+ }
+ ret = dblayer_cursor_op(&cursor, DBI_OP_CLOSE, NULL, NULL);
+ if (ret) {
+ ldbm_nasty("idl_lmdb_range_fetch - idl_new.c", index_id, 3, ret2);
+ }
+ if (ret) {
+ slapi_log_err(SLAPI_LOG_ERR, "idl_lmdb_range_fetch",
+ "Failed to build range candidate list on %s index. Error is %d\n",
+ index_id, ret);
+ dblayer_read_txn_abort(be, &s_txn);
+ } else {
+ dblayer_read_txn_commit(be, &s_txn);
+ }
+ if (0 == idl_range_ctx.flag_err) {
+ idl_range_ctx.flag_err = ret;
+slapi_log_err(SLAPI_LOG_INFO, "idl_lmdb_range_fetch", "flag_err=%d\n", idl_range_ctx.flag_err);
+ }
+
+ /* sort idl */
+ if (!ALLIDS(idl_range_ctx.idl) && !(operator&SLAPI_OP_RANGE_NO_IDL_SORT)) {
+ qsort((void *)&idl_range_ctx.idl->b_ids[0], idl_range_ctx.idl->b_nids, sizeof(ID), idl_sort_cmp);
+ }
+ if (operator&SLAPI_OP_RANGE_NO_IDL_SORT) {
+ size_t remaining = idl_range_ctx.leftovercnt;
+
+ while(remaining > 0) {
+ for (size_t i = 0; i < idl_range_ctx.leftovercnt; i++) {
+ if (idl_range_ctx.leftover[i].key > 0 &&
+ idl_id_is_in_idlist(idl_range_ctx.idl, idl_range_ctx.leftover[i].key) != 0) {
+ /* if the leftover key has its parent in the idl */
+ idl_rc = idl_append_extend(&idl_range_ctx.idl, idl_range_ctx.leftover[i].id);
+ if (idl_rc) {
+ slapi_log_err(SLAPI_LOG_ERR, "idl_lmdb_range_fetch",
+ "Unable to extend id list (err=%d)\n", idl_rc);
+ idl_free(&idl_range_ctx.idl);
+ break;
+ }
+ idl_range_ctx.leftover[i].key = 0;
+ remaining--;
+ }
+ }
+ }
+ slapi_ch_free((void **)&idl_range_ctx.leftover);
+ }
+ *flag_err = idl_range_ctx.flag_err;
+ slapi_log_err(SLAPI_LOG_FILTER, "idl_lmdb_range_fetch",
+ "Found %d candidates; error code is: %d\n",
+ idl_range_ctx.idl ? idl_range_ctx.idl->b_nids : 0, *flag_err);
+ return idl_range_ctx.idl;
+}
+
int
idl_new_insert_key(
backend *be __attribute__((unused)),
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
index 86bc825fe..c29889524 100644
--- a/ldap/servers/slapd/back-ldbm/index.c
+++ b/ldap/servers/slapd/back-ldbm/index.c
@@ -1631,12 +1631,23 @@ index_range_read_ext(
if (operator&SLAPI_OP_RANGE_NO_ALLIDS) {
*err = NEW_IDL_NO_ALLID;
}
+
if (idl_get_idl_new()) { /* new idl */
- slapi_log_err(SLAPI_LOG_FILTER,
- "index_range_read_ext", "Getting index range from keys %s to %s.\n", (char*)cur_key.data, (char*)upperkey.data);
- idl = idl_new_range_fetch(be, db, &cur_key, &upperkey, db_txn,
- ai, err, allidslimit, sizelimit, &expire_time,
- lookthrough_limit, operator);
+ /*
+ * li->li_flags is not set when doing internal search (as in bulk import)
+ * and since idl_new_range_fetch is broken for lmdb (because of bulk read operations)
+ * better use idl_lmdb_range_fetch in that case (which work on bdb but may be a
+ * bit slower)
+ */
+ if ((li->li_flags & (LI_LMDB_IMPL|LI_BDB_IMPL)) == LI_BDB_IMPL) {
+ idl = idl_new_range_fetch(be, db, &cur_key, &upperkey, db_txn,
+ ai, err, allidslimit, sizelimit, &expire_time,
+ lookthrough_limit, operator);
+ } else {
+ idl = idl_lmdb_range_fetch(be, db, &cur_key, &upperkey, db_txn,
+ ai, err, allidslimit, sizelimit, &expire_time,
+ lookthrough_limit, operator);
+ }
} else { /* old idl */
int retry_count = 0;
while (*err == 0 &&
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 8e8440c1e..a1e57c172 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -231,6 +231,7 @@ int idl_get_tune(void);
size_t idl_get_allidslimit(struct attrinfo *a, int allidslimit);
int idl_get_idl_new(void);
IDList *idl_new_range_fetch(backend *be, dbi_db_t *db, dbi_val_t *lowerkey, dbi_val_t *upperkey, dbi_txn_t *txn, struct attrinfo *a, int *flag_err, int allidslimit, int sizelimit, struct timespec *expire_time, int lookthrough_limit, int operator);
+IDList *idl_lmdb_range_fetch(backend *be, dbi_db_t *db, dbi_val_t *lowerkey, dbi_val_t *upperkey, dbi_txn_t *txn, struct attrinfo *a, int *flag_err, int allidslimit, int sizelimit, struct timespec *expire_time, int lookthrough_limit, int operator);
char *get_index_name(backend *be, dbi_db_t *db, struct attrinfo *a);
int64_t idl_compare(IDList *a, IDList *b);
| 0 |
f34705baa21378e18051f1f553d8d88ff0f91c61
|
389ds/389-ds-base
|
Fix instability in re-try and backoff mechanism
|
commit f34705baa21378e18051f1f553d8d88ff0f91c61
Author: David Boreham <[email protected]>
Date: Fri Apr 15 05:19:03 2005 +0000
Fix instability in re-try and backoff mechanism
diff --git a/ldap/synctools/passwordsync/build.bat b/ldap/synctools/passwordsync/build.bat
index 1c788cbef..e47af7471 100644
--- a/ldap/synctools/passwordsync/build.bat
+++ b/ldap/synctools/passwordsync/build.bat
@@ -9,12 +9,10 @@
pushd
-if NOT [%BUILD_DEBUG%] == [] (
- if [%BUILD_DEBUG%] == [optimize] (
- set LIBROOT=..\..\..\..\dist\WINNT5.0_OPT.OBJ
- ) else (
- set LIBROOT=..\..\..\..\dist\WINNT5.0_DBG.OBJ
- )
+if [%BUILD_DEBUG%] == [optimize] (
+ set LIBROOT=..\..\..\..\dist\WINNT5.0_OPT.OBJ
+) else (
+ set LIBROOT=..\..\..\..\dist\WINNT5.0_DBG.OBJ
)
echo %LIBROOT%
diff --git a/ldap/synctools/passwordsync/passhand.cpp b/ldap/synctools/passwordsync/passhand.cpp
index 01b7a005a..d50539669 100644
--- a/ldap/synctools/passwordsync/passhand.cpp
+++ b/ldap/synctools/passwordsync/passhand.cpp
@@ -139,6 +139,12 @@ int loadSet(PASS_INFO_LIST* passInfoList, char* filename)
newPair.password = (char*)malloc(passwordLen);
plainTextStream->read((char*)newPair.password, passwordLen);
+ // Backoff
+ newPair.backoffCount = 0;
+
+ // Load time
+ time(&newPair.atTime);
+
passInfoList->push_back(newPair);
}
diff --git a/ldap/synctools/passwordsync/passhand.h b/ldap/synctools/passwordsync/passhand.h
index 26f5dbf97..af2f928d7 100644
--- a/ldap/synctools/passwordsync/passhand.h
+++ b/ldap/synctools/passwordsync/passhand.h
@@ -27,6 +27,8 @@ struct PASS_INFO
{
char* username;
char* password;
+ int backoffCount;
+ time_t atTime;
};
typedef list<PASS_INFO> PASS_INFO_LIST;
diff --git a/ldap/synctools/passwordsync/passhook/passhook.cpp b/ldap/synctools/passwordsync/passhook/passhook.cpp
index d39a0abfd..fbc429690 100644
--- a/ldap/synctools/passwordsync/passhook/passhook.cpp
+++ b/ldap/synctools/passwordsync/passhook/passhook.cpp
@@ -23,9 +23,28 @@ NTSTATUS NTAPI PasswordChangeNotify(PUNICODE_STRING UserName, ULONG RelativeId,
HANDLE passhookEventHandle = OpenEvent(EVENT_MODIFY_STATE, FALSE, PASSHAND_EVENT_NAME);
PASS_INFO newPassInfo;
PASS_INFO_LIST passInfoList;
+ HKEY regKey;
+ DWORD type;
+ unsigned long buffSize;
+ char regBuff[PASSHAND_BUF_SIZE];
+ unsigned long logLevel;
fstream outLog;
- outLog.open("passhook.log", ios::out | ios::app);
+ RegOpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\\PasswordSync", ®Key);
+ buffSize = PASSHAND_BUF_SIZE;
+ if(RegQueryValueEx(regKey, "Log Level", NULL, &type, (unsigned char*)regBuff, &buffSize) == ERROR_SUCCESS)
+ {
+ logLevel = (unsigned long)atoi(regBuff);
+ }
+ else
+ {
+ logLevel = 0;
+ }
+ if(logLevel > 0)
+ {
+ outLog.open("passhook.log", ios::out | ios::app);
+ }
+ RegCloseKey(regKey);
_snprintf(singleByteUsername, PASSHAND_BUF_SIZE, "%S", UserName->Buffer);
singleByteUsername[UserName->Length / 2] = '\0';
@@ -36,6 +55,7 @@ NTSTATUS NTAPI PasswordChangeNotify(PUNICODE_STRING UserName, ULONG RelativeId,
{
timeStamp(&outLog);
outLog << "user " << singleByteUsername << " password changed" << endl;
+ //outLog << "user " << singleByteUsername << " password changed to " << singleBytePassword << endl;
}
if(loadSet(&passInfoList, "passhook.dat") == 0)
@@ -103,4 +123,4 @@ BOOL NTAPI PasswordFilter(PUNICODE_STRING UserName, PUNICODE_STRING FullName, PU
BOOL NTAPI InitializeChangeNotify()
{
return TRUE;
-}
\ No newline at end of file
+}
diff --git a/ldap/synctools/passwordsync/passhook/passhook.dsp b/ldap/synctools/passwordsync/passhook/passhook.dsp
index c7089dc0e..e7ed9354e 100644
--- a/ldap/synctools/passwordsync/passhook/passhook.dsp
+++ b/ldap/synctools/passwordsync/passhook/passhook.dsp
@@ -1,9 +1,3 @@
-#
-# BEGIN COPYRIGHT BLOCK
-# Copyright (C) 2005 Red Hat, Inc.
-# All rights reserved.
-# END COPYRIGHT BLOCK
-#
# Microsoft Developer Studio Project File - Name="passhook" - Package Owner=<4>
# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
diff --git a/ldap/synctools/passwordsync/passsync.dsw b/ldap/synctools/passwordsync/passsync.dsw
index cb2a7e335..a4b105f55 100644
--- a/ldap/synctools/passwordsync/passsync.dsw
+++ b/ldap/synctools/passwordsync/passsync.dsw
@@ -1,15 +1,9 @@
-#
-# BEGIN COPYRIGHT BLOCK
-# Copyright (C) 2005 Red Hat, Inc.
-# All rights reserved.
-# END COPYRIGHT BLOCK
-#
Microsoft Developer Studio Workspace File, Format Version 6.00
# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
###############################################################################
-Project: "passhook"=".\passhook\passhook.dsp" - Package Owner=<4>
+Project: "passhook"=.\passhook\passhook.dsp - Package Owner=<4>
Package=<5>
{{{
@@ -21,7 +15,7 @@ Package=<4>
###############################################################################
-Project: "passsync"=".\passsync\passsync.dsp" - Package Owner=<4>
+Project: "passsync"=.\passsync\passsync.dsp - Package Owner=<4>
Package=<5>
{{{
diff --git a/ldap/synctools/passwordsync/passsync/syncserv.cpp b/ldap/synctools/passwordsync/passsync/syncserv.cpp
index ffc9b3446..39ac9922e 100644
--- a/ldap/synctools/passwordsync/passsync/syncserv.cpp
+++ b/ldap/synctools/passwordsync/passsync/syncserv.cpp
@@ -38,22 +38,40 @@ char* passwdcb(PK11SlotInfo* info, PRBool retry, void* arg)
PassSyncService::PassSyncService(const TCHAR *serviceName) : CNTService(serviceName)
{
char sysPath[SYNCSERV_BUF_SIZE];
+ char tempRegBuff[SYNCSERV_BUF_SIZE];
HKEY regKey;
DWORD type;
unsigned long size;
passhookEventHandle = CreateEvent(NULL, FALSE, FALSE, PASSHAND_EVENT_NAME);
-
mainLdapConnection = NULL;
results = NULL;
currentResult = NULL;
lastLdapError = LDAP_SUCCESS;
certdbh = NULL;
- multipleModify = SYNCSERV_ALLOW_MULTI_MOD;
- isRunning = false;
-
RegOpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\\PasswordSync", ®Key);
+
+ size = SYNCSERV_BUF_SIZE;
+ if(RegQueryValueEx(regKey, "Log Level", NULL, &type, (unsigned char*)tempRegBuff, &size) == ERROR_SUCCESS)
+ {
+ logLevel = (unsigned long)atoi(tempRegBuff);
+ }
+ else
+ {
+ logLevel = 0;
+ }
+
+ size = SYNCSERV_BUF_SIZE;
+ if(RegQueryValueEx(regKey, "Time To Live", NULL, &type, (unsigned char*)tempRegBuff, &size) == ERROR_SUCCESS)
+ {
+ maxBackoffTime = (unsigned long)atoi(tempRegBuff);
+ }
+ else
+ {
+ maxBackoffTime = pow(2, 12) * SYNCSERV_BASE_BACKOFF_LEN;
+ }
+
size = SYNCSERV_BUF_SIZE;
RegQueryValueEx(regKey, "Install Path", NULL, &type, (unsigned char*)installPath, &size);
size = SYNCSERV_BUF_SIZE;
@@ -77,7 +95,10 @@ PassSyncService::PassSyncService(const TCHAR *serviceName) : CNTService(serviceN
_snprintf(logPath, SYNCSERV_BUF_SIZE, "%spasssync.log", installPath);
_snprintf(dataFilename, SYNCSERV_BUF_SIZE, "%s\\system32\\passhook.dat", sysPath);
- outLog.open(logPath, ios::out | ios::app);
+ if(logLevel > 0)
+ {
+ outLog.open(logPath, ios::out | ios::app);
+ }
if(outLog.is_open())
{
timeStamp(&outLog);
@@ -85,6 +106,8 @@ PassSyncService::PassSyncService(const TCHAR *serviceName) : CNTService(serviceN
}
PK11_SetPasswordFunc(passwdcb);
+
+ isRunning = false;
}
// ****************************************************************
@@ -100,15 +123,80 @@ PassSyncService::~PassSyncService()
outLog.close();
}
+// ****************************************************************
+//
+// ****************************************************************
+void PassSyncService::OnStop()
+{
+ isRunning = false;
+ SetEvent(passhookEventHandle);
+}
+
+// ****************************************************************
+//
+// ****************************************************************
+void PassSyncService::OnShutdown()
+{
+ isRunning = false;
+ SetEvent(passhookEventHandle);
+}
+
+// ****************************************************************
+// PassSyncService::Run
+// ****************************************************************
+void PassSyncService::Run()
+{
+ isRunning = true;
+ SyncPasswords();
+
+ while(isRunning)
+ {
+ if(passInfoList.empty())
+ {
+ WaitForSingleObject(passhookEventHandle, INFINITE);
+ }
+ else
+ {
+ WaitForSingleObject(passhookEventHandle, BackoffTime(GetMinBackoff()));
+ }
+
+ SyncPasswords();
+ UpdateBackoff();
+
+ ResetEvent(passhookEventHandle);
+ }
+
+ if(saveSet(&passInfoList, dataFilename) == 0)
+ {
+ if(outLog.is_open())
+ {
+ timeStamp(&outLog);
+ outLog << passInfoList.size() << " entries saved to file" << endl;
+ }
+ }
+ else
+ {
+ if(outLog.is_open())
+ {
+ timeStamp(&outLog);
+ outLog << "failed to save entries to file" << endl;
+ }
+ }
+
+ CloseHandle(passhookEventHandle);
+}
+
// ****************************************************************
// PassSyncService::SyncPasswords
// ****************************************************************
int PassSyncService::SyncPasswords()
{
int result = 0;
+ PASS_INFO_LIST emptyPassInfoList;
PASS_INFO_LIST_ITERATOR currentPassInfo;
PASS_INFO_LIST_ITERATOR tempPassInfo;
char* dn;
+ int tempSize = passInfoList.size();
if(Connect(&mainLdapConnection, ldapAuthUsername, ldapAuthPassword) < 0)
{
@@ -127,8 +215,9 @@ int PassSyncService::SyncPasswords()
if(outLog.is_open())
{
timeStamp(&outLog);
- outLog << passInfoList.size() << " entries loaded from file" << endl;
+ outLog << passInfoList.size() - tempSize << " new entries loaded from file" << endl;
}
+ saveSet(&emptyPassInfoList, dataFilename);
}
else
{
@@ -139,118 +228,73 @@ int PassSyncService::SyncPasswords()
}
}
- while(passInfoList.size() > 0)
+ currentPassInfo = passInfoList.begin();
+ while(currentPassInfo != passInfoList.end())
{
- currentPassInfo = passInfoList.begin();
-
- while(currentPassInfo != passInfoList.end())
+ if(QueryUsername(currentPassInfo->username) == 0)
{
- if(QueryUsername(currentPassInfo->username) != 0)
+ while((dn = GetDN()) != NULL)
{
- // log search failure.
- if(outLog.is_open())
+ if(FutureOccurrence(currentPassInfo))
{
- timeStamp(&outLog);
- outLog << "search for " << currentPassInfo->username << " failed in SyncPasswords" << endl;
+ if(outLog.is_open())
+ {
+ timeStamp(&outLog);
+ outLog << "newer modifies exist: " << currentPassInfo->username << endl;
+ }
}
- }
- else
- {
- while((dn = GetDN()) != NULL)
+ else if(MultipleResults() && !SYNCSERV_ALLOW_MULTI_MOD)
{
- if(CanBind(dn, currentPassInfo->password))
+ if(outLog.is_open())
{
- if(outLog.is_open())
- {
- timeStamp(&outLog);
- outLog << "password match, no modify preformed: " << currentPassInfo->username << endl;
- }
+ timeStamp(&outLog);
+ outLog << "multiple results not allowed: " << currentPassInfo->username << endl;
}
- else if(ModifyPassword(dn, currentPassInfo->password) != 0)
+ }
+ else if(CanBind(dn, currentPassInfo->password))
+ {
+ if(outLog.is_open())
+ {
+ timeStamp(&outLog);
+ outLog << "password match, no modify preformed: " << currentPassInfo->username << endl;
+ }
+ }
+ else if(ModifyPassword(dn, currentPassInfo->password) != 0)
+ {
+ // log modify failure.
+ if(outLog.is_open())
{
- // log modify failure.
- if(outLog.is_open())
- {
- timeStamp(&outLog);
- outLog << "modify password for " << currentPassInfo->username << " failed in SyncPasswords" << endl;
- }
+ timeStamp(&outLog);
+ outLog << "modify password for " << currentPassInfo->username << " failed in SyncPasswords" << endl;
}
- else
+ }
+ else
+ {
+ if(outLog.is_open())
{
- if(outLog.is_open())
- {
- timeStamp(&outLog);
- outLog << "password for " << currentPassInfo->username << " modified" << endl;
- outLog << "\t" << dn << endl;
- }
+ timeStamp(&outLog);
+ outLog << "password for " << currentPassInfo->username << " modified" << endl;
+ outLog << "\t" << dn << endl;
}
- } // end while((dn = GetDN()) != NULL)
+ }
+ tempPassInfo = currentPassInfo;
+ currentPassInfo++;
+ passInfoList.erase(tempPassInfo);
}
-
- tempPassInfo = currentPassInfo;
- currentPassInfo++;
- passInfoList.erase(tempPassInfo);
- } // end while(currentPassInfo != passInfoList.end())
- } // end while(passInfoList.size() > 0)
-
- if(saveSet(&passInfoList, dataFilename) == 0)
- {
- if(outLog.is_open())
- {
- timeStamp(&outLog);
- outLog << passInfoList.size() << " entries saved to file" << endl;
}
- }
- else
- {
- if(outLog.is_open())
+ else
{
- timeStamp(&outLog);
- outLog << "failed to save entries to file" << endl;
+ currentPassInfo++;
}
}
- clearSet(&passInfoList);
- Disconnect(&mainLdapConnection);
-
exit:
- return result;
-}
-
-// ****************************************************************
-//
-// ****************************************************************
-void PassSyncService::OnStop()
-{
- isRunning = false;
- SetEvent(passhookEventHandle);
-}
-
-// ****************************************************************
-//
-// ****************************************************************
-void PassSyncService::OnShutdown()
-{
- isRunning = false;
- SetEvent(passhookEventHandle);
-}
-
-// ****************************************************************
-// PassSyncService::Run
-// ****************************************************************
-void PassSyncService::Run()
-{
- isRunning = true;
- SyncPasswords();
-
- while(isRunning)
+ if(mainLdapConnection != NULL)
{
- WaitForSingleObject(passhookEventHandle, INFINITE);
- SyncPasswords();
- ResetEvent(passhookEventHandle);
+ Disconnect(&mainLdapConnection);
}
- CloseHandle(passhookEventHandle);
+ return result;
}
// ****************************************************************
@@ -266,9 +310,9 @@ int PassSyncService::Connect(LDAP** connection, char* dn, char* auth)
if(outLog.is_open())
{
- timeStamp(&outLog);
- outLog << "ldapssl_client_init failed in Connect" << endl;
- outLog << "\t" << result << ": " << ldap_err2string(result) << endl;
+ //timeStamp(&outLog);
+ //outLog << "ldapssl_client_init failed in Connect" << endl;
+ //outLog << "\t" << result << ": " << ldap_err2string(result) << endl;
}
result = GetLastError();
@@ -283,28 +327,24 @@ int PassSyncService::Connect(LDAP** connection, char* dn, char* auth)
{
if(outLog.is_open())
{
- timeStamp(&outLog);
- outLog << "ldapssl_init failed in Connect" << endl;
+ //timeStamp(&outLog);
+ //outLog << "ldapssl_init failed in Connect" << endl;
}
result = -1;
goto exit;
}
- ResetBackoff();
- while(((lastLdapError = ldap_simple_bind_s(*connection, dn, auth)) != LDAP_SUCCESS) && Backoff())
- {
- // empty
- }
+ lastLdapError = ldap_simple_bind_s(*connection, dn, auth);
if(lastLdapError != LDAP_SUCCESS)
{
// log reason for bind failure.
if(outLog.is_open())
{
- timeStamp(&outLog);
- outLog << "ldap error in Connect" << endl;
- outLog << "\t" << lastLdapError << ": " << ldap_err2string(lastLdapError) << endl;
+ //timeStamp(&outLog);
+ //outLog << "ldap error in Connect" << endl;
+ //outLog << "\t" << lastLdapError << ": " << ldap_err2string(lastLdapError) << endl;
}
result = -1;
@@ -322,7 +362,7 @@ int PassSyncService::Disconnect(LDAP** connection)
{
ldap_unbind(*connection);
- connection = NULL;
+ *connection = NULL;
return 0;
}
@@ -339,48 +379,33 @@ int PassSyncService::QueryUsername(char* username)
_snprintf(searchFilter, SYNCSERV_BUF_SIZE, "(%s=%s)", ldapUsernameField, username);
- ResetBackoff();
- while(Backoff())
- {
- lastLdapError = ldap_search_ext_s(mainLdapConnection, ldapSearchBase, LDAP_SCOPE_ONELEVEL, searchFilter, NULL, 0, NULL, NULL, NULL, -1, &results);
+ lastLdapError = ldap_search_ext_s(mainLdapConnection, ldapSearchBase, LDAP_SCOPE_ONELEVEL, searchFilter, NULL, 0, NULL, NULL, NULL, -1, &results);
- if(lastLdapError != LDAP_SUCCESS)
- {
- // log reason for search failure.
- if(outLog.is_open())
- {
- timeStamp(&outLog);
- outLog << "ldap error in QueryUsername" << endl;
- outLog << "\t" << lastLdapError << ": " << ldap_err2string(lastLdapError) << endl;
- }
- result = -1;
- EndBackoff();
- }
- else if(ldap_first_entry(mainLdapConnection, results) == NULL)
+ if(lastLdapError != LDAP_SUCCESS)
+ {
+ // log reason for search failure.
+ if(outLog.is_open())
{
- if(outLog.is_open())
- {
- timeStamp(&outLog);
- outLog << "there are no entries that match: " << username << endl;
- }
- result = -1;
+ timeStamp(&outLog);
+ outLog << "ldap error in QueryUsername" << endl;
+ outLog << "\t" << lastLdapError << ": " << ldap_err2string(lastLdapError) << endl;
}
- else if(ldap_next_entry(mainLdapConnection, ldap_first_entry(mainLdapConnection, results)) != NULL)
- {
- if(outLog.is_open())
- {
- timeStamp(&outLog);
- outLog << "there are multiple entries that match: " << username << endl;
- }
+ result = -1;
+ goto exit;
+ }
- if(!SYNCSERV_ALLOW_MULTI_MOD)
- {
- result = -1;
- EndBackoff();
- }
+ if(ldap_first_entry(mainLdapConnection, results) == NULL)
+ {
+ if(outLog.is_open())
+ {
+ timeStamp(&outLog);
+ outLog << "there are no entries that match: " << username << endl;
}
+ result = -1;
+ goto exit;
}
+exit:
return result;
}
@@ -405,27 +430,6 @@ char* PassSyncService::GetDN()
return result;
}
-// ****************************************************************
-// PassSyncService::CanBind
-// ****************************************************************
-bool PassSyncService::CanBind(char* dn, char* password)
-{
- bool result;
- LDAP* tempConnection = NULL;
-
- if(Connect(&tempConnection, dn, password) == 0)
- {
- Disconnect(&tempConnection);
- result = true;
- }
- else
- {
- result = false;
- }
-
- return result;
-}
-
// ****************************************************************
// PassSyncService::ModifyPassword
// ****************************************************************
@@ -457,42 +461,141 @@ int PassSyncService::ModifyPassword(char* dn, char* password)
}
// ****************************************************************
-// PassSyncService::ResetBackoff
+// PassSyncService::FutureOccurrence
// ****************************************************************
-void PassSyncService::ResetBackoff()
+bool PassSyncService::FutureOccurrence(PASS_INFO_LIST_ITERATOR startingPassInfo)
{
- backoffCount = 0;
+ bool result = false;
+ PASS_INFO_LIST_ITERATOR currentPassInfo;
+
+ if((startingPassInfo != NULL) && (startingPassInfo != passInfoList.end()))
+ {
+ currentPassInfo = startingPassInfo;
+ currentPassInfo++;
+
+ while((currentPassInfo != passInfoList.end()) && (!result))
+ {
+ if(strcmp(currentPassInfo->username, startingPassInfo->username) == 0)
+ {
+ result = true;
+ }
+
+ currentPassInfo++;
+ }
+ }
+
+ return result;
}
// ****************************************************************
-// PassSyncService::EndBackoff
+// PassSyncService::MultipleResults
// ****************************************************************
-void PassSyncService::EndBackoff()
+bool PassSyncService::MultipleResults()
{
- backoffCount = SYNCSERV_MAX_BACKOFF_COUNT;
+ bool result = false;
+
+ if(ldap_next_entry(mainLdapConnection, ldap_first_entry(mainLdapConnection, results)) != NULL)
+ {
+ result = true;
+ }
+
+ return result;
}
// ****************************************************************
-// PassSyncService::Backoff
+// PassSyncService::CanBind
// ****************************************************************
-bool PassSyncService::Backoff()
+bool PassSyncService::CanBind(char* dn, char* password)
{
bool result;
+ LDAP* tempConnection = NULL;
- if(backoffCount == 0)
- {
- result = true;
- }
- else if(backoffCount < SYNCSERV_MAX_BACKOFF_COUNT)
+ if(Connect(&tempConnection, dn, password) == 0)
{
- Sleep((2 ^ backoffCount) * SYNCSERV_BASE_BACKOFF_LEN);
result = true;
}
else
{
result = false;
}
+
+ if(tempConnection != NULL)
+ {
+ Disconnect(&tempConnection);
+ }
- backoffCount++;
return result;
}
+
+// ****************************************************************
+// PassSyncService::BackoffTime
+// ****************************************************************
+unsigned long PassSyncService::BackoffTime(int backoff)
+{
+ unsigned long backoffTime = 0;
+
+ if(backoff > 0)
+ {
+ backoffTime = pow(2, backoff) * SYNCSERV_BASE_BACKOFF_LEN;
+ }
+
+ return backoffTime;
+}
+
+// ****************************************************************
+// PassSyncService::UpdateBackoff
+// ****************************************************************
+void PassSyncService::UpdateBackoff()
+{
+ PASS_INFO_LIST_ITERATOR currentPassInfo;
+ PASS_INFO_LIST_ITERATOR tempPassInfo;
+ time_t currentTime;
+
+ time(¤tTime);
+
+ currentPassInfo = passInfoList.begin();
+ while(currentPassInfo != passInfoList.end())
+ {
+ if((currentPassInfo->atTime + (BackoffTime(currentPassInfo->backoffCount) / 1000)) <= currentTime)
+ {
+ currentPassInfo->backoffCount++;
+ }
+
+ if((currentTime - currentPassInfo->atTime) > (maxBackoffTime / 1000))
+ {
+ if(outLog.is_open())
+ {
+ timeStamp(&outLog);
+ outLog << "abandoning password change for " << currentPassInfo->username << ", backoff expired" << endl;
+ }
+
+ tempPassInfo = currentPassInfo;
+ currentPassInfo++;
+ passInfoList.erase(tempPassInfo);
+ }
+ else
+ {
+ currentPassInfo++;
+ }
+ }
+}
+
+// ****************************************************************
+// PassSyncService::GetMinBackoff
+// ****************************************************************
+int PassSyncService::GetMinBackoff()
+{
+ PASS_INFO_LIST_ITERATOR currentPassInfo;
+
+ unsigned long minBackoff = INFINITE;
+
+ for(currentPassInfo = passInfoList.begin(); currentPassInfo != passInfoList.end(); currentPassInfo++)
+ {
+ if(currentPassInfo->backoffCount < minBackoff)
+ {
+ minBackoff = currentPassInfo->backoffCount;
+ }
+ }
+
+ return minBackoff;
+}
diff --git a/ldap/synctools/passwordsync/passsync/syncserv.h b/ldap/synctools/passwordsync/passsync/syncserv.h
index aaf3252f5..1b28d4c36 100644
--- a/ldap/synctools/passwordsync/passsync/syncserv.h
+++ b/ldap/synctools/passwordsync/passsync/syncserv.h
@@ -9,6 +9,7 @@
#define _SYNCSERV_H_
#include <stdio.h>
+#include <math.h>
#include "ldap.h"
#include "ldap_ssl.h"
#include "ldappr.h"
@@ -18,7 +19,6 @@
#define SYNCSERV_BUF_SIZE 256
#define SYNCSERV_TIMEOUT 10000
#define SYNCSERV_ALLOW_MULTI_MOD false
-#define SYNCSERV_MAX_BACKOFF_COUNT 4
#define SYNCSERV_BASE_BACKOFF_LEN 1000
class PassSyncService : public CNTService
@@ -38,12 +38,15 @@ private:
int Disconnect(LDAP** connection);
int QueryUsername(char* username);
char* GetDN();
- bool CanBind(char* dn, char* password);
int ModifyPassword(char* dn, char* password);
- void ResetBackoff();
- void EndBackoff();
- bool Backoff();
+ bool FutureOccurrence(PASS_INFO_LIST_ITERATOR startingPassInfo);
+ bool MultipleResults();
+ bool CanBind(char* dn, char* password);
+
+ unsigned long BackoffTime(int backoff);
+ void UpdateBackoff();
+ int GetMinBackoff();
PASS_INFO_LIST passInfoList;
HANDLE passhookEventHandle;
@@ -66,9 +69,9 @@ private:
char ldapSearchBase[SYNCSERV_BUF_SIZE];
char ldapUsernameField[SYNCSERV_BUF_SIZE];
char ldapPasswordField[SYNCSERV_BUF_SIZE];
- bool multipleModify;
+ unsigned long maxBackoffTime;
+ int logLevel;
bool isRunning;
- int backoffCount;
fstream outLog;
};
| 0 |
12006251e512b02e089df8e95d7efa875511feab
|
389ds/389-ds-base
|
Bug 616500 - fix coverify Defect Type: Resource leaks issues CID 12094 - 12136
https://bugzilla.redhat.com/show_bug.cgi?id=616500
Resolves: bug 616500
Bug description: fix coverify Defect Type: Resource leaks issues CID 12121
description: Fixed resource leaks in linux_check_release().
|
commit 12006251e512b02e089df8e95d7efa875511feab
Author: Endi S. Dewata <[email protected]>
Date: Sun Jul 18 17:30:38 2010 -0500
Bug 616500 - fix coverify Defect Type: Resource leaks issues CID 12094 - 12136
https://bugzilla.redhat.com/show_bug.cgi?id=616500
Resolves: bug 616500
Bug description: fix coverify Defect Type: Resource leaks issues CID 12121
description: Fixed resource leaks in linux_check_release().
diff --git a/ldap/systools/idsktune.c b/ldap/systools/idsktune.c
index 122c41dec..cd4934d99 100644
--- a/ldap/systools/idsktune.c
+++ b/ldap/systools/idsktune.c
@@ -1114,13 +1114,13 @@ linux_check_release(void)
if (fp == NULL) {
perror("popen");
- return;
+ goto done;
}
if (fgets(osl,128,fp) == NULL) {
printf("WARNING: Cannot determine the kernel number.\n");
pclose(fp);
- return;
+ goto done;
}
pclose(fp);
@@ -1131,18 +1131,20 @@ linux_check_release(void)
if (atoi(strtok(osl, ".")) < 2) {
printf("ERROR: We support kernel version 2.4.7 and higher.\n\n");
flag_os_bad = 1;
- return;
+ goto done;
}
if (atoi(strtok(NULL, ".")) < 4) {
printf("ERROR: We support kernel version 2.4.7 and higher.\n\n");
flag_os_bad = 1;
- return;
+ goto done;
}
if (atoi(strtok(NULL, "-")) < 7) {
printf("ERROR: We support kernel version 2.4.7 and higher.\n\n");
flag_os_bad = 1;
- return;
+ goto done;
}
+done:
+ if (cmd) free(cmd);
}
#endif /* IDDS_LINUX_INCLUDE */
| 0 |
9ae0134cf5be910748c50e4ae946d77e4636c0d0
|
389ds/389-ds-base
|
Issue - 4696 - Password hash upgrade on bind (#4840)
Description:
There is an unintended side effect of the "upgrade password
on bind" feature. It causes the password policy code to be
engaged and it resets the passwordExpirationtime in the entry.
Fix description:
Only allow an external password modify operation or an extended
password modify operation update the password info.
Relates: https://github.com/389ds/389-ds-base/issues/4696
Reviewed by: @droideck, @tbordaz, @mreynolds389 (Thank you)
|
commit 9ae0134cf5be910748c50e4ae946d77e4636c0d0
Author: James Chapman <[email protected]>
Date: Thu Jul 29 14:27:09 2021 +0100
Issue - 4696 - Password hash upgrade on bind (#4840)
Description:
There is an unintended side effect of the "upgrade password
on bind" feature. It causes the password policy code to be
engaged and it resets the passwordExpirationtime in the entry.
Fix description:
Only allow an external password modify operation or an extended
password modify operation update the password info.
Relates: https://github.com/389ds/389-ds-base/issues/4696
Reviewed by: @droideck, @tbordaz, @mreynolds389 (Thank you)
diff --git a/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py b/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py
index 1ebfba2dd..edb2ef6f7 100644
--- a/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py
+++ b/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py
@@ -8,32 +8,111 @@
#
import ldap
import pytest
+from lib389.utils import *
from lib389.topologies import topology_st
from lib389.idm.user import UserAccounts
-from lib389._constants import (DEFAULT_SUFFIX, PASSWORD)
+from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG, PASSWORD, DN_DM)
pytestmark = pytest.mark.tier1
-def test_password_hash_on_upgrade(topology_st):
+CONFIG_ATTR = 'passwordSendExpiringTime'
+USER_DN = 'uid=tuser,ou=people,{}'.format(DEFAULT_SUFFIX)
+USER_RDN = 'tuser'
+USER_PASSWD = 'secret123'
+USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)'
+
[email protected]
+def add_user(topology_st, request):
+ """Adds a user for binding"""
+
+ log.info('Add the user')
+
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+ user = users.create(properties={
+ 'uid': USER_RDN,
+ 'cn': USER_RDN,
+ 'sn': USER_RDN,
+ 'uidNumber': '3000',
+ 'gidNumber': '4000',
+ 'homeDirectory': '/home/user',
+ 'description': 'd_e_s_c',
+ 'userPassword': USER_PASSWD
+ })
+
+ def fin():
+ """Removes the user entry"""
+
+ log.info('Remove the user entry')
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ user.delete()
+
+ request.addfinalizer(fin)
+
[email protected]
+def global_policy(topology_st, request):
+ """Sets the required global
+ password policy attributes under
+ cn=config entry
+ """
+
+ attrs = {'passwordExp': '',
+ 'passwordMaxAge': '',
+ 'passwordWarning': '',
+ CONFIG_ATTR: ''}
+
+ log.info('Get the default values')
+ entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE,
+ '(objectClass=*)', attrs.keys())
+
+ for key in attrs.keys():
+ attrs[key] = entry.getValue(key)
+
+ log.info('Set the new values')
+ topology_st.standalone.config.replace_many(('passwordExp', 'on'),
+ ('passwordMaxAge', '172800'),
+ ('passwordWarning', '86400'),
+ (CONFIG_ATTR, 'on'))
+
+ def fin():
+ """Resets the defaults"""
+
+ log.info('Reset the defaults')
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ for key in attrs.keys():
+ topology_st.standalone.config.replace(key, attrs[key])
+
+ request.addfinalizer(fin)
+ # A short sleep is required after the modifying password policy or cn=config
+ time.sleep(0.5)
+
+def test_password_hash_on_upgrade(topology_st, global_policy, add_user):
"""If a legacy password hash is present, assert that on a correct bind
the hash is "upgraded" to the latest-and-greatest hash format on the
server.
-
+
Assert also that password FAILURE does not alter the password.
+ Assert that the password expiration date, history, etc is not modified
+ as password hash upgrade on bind should be invisible to the user.
+
:id: 42cf99e6-454d-46f5-8f1c-8bb699864a07
:setup: Single instance
:steps: 1. Set a password hash in SSHA256, and hash to pbkdf2 statically
- 2. Test a faulty bind
- 3. Assert the PW is SSHA256
- 4. Test a correct bind
- 5. Assert the PW is PBKDF2
+ 2. Get initial passwordExpirationtime
+ 3. Test a faulty bind
+ 4. Assert the PW is SSHA256
+ 5. Test a correct bind
+ 6. Assert the PW is PBKDF2
+ 7. Assert the passwordExpirationtime hasnt changed after upgrade on bind
:expectedresults:
1. Successfully set the values
- 2. The bind fails
- 3. The PW is SSHA256
- 4. The bind succeeds
- 5. The PW is PBKDF2
+ 2. Successfully get the passwordExpirationtime
+ 3. The bind fails
+ 4. The PW is SSHA256
+ 5. The bind succeeds
+ 6. The PW is PBKDF2udo
+ 7. pwd expiration time hasnt been modifed
+
"""
# Make sure the server is set to pkbdf
topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256')
@@ -41,22 +120,32 @@ def test_password_hash_on_upgrade(topology_st):
topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'on')
users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
- user = users.create_test_user()
+ user = users.get(USER_RDN)
+
# Static version of "password" in SSHA256.
user.set('userPassword', "{SSHA256}9eliEQgjfc4Fcj1IXZtc/ne1GRF+OIjz/NfSTX4f7HByGMQrWHLMLA==")
+ ts1 = user.get_attr_val_utf8('passwordExpirationTime')
+
# Attempt to bind with incorrect password.
with pytest.raises(ldap.INVALID_CREDENTIALS):
badconn = user.bind('badpassword')
+
# Check the pw is SSHA256
up = user.get_attr_val_utf8('userPassword')
assert up.startswith('{SSHA256}')
- # Bind with correct.
+ # Bind with correct, trigger update on bind
+ time.sleep(1)
conn = user.bind(PASSWORD)
+
# Check the pw is now PBKDF2!
up = user.get_attr_val_utf8('userPassword')
assert up.startswith('{PBKDF2_SHA256}')
+ # Verify passwordExpirationtime has not been reset ater hash upgrade
+ ts2 = user.get_attr_val_utf8('passwordExpirationTime')
+ assert ts1 == ts2
+
def test_password_hash_on_upgrade_clearcrypt(topology_st):
"""In some deploymentes, some passwords MAY be in clear or crypt which have
specific possible application integrations allowing the read value to be
diff --git a/ldap/servers/slapd/modify.c b/ldap/servers/slapd/modify.c
index bd66c9413..0436d6f28 100644
--- a/ldap/servers/slapd/modify.c
+++ b/ldap/servers/slapd/modify.c
@@ -1034,6 +1034,7 @@ op_shared_modify(Slapi_PBlock *pb, int pw_change, char *old_pw)
/* update the password info */
update_pw_info(pb, old_pw);
}
+
slapi_pblock_get(pb, SLAPI_ENTRY_POST_OP, &pse);
do_ps_service(pse, NULL, LDAP_CHANGETYPE_MODIFY, 0);
} else {
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index fd5debf98..c7cfef4de 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -3568,7 +3568,7 @@ int32_t update_pw_encoding(Slapi_PBlock *orig_pb, Slapi_Entry *e, Slapi_DN *sdn,
NULL, /* Controls */
NULL, /* UniqueID */
pw_get_componentID(), /* PluginID */
- OP_FLAG_SKIP_MODIFIED_ATTRS &
+ OP_FLAG_SKIP_MODIFIED_ATTRS |
OP_FLAG_ACTION_SKIP_PWDPOLICY); /* Flags */
slapi_modify_internal_pb(pb);
| 0 |
9a6187a33ab034a87239672332e685e25bada374
|
389ds/389-ds-base
|
Bug 622903 - fix coverity Defect Type: Code maintainability issues
https://bugzilla.redhat.com/show_bug.cgi?id=622903
Comment:
Pointer "replica" returned by "object_get_data(prp->replica_object)"
(line 502) is never used. In "case STATE_READY_TO_ACQUIRE", Replica
object "replica" is not needed.
|
commit 9a6187a33ab034a87239672332e685e25bada374
Author: Noriko Hosoi <[email protected]>
Date: Tue Aug 10 15:42:35 2010 -0700
Bug 622903 - fix coverity Defect Type: Code maintainability issues
https://bugzilla.redhat.com/show_bug.cgi?id=622903
Comment:
Pointer "replica" returned by "object_get_data(prp->replica_object)"
(line 502) is never used. In "case STATE_READY_TO_ACQUIRE", Replica
object "replica" is not needed.
diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c
index f2e9034d5..e5e67c3bb 100644
--- a/ldap/servers/plugins/replication/windows_inc_protocol.c
+++ b/ldap/servers/plugins/replication/windows_inc_protocol.c
@@ -499,7 +499,6 @@ windows_inc_run(Private_Repl_Protocol *prp)
/* ONREPL - at this state we unconditionally acquire the replica
ignoring all events. Not sure if this is good */
object_acquire(prp->replica_object);
- replica = object_get_data(prp->replica_object);
rc = windows_acquire_replica(prp, &ruv , (run_dirsync == 0) /* yes, check the consumer RUV for incremental, but not if we're going to dirsync afterwards */);
@@ -539,7 +538,7 @@ windows_inc_run(Private_Repl_Protocol *prp)
prp->last_acquire_response_code, NULL);
}
- object_release(prp->replica_object); replica = NULL;
+ object_release(prp->replica_object);
break;
case STATE_BACKOFF_START:
| 0 |
bdf955bd7400f51bad8787cc743290aaedf1ca0c
|
389ds/389-ds-base
|
Issue 4421 - Unable to build with Rust enabled in closed environment
Description: Add Makefile flags and update rpm.mk that allow updating
and downloading all the cargo/rust dependencies. This is
needed for nightly tests and upstream/downstream releases.
Fixes: https://github.com/389ds/389-ds-base/issues/4421
Reviewed by: firstyear(Thanks!)
|
commit bdf955bd7400f51bad8787cc743290aaedf1ca0c
Author: Mark Reynolds <[email protected]>
Date: Mon Dec 7 11:00:45 2020 -0500
Issue 4421 - Unable to build with Rust enabled in closed environment
Description: Add Makefile flags and update rpm.mk that allow updating
and downloading all the cargo/rust dependencies. This is
needed for nightly tests and upstream/downstream releases.
Fixes: https://github.com/389ds/389-ds-base/issues/4421
Reviewed by: firstyear(Thanks!)
diff --git a/rpm.mk b/rpm.mk
index 9cd0a0c05..f08cf61d7 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -25,7 +25,7 @@ TSAN_ON = 0
# Undefined Behaviour Sanitizer
UBSAN_ON = 0
-RUST_ON = 0
+RUST_ON = 1
COCKPIT_ON = 1
@@ -39,6 +39,7 @@ update-cargo-dependencies:
cargo update --manifest-path=./src/Cargo.toml
download-cargo-dependencies:
+ cargo update --manifest-path=./src/Cargo.toml
cargo vendor --manifest-path=./src/Cargo.toml
cargo fetch --manifest-path=./src/Cargo.toml
tar -czf vendor.tar.gz vendor
@@ -120,7 +121,7 @@ rpmbuildprep:
cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \
fi
-srpms: rpmroot srpmdistdir tarballs rpmbuildprep
+srpms: rpmroot srpmdistdir download-cargo-dependencies tarballs rpmbuildprep
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index ef05fd9fa..12d58c5ba 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -337,7 +337,7 @@ UBSAN_FLAGS="--enable-ubsan --enable-debug"
%endif
%if %{use_rust}
-RUST_FLAGS="--enable-rust"
+RUST_FLAGS="--enable-rust --enable-rust-offline"
%endif
%if !%{use_cockpit}
diff --git a/src/Cargo.lock b/src/Cargo.lock
index e0459f20a..32484efe3 100644
--- a/src/Cargo.lock
+++ b/src/Cargo.lock
@@ -4,828 +4,833 @@
name = "ahash"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6789e291be47ace86a60303502173d84af8327e3627ecf334356ee0f87a164c"
dependencies = [
- "const-random 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "const-random",
]
[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
]
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
- "hermit-abi 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hermit-abi",
+ "libc",
+ "winapi",
]
[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "base64"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
dependencies = [
- "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder",
]
[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "byteorder"
version = "1.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
[[package]]
name = "cbindgen"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
dependencies = [
- "clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.117 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_json 1.0.59 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)",
- "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "toml 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap",
+ "log",
+ "proc-macro2",
+ "quote",
+ "serde",
+ "serde_json",
+ "syn",
+ "tempfile",
+ "toml",
]
[[package]]
name = "cc"
-version = "1.0.63"
+version = "1.0.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48"
dependencies = [
- "jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
+ "jobserver",
]
[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "2.33.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [
- "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "cloudabi"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ansi_term",
+ "atty",
+ "bitflags",
+ "strsim",
+ "textwrap",
+ "unicode-width",
+ "vec_map",
]
[[package]]
name = "concread"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14fe52c39ed4e846fb3e6ad4bfe46224ef24db64ff7c5f496d2501c88c270b14"
dependencies = [
- "ahash 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "num 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot 0.11.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ahash",
+ "crossbeam",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+ "num",
+ "parking_lot",
+ "rand",
+ "smallvec",
]
[[package]]
name = "const-random"
-version = "0.1.11"
+version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f590d95d011aa80b063ffe3253422ed5aa462af4e9867d43ce8337562bac77c4"
dependencies = [
- "const-random-macro 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "const-random-macro",
+ "proc-macro-hack",
]
[[package]]
name = "const-random-macro"
-version = "0.1.11"
+version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "615f6e27d000a2bffbc7f2f6a8669179378fa27ee4d0a509e985dfc0a7defb40"
dependencies = [
- "getrandom 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.2.0",
+ "lazy_static",
+ "proc-macro-hack",
+ "tiny-keccak",
]
[[package]]
name = "crossbeam"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-epoch",
+ "crossbeam-queue",
+ "crossbeam-utils",
]
[[package]]
name = "crossbeam-channel"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils",
+ "maybe-uninit",
]
[[package]]
name = "crossbeam-deque"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
dependencies = [
- "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+ "maybe-uninit",
]
[[package]]
name = "crossbeam-epoch"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "cfg-if 0.1.10",
+ "crossbeam-utils",
+ "lazy_static",
+ "maybe-uninit",
+ "memoffset",
+ "scopeguard",
]
[[package]]
name = "crossbeam-queue"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "crossbeam-utils",
+ "maybe-uninit",
]
[[package]]
name = "crossbeam-utils"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "cfg-if 0.1.10",
+ "lazy_static",
]
+[[package]]
+name = "crunchy"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+
[[package]]
name = "entryuuid"
version = "0.1.0"
dependencies = [
- "cc 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "slapi_r_plugin 0.1.0",
- "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc",
+ "libc",
+ "paste",
+ "slapi_r_plugin",
+ "uuid",
]
[[package]]
name = "entryuuid_syntax"
version = "0.1.0"
dependencies = [
- "cc 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "slapi_r_plugin 0.1.0",
- "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc",
+ "libc",
+ "paste",
+ "slapi_r_plugin",
+ "uuid",
]
[[package]]
name = "fernet"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7ac567fd75ce6bc28b68e63b5beaa3ce34f56bafd1122f64f8647c822e38a8b"
dependencies = [
- "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "openssl 0.10.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "base64 0.10.1",
+ "byteorder",
+ "getrandom 0.1.15",
+ "openssl",
]
[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
- "foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "foreign-types-shared",
]
[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "getrandom"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "libc",
+ "wasi",
]
[[package]]
name = "getrandom"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "libc",
+ "wasi",
]
[[package]]
name = "hermit-abi"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
dependencies = [
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
]
[[package]]
name = "instant"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec"
dependencies = [
- "cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
]
[[package]]
name = "itoa"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
[[package]]
name = "jobserver"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
dependencies = [
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
-version = "0.2.80"
+version = "0.2.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb"
[[package]]
name = "librnsslapd"
version = "0.1.0"
dependencies = [
- "cbindgen 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "slapd 0.1.0",
+ "cbindgen",
+ "libc",
+ "slapd",
]
[[package]]
name = "librslapd"
version = "0.1.0"
dependencies = [
- "cbindgen 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "concread 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "slapd 0.1.0",
+ "cbindgen",
+ "concread",
+ "libc",
+ "slapd",
]
[[package]]
name = "lock_api"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312"
dependencies = [
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard",
]
[[package]]
name = "log"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
]
[[package]]
name = "maybe-uninit"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
[[package]]
name = "memoffset"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
]
[[package]]
name = "num"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b7a8e9be5e039e2ff869df49155f1c06bd01ade2117ec783e56ab0932b67a8f"
dependencies = [
- "num-bigint 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-complex 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-integer 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-iter 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-rational 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-bigint",
+ "num-complex",
+ "num-integer",
+ "num-iter",
+ "num-rational",
+ "num-traits",
]
[[package]]
name = "num-bigint"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e9a41747ae4633fce5adffb4d2e81ffc5e89593cb19917f8fb2cc5ff76507bf"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-integer 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "num-integer",
+ "num-traits",
]
[[package]]
name = "num-complex"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5"
dependencies = [
- "num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits",
]
[[package]]
name = "num-integer"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "num-traits",
]
[[package]]
name = "num-iter"
version = "0.1.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-integer 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "num-integer",
+ "num-traits",
]
[[package]]
name = "num-rational"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-bigint 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-integer 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "num-bigint",
+ "num-integer",
+ "num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
]
[[package]]
name = "openssl"
-version = "0.10.30"
+version = "0.10.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d008f51b1acffa0d3450a68606e6a51c123012edaacb0f4e1426bd978869187"
dependencies = [
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "openssl-sys 0.9.58 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags",
+ "cfg-if 1.0.0",
+ "foreign-types",
+ "lazy_static",
+ "libc",
+ "openssl-sys",
]
[[package]]
name = "openssl-sys"
-version = "0.9.58"
+version = "0.9.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "de52d8eabd217311538a39bba130d7dea1f1e118010fee7a033d966845e7d5fe"
dependencies = [
- "autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cc 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "pkg-config 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "vcpkg 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "autocfg",
+ "cc",
+ "libc",
+ "pkg-config",
+ "vcpkg",
]
[[package]]
name = "parking_lot"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb"
dependencies = [
- "instant 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "lock_api 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot_core 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "instant",
+ "lock_api",
+ "parking_lot_core",
]
[[package]]
name = "parking_lot_core"
-version = "0.8.0"
+version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7c6d9b8427445284a09c55be860a15855ab580a417ccad9da88f5a06787ced0"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "cloudabi 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "instant 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 1.0.0",
+ "instant",
+ "libc",
+ "redox_syscall",
+ "smallvec",
+ "winapi",
]
[[package]]
name = "paste"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
dependencies = [
- "paste-impl 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "paste-impl",
+ "proc-macro-hack",
]
[[package]]
name = "paste-impl"
version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
dependencies = [
- "proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack",
]
[[package]]
name = "pkg-config"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
[[package]]
name = "ppv-lite86"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
[[package]]
name = "proc-macro-hack"
version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro2"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
dependencies = [
- "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid",
]
[[package]]
name = "pwdchan"
version = "0.1.0"
dependencies = [
- "base64 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cc 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "openssl 0.10.30 (registry+https://github.com/rust-lang/crates.io-index)",
- "paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "slapi_r_plugin 0.1.0",
- "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "base64 0.13.0",
+ "cc",
+ "libc",
+ "openssl",
+ "paste",
+ "slapi_r_plugin",
+ "uuid",
]
[[package]]
name = "quote"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
]
[[package]]
name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
- "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.15",
+ "libc",
+ "rand_chacha",
+ "rand_core",
+ "rand_hc",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
- "ppv-lite86 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86",
+ "rand_core",
]
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
- "getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "getrandom 0.1.15",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
- "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core",
]
[[package]]
name = "redox_syscall"
version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
[[package]]
name = "remove_dir_all"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi",
]
[[package]]
name = "ryu"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
-version = "1.0.117"
+version = "1.0.118"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800"
dependencies = [
- "serde_derive 1.0.117 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.117"
+version = "1.0.118"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df"
dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "syn 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "syn",
]
[[package]]
name = "serde_json"
-version = "1.0.59"
+version = "1.0.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779"
dependencies = [
- "itoa 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "ryu 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.117 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa",
+ "ryu",
+ "serde",
]
[[package]]
name = "slapd"
version = "0.1.0"
dependencies = [
- "fernet 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fernet",
]
[[package]]
name = "slapi_r_plugin"
version = "0.1.0"
dependencies = [
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc",
+ "paste",
+ "uuid",
]
[[package]]
name = "smallvec"
-version = "1.5.0"
+version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75"
[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "syn"
-version = "1.0.48"
+version = "1.0.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a2af957a63d6bd42255c359c93d9bfdb97076bd3b820897ce55ffbfbf107f44"
dependencies = [
- "proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
]
[[package]]
name = "tempfile"
version = "3.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)",
- "remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.10",
+ "libc",
+ "rand",
+ "redox_syscall",
+ "remove_dir_all",
+ "winapi",
]
[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
- "unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width",
+]
+
+[[package]]
+name = "tiny-keccak"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
+dependencies = [
+ "crunchy",
]
[[package]]
name = "toml"
version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645"
dependencies = [
- "serde 1.0.117 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde",
]
[[package]]
name = "unicode-width"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
[[package]]
name = "unicode-xid"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
[[package]]
name = "uuid"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11"
dependencies = [
- "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand",
]
[[package]]
name = "vcpkg"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c"
[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
- "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[metadata]
-"checksum ahash 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f6789e291be47ace86a60303502173d84af8327e3627ecf334356ee0f87a164c"
-"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
-"checksum atty 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-"checksum autocfg 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
-"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
-"checksum base64 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
-"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum cbindgen 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9daec6140ab4dcd38c3dd57e580b59a621172a526ac79f1527af760a55afeafd"
-"checksum cc 1.0.63 (registry+https://github.com/rust-lang/crates.io-index)" = "ad9c6140b5a2c7db40ea56eb1821245e5362b44385c05b76288b1a599934ac87"
-"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-"checksum cfg-if 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-"checksum clap 2.33.3 (registry+https://github.com/rust-lang/crates.io-index)" = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
-"checksum cloudabi 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467"
-"checksum concread 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "14fe52c39ed4e846fb3e6ad4bfe46224ef24db64ff7c5f496d2501c88c270b14"
-"checksum const-random 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "02dc82c12dc2ee6e1ded861cf7d582b46f66f796d1b6c93fa28b911ead95da02"
-"checksum const-random-macro 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "fc757bbb9544aa296c2ae00c679e81f886b37e28e59097defe0cf524306f6685"
-"checksum crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e"
-"checksum crossbeam-channel 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87"
-"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
-"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
-"checksum crossbeam-queue 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
-"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
-"checksum fernet 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ac567fd75ce6bc28b68e63b5beaa3ce34f56bafd1122f64f8647c822e38a8b"
-"checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
-"checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
-"checksum getrandom 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
-"checksum getrandom 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee8025cf36f917e6a52cce185b7c7177689b838b7ec138364e50cc2277a56cf4"
-"checksum hermit-abi 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8"
-"checksum instant 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec"
-"checksum itoa 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
-"checksum jobserver 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
-"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614"
-"checksum lock_api 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312"
-"checksum log 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b"
-"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
-"checksum memoffset 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
-"checksum num 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8b7a8e9be5e039e2ff869df49155f1c06bd01ade2117ec783e56ab0932b67a8f"
-"checksum num-bigint 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e9a41747ae4633fce5adffb4d2e81ffc5e89593cb19917f8fb2cc5ff76507bf"
-"checksum num-complex 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "747d632c0c558b87dbabbe6a82f3b4ae03720d0646ac5b7b4dae89394be5f2c5"
-"checksum num-integer 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)" = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
-"checksum num-iter 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59"
-"checksum num-rational 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07"
-"checksum num-traits 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
-"checksum openssl 0.10.30 (registry+https://github.com/rust-lang/crates.io-index)" = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4"
-"checksum openssl-sys 0.9.58 (registry+https://github.com/rust-lang/crates.io-index)" = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de"
-"checksum parking_lot 0.11.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb"
-"checksum parking_lot_core 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b"
-"checksum paste 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
-"checksum paste-impl 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
-"checksum pkg-config 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-"checksum ppv-lite86 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
-"checksum proc-macro-hack 0.5.19 (registry+https://github.com/rust-lang/crates.io-index)" = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
-"checksum proc-macro2 1.0.24 (registry+https://github.com/rust-lang/crates.io-index)" = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71"
-"checksum quote 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37"
-"checksum rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
-"checksum rand_chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
-"checksum rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
-"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
-"checksum redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)" = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
-"checksum remove_dir_all 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-"checksum ryu 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
-"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-"checksum serde 1.0.117 (registry+https://github.com/rust-lang/crates.io-index)" = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a"
-"checksum serde_derive 1.0.117 (registry+https://github.com/rust-lang/crates.io-index)" = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e"
-"checksum serde_json 1.0.59 (registry+https://github.com/rust-lang/crates.io-index)" = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95"
-"checksum smallvec 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7acad6f34eb9e8a259d3283d1e8c1d34d7415943d4895f65cc73813c7396fc85"
-"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
-"checksum syn 1.0.48 (registry+https://github.com/rust-lang/crates.io-index)" = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac"
-"checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
-"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
-"checksum toml 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)" = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645"
-"checksum unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
-"checksum unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
-"checksum uuid 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11"
-"checksum vcpkg 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c"
-"checksum vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
-"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
-"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
| 0 |
2c4e574ee1103fd8b4b294c55d3398f84c3be220
|
389ds/389-ds-base
|
Resolves: 230458
Summary: Corrected out of date licensing/copyright block in a few files.
|
commit 2c4e574ee1103fd8b4b294c55d3398f84c3be220
Author: Nathan Kinder <[email protected]>
Date: Wed Feb 28 23:05:39 2007 +0000
Resolves: 230458
Summary: Corrected out of date licensing/copyright block in a few files.
diff --git a/ldap/docs/dirhlp/Makefile b/ldap/docs/dirhlp/Makefile
index f4ade877d..51796e9fc 100644
--- a/ldap/docs/dirhlp/Makefile
+++ b/ldap/docs/dirhlp/Makefile
@@ -1,10 +1,41 @@
-#
-# PROPRIETARY/CONFIDENTIAL. Use of this product is subject to
-# license terms.
-# Copyright 2001 Sun Microsystems, Inc.
-# Portions copyright 1999, 2002-2003 Netscape Communications Corporation.
+# BEGIN COPYRIGHT BLOCK
+# This Program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; version 2 of the License.
+#
+# This Program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place, Suite 330, Boston, MA 02111-1307 USA.
+#
+# In addition, as a special exception, Red Hat, Inc. gives You the additional
+# right to link the code of this Program with code not covered under the GNU
+# General Public License ("Non-GPL Code") and to distribute linked combinations
+# including the two, subject to the limitations in this paragraph. Non-GPL Code
+# permitted under this exception must only link to the code of this Program
+# through those well defined interfaces identified in the file named EXCEPTION
+# found in the source code files (the "Approved Interfaces"). The files of
+# Non-GPL Code may instantiate templates or use macros or inline functions from
+# the Approved Interfaces without causing the resulting work to be covered by
+# the GNU General Public License. Only Red Hat, Inc. may make changes or
+# additions to the list of Approved Interfaces. You must obey the GNU General
+# Public License in all respects for all of the Program code and other code used
+# in conjunction with the Program except the Non-GPL Code covered by this
+# exception. If you modify this file, you may extend this exception to your
+# version of the file, but you are not obligated to do so. If you do not wish to
+# provide this exception without modification, you must delete this exception
+# statement from your version and license this file solely under the GPL without
+# exception.
+#
+#
+# Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
+# Copyright (C) 2007 Red Hat, Inc.
# All rights reserved.
-#
+# END COPYRIGHT BLOCK
+
#
# GNU Makefile for Directory Server Console Help
#
diff --git a/ldap/docs/dirhlp/index.map b/ldap/docs/dirhlp/index.map
index 804231850..f5a2aab04 100644
--- a/ldap/docs/dirhlp/index.map
+++ b/ldap/docs/dirhlp/index.map
@@ -1,149 +1,180 @@
-; PROPRIETARY/CONFIDENTIAL. Use of this product is subject to
-; license terms. Copyright 2001 Sun Microsystems, Inc.
-; Portions copyright 1999, 2001-2003 Netscape Communications Corporation.
-; All rights reserved.
-;
-;----------------------------------------------------------------------
-; Last update: 22 October 2001 by Tech Pubs
-;-------------------------------------------MAPPINGS
-; UI Reference Token = help/filename.htm
-;-------------------------------------------PROGRAMS
-;
-;MENU BAR
-;
-;Help Menu from all tabs
-tasks-menubar-help = ag/contents.htm
-preferences-confirmation-help = help/confirmation_preferences.htm
-
-;TASKS TAB
-tasks-backup-help = help/backup.htm
-tasks-restore-help = help/restore.htm
-
-
-;CONFIGURATION TAB
-;Root Node
-configuration-system-settings-help = help/settings.htm
-configuration-system-performance-help = help/performance.htm
-configuration-system-encryption-help = help/encryption.htm
-configuration-system-encryption-preferences-dbox-help = help/encryption_pref.htm
-configuration-system-snmp-help = help/snmp.htm
-configuration-system-manager-help = help/manager.htm
-
-;Database Icon
-configuration-database-indexes-help = help/indexes.htm
-configuration-database-passwords-help = help/passwords.htm
-configuration-database-accountlockout-help = help/account_lockout.htm
-configuration-database-indexes-add-dbox-help = help/index_attribute.htm
-configuration-database-settings-help = help/ldbm_instance_settings.htm
-configuration-database-import-ldap-dbox-help = help/import_ldap.htm
-configuration-database-import-fastwire-dbox-help = help/import_fastwire.htm
-configuration-database-initialize-backend-dbox-help = help/initialize_backend.htm
-configuration-database-export-dbox-help = help/export_general.htm
-configuration-database-export-single-dbox-help = help/export_single.htm
-configuration-database-plugin-setting-help = help/ldbm_plugin_settings.htm
-configuration-database-default-indexes-help = help/default_indexes.htm
-
-;Chaining Database Icon
-configuration-new-chaining-instance-dbox-help = help/new_chaining_be.htm
-configuration-chaining-settings-help = help/chaining_settings.htm
-configuration-chaining-settings-ctrl-chooser-dbox-help = help/chaining_controls.htm
-configuration-chaining-settings-comp-chooser-dbox-help = help/chaining_components.htm
-configuration-chaining-default-help = help/chaining_default.htm
-configuration-chaining-connection-help = help/chaining_connection.htm
-configuration-chaining-authentication-help = help/chaining_authentication.htm
-
-;LDBM Database Instance Icon
-configuration-new-ldbm-instance-dbox-help = help/new_ldbm_instance.htm
-
-;Mapping Tree
-configuration-mapping-settings-help = help/mapping_node_settings.htm
-configuration-mapping-add-backend-dbox-help = help/mapping_backend_add.htm
-configuration-mapping-database-help = help/mapping_database.htm
-configuration-mapping-referral-help = help/mapping_referrals.htm
-configuration-new-mapping-node-dbox-help = help/new_mapping_node.htm
-
-
-;Schema Icon
-configuration-schema-objclass-help = help/object_classes.htm
-configuration-schema-objclass-create-dbox-help = help/create_objclass.htm
-configuration-schema-attr-help = help/attributes.htm
-configuration-schema-attr-create-dbox-help = help/create_attributes.htm
-configuration-schema-mrule-help = help/matching_rules.htm
-
-;Replication Agreements Icon
-configuration-replication-legacyconsumersettings-help = help/legacy_consumer_settings.htm
-configuration-replication-suppliersettings-help = help/supplier_settings.htm
-configuration-replication-replicasettings-help = help/replica_settings.htm
-configuration-replication-summary-help = help/rep_summary.htm
-configuration-replication-schedule-help = help/schedule_rep.htm
-configuration-replication-content-help = help/rep_content.htm
-configuration-replication-host-dbox-help = help/consumer_server_info.htm
-
-;Logs Icon
-configuration-logs-access-help = help/access_log.htm
-configuration-logs-error-help = help/error_log.htm
-configuration-logs-audit-help = help/audit_log.htm
-
-;Plugins Icons
-configuration-plugins-help = help/plugins.htm
-
-
-;REPLICATION AGREEMENT WIZARD
-replication-wizard-content-help = help/rep_source_destination.htm
-replication-wizard-schedule-help = help/schedule_rep_wiz.htm
-replication-wizard-consumerinit-help = help/consumer_init.htm
-replication-wizard-summary-help = help/summary_wiz.htm
-replication-wizard-legacyrmmrname-help = help/rep_agreement_name.htm
-replication-wizard-cirsirselect-help = help/rep_select_wiz.htm
-replication-wizard-cirsirname-help = help/rep_name_wiz.htm
-replication-wizard-attribute-help = help/rep_attributes_wiz.htm
-
-
-;DIRECTORY TAB
-;Role Configuration
-configuration-role-member-filtered-help = help/new_filtered_member.htm
-configuration-role-member-managed-help = help/new_managed_member.htm
-configuration-role-member-nested-help = help/new_nested_member.htm
-configuration-role-account-help = help/role_accounts.htm
-configuration-role-info-help = help/create_new_role.htm
-configuration-set-role = help/set_role.htm
-configuration-choose-role = help/role_selector.htm
-
-;CoS Configuration
-configuration-cos-attributes-help = help/cos_attributes.htm
-configuration-cos-info-help = help/cos_info.htm
-configuration-cos-template-help = help/cos_template.htm
-
-;STATUS TAB
-;Replication Icon
-status-replication-help = help/replication_status.htm
-
-;Logs Icon
-status-logs-access-help = help/access_log_status.htm
-status-logs-error-help = help/error_log_status.htm
-status-logs-audit-help = help/audit_log_status.htm
-
-;Performance Counters Icon
-status-perfcounters-server-help = help/server_performance.htm
-status-perfcounters-database-help = help/database_performance.htm
-
-
-;PROPERTY EDITOR WINDOWS
-property-main-help = help/property_editor.htm
-property-new-objectclass-dbox-help = help/add_new_objclass.htm
-property-new-attribute-dbox-help = help/add_new_attribute.htm
-
-;LDAP URL Construction Dialog
-configuration-construct-new-url-dbox-help = help/construct_ldap_url.htm
-
-;Account Inactivation in Entry Edit Dialog
-configuration-user-account-help = help/
-
-;VERY ADVANCED SEARCH DIALOG BOX
-search-dbox-veryadvanced-help = help/advanced_search.htm
-
-;INSTALL
-menubar-newinstance-dbox-help = help/new_server_instance.htm
-
-; SUBTREE SELECTION DIALOG BOX
-subtree-selection-dbox-help = help/directory_browser.htm
+; --- BEGIN COPYRIGHT BLOCK ---
+; This Program is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free Software
+; Foundation; version 2 of the License.
+;
+; This Program is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+; FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License along with
+; this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+; Place, Suite 330, Boston, MA 02111-1307 USA.
+;
+; In addition, as a special exception, Red Hat, Inc. gives You the additional
+; right to link the code of this Program with code not covered under the GNU
+; General Public License ("Non-GPL Code") and to distribute linked combinations
+; including the two, subject to the limitations in this paragraph. Non-GPL Code
+; permitted under this exception must only link to the code of this Program
+; through those well defined interfaces identified in the file named EXCEPTION
+; found in the source code files (the "Approved Interfaces"). The files of
+; Non-GPL Code may instantiate templates or use macros or inline functions from
+; the Approved Interfaces without causing the resulting work to be covered by
+; the GNU General Public License. Only Red Hat, Inc. may make changes or
+; additions to the list of Approved Interfaces. You must obey the GNU General
+; Public License in all respects for all of the Program code and other code used
+; in conjunction with the Program except the Non-GPL Code covered by this
+; exception. If you modify this file, you may extend this exception to your
+; version of the file, but you are not obligated to do so. If you do not wish to
+; provide this exception without modification, you must delete this exception
+; statement from your version and license this file solely under the GPL without
+; exception.
+;
+;
+; Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
+; Copyright (C) 2007 Red Hat, Inc.
+; All rights reserved.
+; --- END COPYRIGHT BLOCK ---
+
+;-------------------------------------------MAPPINGS
+; UI Reference Token = help/filename.htm
+;-------------------------------------------PROGRAMS
+;
+;MENU BAR
+;
+;Help Menu from all tabs
+tasks-menubar-help = ag/contents.htm
+preferences-confirmation-help = help/confirmation_preferences.htm
+
+;TASKS TAB
+tasks-backup-help = help/backup.htm
+tasks-restore-help = help/restore.htm
+
+
+;CONFIGURATION TAB
+;Root Node
+configuration-system-settings-help = help/settings.htm
+configuration-system-performance-help = help/performance.htm
+configuration-system-encryption-help = help/encryption.htm
+configuration-system-encryption-preferences-dbox-help = help/encryption_pref.htm
+configuration-system-snmp-help = help/snmp.htm
+configuration-system-manager-help = help/manager.htm
+
+;Database Icon
+configuration-database-indexes-help = help/indexes.htm
+configuration-database-passwords-help = help/passwords.htm
+configuration-database-accountlockout-help = help/account_lockout.htm
+configuration-database-indexes-add-dbox-help = help/index_attribute.htm
+configuration-database-settings-help = help/ldbm_instance_settings.htm
+configuration-database-import-ldap-dbox-help = help/import_ldap.htm
+configuration-database-import-fastwire-dbox-help = help/import_fastwire.htm
+configuration-database-initialize-backend-dbox-help = help/initialize_backend.htm
+configuration-database-export-dbox-help = help/export_general.htm
+configuration-database-export-single-dbox-help = help/export_single.htm
+configuration-database-plugin-setting-help = help/ldbm_plugin_settings.htm
+configuration-database-default-indexes-help = help/default_indexes.htm
+
+;Chaining Database Icon
+configuration-new-chaining-instance-dbox-help = help/new_chaining_be.htm
+configuration-chaining-settings-help = help/chaining_settings.htm
+configuration-chaining-settings-ctrl-chooser-dbox-help = help/chaining_controls.htm
+configuration-chaining-settings-comp-chooser-dbox-help = help/chaining_components.htm
+configuration-chaining-default-help = help/chaining_default.htm
+configuration-chaining-connection-help = help/chaining_connection.htm
+configuration-chaining-authentication-help = help/chaining_authentication.htm
+
+;LDBM Database Instance Icon
+configuration-new-ldbm-instance-dbox-help = help/new_ldbm_instance.htm
+
+;Mapping Tree
+configuration-mapping-settings-help = help/mapping_node_settings.htm
+configuration-mapping-add-backend-dbox-help = help/mapping_backend_add.htm
+configuration-mapping-database-help = help/mapping_database.htm
+configuration-mapping-referral-help = help/mapping_referrals.htm
+configuration-new-mapping-node-dbox-help = help/new_mapping_node.htm
+
+
+;Schema Icon
+configuration-schema-objclass-help = help/object_classes.htm
+configuration-schema-objclass-create-dbox-help = help/create_objclass.htm
+configuration-schema-attr-help = help/attributes.htm
+configuration-schema-attr-create-dbox-help = help/create_attributes.htm
+configuration-schema-mrule-help = help/matching_rules.htm
+
+;Replication Agreements Icon
+configuration-replication-legacyconsumersettings-help = help/legacy_consumer_settings.htm
+configuration-replication-suppliersettings-help = help/supplier_settings.htm
+configuration-replication-replicasettings-help = help/replica_settings.htm
+configuration-replication-summary-help = help/rep_summary.htm
+configuration-replication-schedule-help = help/schedule_rep.htm
+configuration-replication-content-help = help/rep_content.htm
+configuration-replication-host-dbox-help = help/consumer_server_info.htm
+
+;Logs Icon
+configuration-logs-access-help = help/access_log.htm
+configuration-logs-error-help = help/error_log.htm
+configuration-logs-audit-help = help/audit_log.htm
+
+;Plugins Icons
+configuration-plugins-help = help/plugins.htm
+
+
+;REPLICATION AGREEMENT WIZARD
+replication-wizard-content-help = help/rep_source_destination.htm
+replication-wizard-schedule-help = help/schedule_rep_wiz.htm
+replication-wizard-consumerinit-help = help/consumer_init.htm
+replication-wizard-summary-help = help/summary_wiz.htm
+replication-wizard-legacyrmmrname-help = help/rep_agreement_name.htm
+replication-wizard-cirsirselect-help = help/rep_select_wiz.htm
+replication-wizard-cirsirname-help = help/rep_name_wiz.htm
+replication-wizard-attribute-help = help/rep_attributes_wiz.htm
+
+
+;DIRECTORY TAB
+;Role Configuration
+configuration-role-member-filtered-help = help/new_filtered_member.htm
+configuration-role-member-managed-help = help/new_managed_member.htm
+configuration-role-member-nested-help = help/new_nested_member.htm
+configuration-role-account-help = help/role_accounts.htm
+configuration-role-info-help = help/create_new_role.htm
+configuration-set-role = help/set_role.htm
+configuration-choose-role = help/role_selector.htm
+
+;CoS Configuration
+configuration-cos-attributes-help = help/cos_attributes.htm
+configuration-cos-info-help = help/cos_info.htm
+configuration-cos-template-help = help/cos_template.htm
+
+;STATUS TAB
+;Replication Icon
+status-replication-help = help/replication_status.htm
+
+;Logs Icon
+status-logs-access-help = help/access_log_status.htm
+status-logs-error-help = help/error_log_status.htm
+status-logs-audit-help = help/audit_log_status.htm
+
+;Performance Counters Icon
+status-perfcounters-server-help = help/server_performance.htm
+status-perfcounters-database-help = help/database_performance.htm
+
+
+;PROPERTY EDITOR WINDOWS
+property-main-help = help/property_editor.htm
+property-new-objectclass-dbox-help = help/add_new_objclass.htm
+property-new-attribute-dbox-help = help/add_new_attribute.htm
+
+;LDAP URL Construction Dialog
+configuration-construct-new-url-dbox-help = help/construct_ldap_url.htm
+
+;Account Inactivation in Entry Edit Dialog
+configuration-user-account-help = help/
+
+;VERY ADVANCED SEARCH DIALOG BOX
+search-dbox-veryadvanced-help = help/advanced_search.htm
+
+;INSTALL
+menubar-newinstance-dbox-help = help/new_server_instance.htm
+
+; SUBTREE SELECTION DIALOG BOX
+subtree-selection-dbox-help = help/directory_browser.htm
diff --git a/ldap/docs/dirhlp/tokens.map b/ldap/docs/dirhlp/tokens.map
index 2fb8294f5..651288092 100644
--- a/ldap/docs/dirhlp/tokens.map
+++ b/ldap/docs/dirhlp/tokens.map
@@ -1,113 +1,143 @@
-;-------------------------------------------------------------------------
-; PROPRIETARY/CONFIDENTIAL. Use of this product is subject to
-; license terms. Copyright ? 2001 Sun Microsystems, Inc.
-; Portions copyright 1999, 2001-2003 Netscape Communications Corporation.
-; All rights reserved.
-;
-;-------------------------------------------------------------------------
-; Last update: 10 July 2003 by Tech Pubs
-;-------------------------------------------MAPPINGS
-; UI Reference Token = help/filename.htm
-;-------------------------------------------PROGRAMS
-;
-;MENU BAR
-;
-;Help Menu from all tabs
-framework-menubar-contents = help/redir_agtoc.htm
-preferences-confirmation-help = help/helpmenu.htm
-framework-menubar-dochome = help/redir_dochome.htm
-
-;TASKS TAB
-tasks-backup-help = help/taskstab_bkup_restore.htm
-tasks-restore-help = help/taskstab_bkup_restore2.htm
-
-
-;CONFIGURATION TAB
-;Root Node
-configuration-system-settings-help = help/configtab_rootnode.htm
-configuration-system-performance-help = help/configtab_rootnode2.htm
-configuration-system-encryption-help = help/configtab_rootnode3.htm
-configuration-system-snmp-help = help/configtab_rootnode5.htm
-configuration-system-manager-help = help/configtab_rootnode6.htm
-configuration-system-sasl-help = help/configtab_rootnode7.htm
-configuration-system-sasl-add-dbox-help = help/configtab_rootnode8.htm
-configuration-system-sasl-mod-dbox-help = help/configtab_rootnode9.htm
-
-;Database Icon
-configuration-database-indexes-help = help/configtab_db.htm
-configuration-database-passwords-help = help/configtab_db2.htm
-configuration-database-accountlockout-help = help/configtab_db3.htm
-configuration-database-indexes-add-dbox-help = help/configtab_db4.htm
-configuration-database-settings-help = help/configtab_db5.htm
-configuration-database-import-ldap-dbox-help = help/configtab_db6.htm
-configuration-database-import-fastwire-dbox-help = help/configtab_db7.htm
-configuration-database-initialize-backend-dbox-help = help/configtab_db8.htm
-configuration-database-export-dbox-help = help/configtab_db9.htm
-configuration-database-export-single-dbox-help = help/configtab_db10.htm
-configuration-database-plugin-setting-help = help/configtab_db11.htm
-configuration-database-default-indexes-help = help/configtab_db12.htm
-configuration-database-attrenc-help = help/configtab_db13.htm
-configuration-database-attrenc-add-dbox-help = help/configtab_db14.htm
-configuration-database-attrenc-method-select-dbox-help = help/configtab_db15.htm
-
-;Chaining Database Icon
-configuration-new-chaining-instance-dbox-help = help/configtab_chaindb.htm
-configuration-chaining-settings-help = help/configtab_chaindb2.htm
-configuration-chaining-settings-ctrl-chooser-dbox-help = help/configtab_chaindb3.htm
-configuration-chaining-settings-comp-chooser-dbox-help = help/configtab_chaindb4.htm
-configuration-chaining-default-help = help/configtab_chaindb5.htm
-configuration-chaining-connection-help = help/configtab_chaindb6.htm
-configuration-chaining-authentication-help = help/configtab_chaindb7.htm
-
-;LDBM Database Instance Icon
-configuration-new-ldbm-instance-dbox-help = help/configtab_ldbmdb.htm
-
-;Mapping Tree/Suffix
-configuration-mapping-setting-help = help/configtab_maptree.htm
-configuration-mapping-add-backend-dbox-help = help/configtab_maptree2.htm
-configuration-mapping-database-help = help/configtab_maptree3.htm
-configuration-mapping-referral-help = help/configtab_maptree4.htm
-configuration-new-mapping-node-dbox-help = help/configtab_maptree5.htm
-configuration-new-mapping-sub-suffix-dbox-help = help/configtab_maptree6.htm
-configuration-confirm-delete-suffix-dbox-help = help/configtab_maptree7.htm
-
-
-;Schema Icon
-configuration-schema-objclass-help = help/configtab_schema.htm
-configuration-schema-objclass-create-dbox-help = help/configtab_schema2.htm
-configuration-schema-attr-help = help/configtab_schema3.htm
-configuration-schema-attr-create-dbox-help = help/configtab_schema4.htm
-configuration-schema-mrule-help = help/configtab_schema5.htm
-
-;Replication Agreements Icon
-configuration-replication-legacyconsumersettings-help = help/configtab_replication.htm
-configuration-replication-suppliersettings-help = help/configtab_replication2.htm
-configuration-replication-replicasettings-help = help/configtab_replication3.htm
-configuration-replication-summary-help = help/configtab_replication4.htm
-configuration-replication-schedule-help = help/configtab_replication5.htm
-configuration-replication-content-help = help/configtab_replication6.htm
-configuration-replication-host-dbox-help = help/configtab_replication7.htm
-configuration-replication-export-help = help/configtab_replication8.htm
-
-;Logs Icon
-configuration-logs-access-help = help/configtab_logs.htm
-configuration-logs-error-help = help/configtab_logs2.htm
-configuration-logs-audit-help = help/configtab_logs3.htm
-
-;Plugins Icons
-configuration-plugins-help = help/configtab_plugins.htm
-
-
-;REPLICATION AGREEMENT WIZARD
-replication-wizard-content-help = help/replication_wizard.htm
-replication-wizard-schedule-help = help/replication_wizard2.htm
-replication-wizard-consumerinit-help = help/replication_wizard3.htm
-replication-wizard-summary-help = help/replication_wizard4.htm
-replication-wizard-legacyrmmrname-help = help/replication_wizard5.htm
+; --- BEGIN COPYRIGHT BLOCK ---
+; This Program is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free Software
+; Foundation; version 2 of the License.
+;
+; This Program is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+; FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License along with
+; this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+; Place, Suite 330, Boston, MA 02111-1307 USA.
+;
+; In addition, as a special exception, Red Hat, Inc. gives You the additional
+; right to link the code of this Program with code not covered under the GNU
+; General Public License ("Non-GPL Code") and to distribute linked combinations
+; including the two, subject to the limitations in this paragraph. Non-GPL Code
+; permitted under this exception must only link to the code of this Program
+; through those well defined interfaces identified in the file named EXCEPTION
+; found in the source code files (the "Approved Interfaces"). The files of
+; Non-GPL Code may instantiate templates or use macros or inline functions from
+; the Approved Interfaces without causing the resulting work to be covered by
+; the GNU General Public License. Only Red Hat, Inc. may make changes or
+; additions to the list of Approved Interfaces. You must obey the GNU General
+; Public License in all respects for all of the Program code and other code used
+; in conjunction with the Program except the Non-GPL Code covered by this
+; exception. If you modify this file, you may extend this exception to your
+; version of the file, but you are not obligated to do so. If you do not wish to
+; provide this exception without modification, you must delete this exception
+; statement from your version and license this file solely under the GPL without
+; exception.
+;
+;
+; Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
+; Copyright (C) 2007 Red Hat, Inc.
+; All rights reserved.
+; --- END COPYRIGHT BLOCK ---
+
+;-------------------------------------------MAPPINGS
+; UI Reference Token = help/filename.htm
+;-------------------------------------------PROGRAMS
+;
+;MENU BAR
+;
+;Help Menu from all tabs
+framework-menubar-contents = help/redir_agtoc.htm
+preferences-confirmation-help = help/helpmenu.htm
+framework-menubar-dochome = help/redir_dochome.htm
+
+;TASKS TAB
+tasks-backup-help = help/taskstab_bkup_restore.htm
+tasks-restore-help = help/taskstab_bkup_restore2.htm
+
+
+;CONFIGURATION TAB
+;Root Node
+configuration-system-settings-help = help/configtab_rootnode.htm
+configuration-system-performance-help = help/configtab_rootnode2.htm
+configuration-system-encryption-help = help/configtab_rootnode3.htm
+configuration-system-snmp-help = help/configtab_rootnode5.htm
+configuration-system-manager-help = help/configtab_rootnode6.htm
+configuration-system-sasl-help = help/configtab_rootnode7.htm
+configuration-system-sasl-add-dbox-help = help/configtab_rootnode8.htm
+configuration-system-sasl-mod-dbox-help = help/configtab_rootnode9.htm
+
+;Database Icon
+configuration-database-indexes-help = help/configtab_db.htm
+configuration-database-passwords-help = help/configtab_db2.htm
+configuration-database-accountlockout-help = help/configtab_db3.htm
+configuration-database-indexes-add-dbox-help = help/configtab_db4.htm
+configuration-database-settings-help = help/configtab_db5.htm
+configuration-database-import-ldap-dbox-help = help/configtab_db6.htm
+configuration-database-import-fastwire-dbox-help = help/configtab_db7.htm
+configuration-database-initialize-backend-dbox-help = help/configtab_db8.htm
+configuration-database-export-dbox-help = help/configtab_db9.htm
+configuration-database-export-single-dbox-help = help/configtab_db10.htm
+configuration-database-plugin-setting-help = help/configtab_db11.htm
+configuration-database-default-indexes-help = help/configtab_db12.htm
+configuration-database-attrenc-help = help/configtab_db13.htm
+configuration-database-attrenc-add-dbox-help = help/configtab_db14.htm
+configuration-database-attrenc-method-select-dbox-help = help/configtab_db15.htm
+
+;Chaining Database Icon
+configuration-new-chaining-instance-dbox-help = help/configtab_chaindb.htm
+configuration-chaining-settings-help = help/configtab_chaindb2.htm
+configuration-chaining-settings-ctrl-chooser-dbox-help = help/configtab_chaindb3.htm
+configuration-chaining-settings-comp-chooser-dbox-help = help/configtab_chaindb4.htm
+configuration-chaining-default-help = help/configtab_chaindb5.htm
+configuration-chaining-connection-help = help/configtab_chaindb6.htm
+configuration-chaining-authentication-help = help/configtab_chaindb7.htm
+
+;LDBM Database Instance Icon
+configuration-new-ldbm-instance-dbox-help = help/configtab_ldbmdb.htm
+
+;Mapping Tree/Suffix
+configuration-mapping-setting-help = help/configtab_maptree.htm
+configuration-mapping-add-backend-dbox-help = help/configtab_maptree2.htm
+configuration-mapping-database-help = help/configtab_maptree3.htm
+configuration-mapping-referral-help = help/configtab_maptree4.htm
+configuration-new-mapping-node-dbox-help = help/configtab_maptree5.htm
+configuration-new-mapping-sub-suffix-dbox-help = help/configtab_maptree6.htm
+configuration-confirm-delete-suffix-dbox-help = help/configtab_maptree7.htm
+
+
+;Schema Icon
+configuration-schema-objclass-help = help/configtab_schema.htm
+configuration-schema-objclass-create-dbox-help = help/configtab_schema2.htm
+configuration-schema-attr-help = help/configtab_schema3.htm
+configuration-schema-attr-create-dbox-help = help/configtab_schema4.htm
+configuration-schema-mrule-help = help/configtab_schema5.htm
+
+;Replication Agreements Icon
+configuration-replication-legacyconsumersettings-help = help/configtab_replication.htm
+configuration-replication-suppliersettings-help = help/configtab_replication2.htm
+configuration-replication-replicasettings-help = help/configtab_replication3.htm
+configuration-replication-summary-help = help/configtab_replication4.htm
+configuration-replication-schedule-help = help/configtab_replication5.htm
+configuration-replication-content-help = help/configtab_replication6.htm
+configuration-replication-host-dbox-help = help/configtab_replication7.htm
+configuration-replication-export-help = help/configtab_replication8.htm
+
+;Logs Icon
+configuration-logs-access-help = help/configtab_logs.htm
+configuration-logs-error-help = help/configtab_logs2.htm
+configuration-logs-audit-help = help/configtab_logs3.htm
+
+;Plugins Icons
+configuration-plugins-help = help/configtab_plugins.htm
+
+
+;REPLICATION AGREEMENT WIZARD
+replication-wizard-content-help = help/replication_wizard.htm
+replication-wizard-schedule-help = help/replication_wizard2.htm
+replication-wizard-consumerinit-help = help/replication_wizard3.htm
+replication-wizard-summary-help = help/replication_wizard4.htm
+replication-wizard-legacyrmmrname-help = help/replication_wizard5.htm
replication-wizard-attribute-help = help/replication_wizard6.htm
-;replication-wizard-cirsirselect-help = help/replication_wizard6.htm
-;replication-wizard-cirsirname-help = help/replication_wizard7.htm
-
+;replication-wizard-cirsirselect-help = help/replication_wizard6.htm
+;replication-wizard-cirsirname-help = help/replication_wizard7.htm
+
;WINDOWS SYNC AGREEMENT WIZARD
sync-wizard-agreement-help = help/synchronization_wizard1.htm
sync-wizard-content-help = help/synchronization_wizard2.htm
@@ -119,60 +149,60 @@ configuration-sync-summary-help = help/configtab_synchronization1.htm
configuration-sync-schedule-help = help/configtab_synchronization2.htm
configuration-sync-connection-help = help/configtab_synchronization3.htm
-
-;DIRECTORY TAB
-;Role Configuration
-configuration-role-member-filtered-help = help/dirtab_role.htm
-configuration-role-member-managed-help = help/dirtab_role2.htm
-configuration-role-member-nested-help = help/dirtab_role3.htm
-configuration-role-account-help = help/dirtab_role4.htm
-configuration-role-info-help = help/dirtab_role5.htm
-configuration-set-role = help/dirtab_role6.htm
-configuration-choose-role = help/dirtab_role7.htm
-
-;Fine-Grained Password Policy Configuration
-configuration-browser-passwords-help = help/dir_browser2.htm
-configuration-browser-accountlockout-help = help/dir_browser3.htm
-configuration-set-referral = help/dir_browser4.htm
-
-;CoS Configuration
-configuration-cos-attributes-help = help/dirtab_cos.htm
-configuration-cos-info-help = help/dirtab_cos2.htm
-configuration-cos-template-help = help/dirtab_cos3.htm
-
-;STATUS TAB
-;Replication Icon
-status-replication-help = help/statustab_replication.htm
-
-;Logs Icon
-status-logs-access-help = help/statustab_logs.htm
-status-logs-error-help = help/statustab_logs2.htm
-status-logs-audit-help = help/statustab_logs3.htm
-
-;Performance Counters Icon
-status-perfcounters-server-help = help/statustab_performance.htm
-status-perfcounters-database-help = help/statustab_performance2.htm
-
-;General Server Status
-status-server-general-help = help/statustab_general.htm
-
-;PROPERTY EDITOR WINDOWS
-property-main-help = help/property_editor.htm
-property-new-objectclass-dbox-help = help/property_editor2.htm
-property-new-attribute-dbox-help = help/property_editor3.htm
-property-main-create-help = help/property_editor4.htm
-
-;LDAP URL Construction Dialog
-configuration-construct-new-url-dbox-help = help/ldap_url.htm
-
-;Account Management Dialog
-configuration-user-account-help = help/account_mgmt.htm
-
-;VERY ADVANCED SEARCH DIALOG BOX
-search-dbox-veryadvanced-help = help/adv_search.htm
-
-;INSTALL
-menubar-newinstance-dbox-help = help/new_instance.htm
-
-; SUBTREE SELECTION DIALOG BOX
-subtree-selection-dbox-help = help/dir_browser.htm
+
+;DIRECTORY TAB
+;Role Configuration
+configuration-role-member-filtered-help = help/dirtab_role.htm
+configuration-role-member-managed-help = help/dirtab_role2.htm
+configuration-role-member-nested-help = help/dirtab_role3.htm
+configuration-role-account-help = help/dirtab_role4.htm
+configuration-role-info-help = help/dirtab_role5.htm
+configuration-set-role = help/dirtab_role6.htm
+configuration-choose-role = help/dirtab_role7.htm
+
+;Fine-Grained Password Policy Configuration
+configuration-browser-passwords-help = help/dir_browser2.htm
+configuration-browser-accountlockout-help = help/dir_browser3.htm
+configuration-set-referral = help/dir_browser4.htm
+
+;CoS Configuration
+configuration-cos-attributes-help = help/dirtab_cos.htm
+configuration-cos-info-help = help/dirtab_cos2.htm
+configuration-cos-template-help = help/dirtab_cos3.htm
+
+;STATUS TAB
+;Replication Icon
+status-replication-help = help/statustab_replication.htm
+
+;Logs Icon
+status-logs-access-help = help/statustab_logs.htm
+status-logs-error-help = help/statustab_logs2.htm
+status-logs-audit-help = help/statustab_logs3.htm
+
+;Performance Counters Icon
+status-perfcounters-server-help = help/statustab_performance.htm
+status-perfcounters-database-help = help/statustab_performance2.htm
+
+;General Server Status
+status-server-general-help = help/statustab_general.htm
+
+;PROPERTY EDITOR WINDOWS
+property-main-help = help/property_editor.htm
+property-new-objectclass-dbox-help = help/property_editor2.htm
+property-new-attribute-dbox-help = help/property_editor3.htm
+property-main-create-help = help/property_editor4.htm
+
+;LDAP URL Construction Dialog
+configuration-construct-new-url-dbox-help = help/ldap_url.htm
+
+;Account Management Dialog
+configuration-user-account-help = help/account_mgmt.htm
+
+;VERY ADVANCED SEARCH DIALOG BOX
+search-dbox-veryadvanced-help = help/adv_search.htm
+
+;INSTALL
+menubar-newinstance-dbox-help = help/new_instance.htm
+
+; SUBTREE SELECTION DIALOG BOX
+subtree-selection-dbox-help = help/dir_browser.htm
diff --git a/ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl b/ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl
index fd863638f..a08905d10 100755
--- a/ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl
+++ b/ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl
@@ -1,8 +1,42 @@
#!/usr/bin/perl
+# BEGIN COPYRIGHT BLOCK
+# This Program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; version 2 of the License.
+#
+# This Program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place, Suite 330, Boston, MA 02111-1307 USA.
+#
+# In addition, as a special exception, Red Hat, Inc. gives You the additional
+# right to link the code of this Program with code not covered under the GNU
+# General Public License ("Non-GPL Code") and to distribute linked combinations
+# including the two, subject to the limitations in this paragraph. Non-GPL Code
+# permitted under this exception must only link to the code of this Program
+# through those well defined interfaces identified in the file named EXCEPTION
+# found in the source code files (the "Approved Interfaces"). The files of
+# Non-GPL Code may instantiate templates or use macros or inline functions from
+# the Approved Interfaces without causing the resulting work to be covered by
+# the GNU General Public License. Only Red Hat, Inc. may make changes or
+# additions to the list of Approved Interfaces. You must obey the GNU General
+# Public License in all respects for all of the Program code and other code used
+# in conjunction with the Program except the Non-GPL Code covered by this
+# exception. If you modify this file, you may extend this exception to your
+# version of the file, but you are not obligated to do so. If you do not wish to
+# provide this exception without modification, you must delete this exception
+# statement from your version and license this file solely under the GPL without
+# exception.
+#
+#
+# Copyright (C) 2007 Red Hat, Inc.
+# All rights reserved.
+# END COPYRIGHT BLOCK
#------------------------------------------------------------------------
-## Copyright 1997 - Netscape Communications Corporation
-##
## NAME
## dbgen
##
| 0 |
4fb9a3f93e7f5d64c8ae7a7d2e0b22451c2c7efe
|
389ds/389-ds-base
|
Bug 611790 - fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166
https://bugzilla.redhat.com/show_bug.cgi?id=611790
Resolves: bug 611790
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166
Fix description: Catch possible NULL pointer in acl_match_macro_in_target().
|
commit 4fb9a3f93e7f5d64c8ae7a7d2e0b22451c2c7efe
Author: Noriko Hosoi <[email protected]>
Date: Mon Aug 23 14:58:24 2010 -0700
Bug 611790 - fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166
https://bugzilla.redhat.com/show_bug.cgi?id=611790
Resolves: bug 611790
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166
Fix description: Catch possible NULL pointer in acl_match_macro_in_target().
diff --git a/ldap/servers/plugins/acl/aclutil.c b/ldap/servers/plugins/acl/aclutil.c
index 496e655db..e1eb0ab14 100644
--- a/ldap/servers/plugins/acl/aclutil.c
+++ b/ldap/servers/plugins/acl/aclutil.c
@@ -286,7 +286,7 @@ aclutil_print_err (int rv , const Slapi_DN *sdn, const struct berval* val,
}
slapi_log_error( SLAPI_LOG_FATAL, plugin_name, "%s", lineptr);
- if (newline) slapi_ch_free((void **) &newline);
+ slapi_ch_free_string(&newline);
}
/***************************************************************************
@@ -549,7 +549,7 @@ aclutil_expand_paramString ( char *str, Slapi_Entry *e )
char **a_dns;
char *attrName;
char *s, *p;
- char *attrVal;
+ char *attrVal = NULL;
int i, len;
int ncomponents, type;
int rc = -1;
@@ -635,8 +635,7 @@ cleanup:
slapi_ldap_value_free ( a_dns );
slapi_ldap_value_free ( e_dns );
if ( 0 != rc ) /* error */ {
- slapi_ch_free ( (void **) &buf );
- buf = NULL;
+ slapi_ch_free_string ( &buf );
}
return buf;
@@ -779,11 +778,19 @@ acl_match_macro_in_target( const char *ndn, char * match_this,
/* we know it's got a $(dn) */
tmp_ptr = strstr(macro_prefix, ACL_TARGET_MACRO_DN_KEY);
+ if (!tmp_ptr) {
+ LDAPDebug(LDAP_DEBUG_ACL,"acl_match_macro_in_target: "
+ "Target macro DN key \"%s\" not found in \"%s\".\n",
+ ACL_TARGET_MACRO_DN_KEY, macro_prefix, 0);
+ slapi_ch_free_string(¯o_prefix);
+ return ret_val;
+ }
+
*tmp_ptr = '\0';
/* There may be a NULL prefix eg. match_this: ($dn),o=sun.com */
macro_prefix_len = strlen(macro_prefix);
if (macro_prefix_len == 0) {
- slapi_ch_free((void **) ¯o_prefix);
+ slapi_ch_free_string(¯o_prefix);
macro_prefix = NULL;
}
@@ -950,7 +957,7 @@ acl_match_macro_in_target( const char *ndn, char * match_this,
}
}
}/* contains an =* */
- slapi_ch_free((void **) ¯o_prefix);
+ slapi_ch_free_string(¯o_prefix);
}/* macro_prefix != NULL */
return(ret_val);
@@ -1119,7 +1126,7 @@ acl_match_prefix( char *macro_prefix, const char *ndn, int *exact_match) {
}
}
}
- slapi_ch_free((void **)&tmp_str);
+ slapi_ch_free_string(&tmp_str);
}
}/* while */
@@ -1211,13 +1218,13 @@ acl_strstr(char * s, char *substr) {
tmp_str = slapi_ch_strdup(s);
if ( (t = strstr(tmp_str, substr)) == NULL ) {
- slapi_ch_free((void **)&tmp_str);
+ slapi_ch_free_string(&tmp_str);
return(-1);
} else {
int l = 0;
*t = '\0';
l = strlen(tmp_str);
- slapi_ch_free((void **)&tmp_str);
+ slapi_ch_free_string(&tmp_str);
return(l);
}
}
@@ -1268,7 +1275,7 @@ acl_replace_str(char * s, char *substr, char* replace_with_str) {
strcat(patched, replace_with_str);
strcat(patched, suffix);
- slapi_ch_free((void **)&working_s);
+ slapi_ch_free_string(&working_s);
working_s = patched;
prefix = working_s;
@@ -1379,7 +1386,7 @@ void acl_ht_add_and_freeOld(acl_ht_t * acl_ht,
if ( (old_value = (char *)acl_ht_lookup( acl_ht, key)) != NULL ) {
acl_ht_remove( acl_ht, key);
- slapi_ch_free((void **)&old_value);
+ slapi_ch_free_string(&old_value);
}
PL_HashTableAdd( acl_ht, (const void *)pkey, value);
| 0 |
e6fc427ccccea996d206a60ea0cc5602cc625de7
|
389ds/389-ds-base
|
Issue 49845 - README does not contain complete information on building
Description: Update READNME.md with clearer instructions and requirements
for building the server. Also added a check for libasan
to configure.am.
relates: https://pagure.io/389-ds-base/issue/49845
Reviewed by: firstyear(Thanks!)
|
commit e6fc427ccccea996d206a60ea0cc5602cc625de7
Author: Mark Reynolds <[email protected]>
Date: Wed Feb 12 13:45:47 2020 -0500
Issue 49845 - README does not contain complete information on building
Description: Update READNME.md with clearer instructions and requirements
for building the server. Also added a check for libasan
to configure.am.
relates: https://pagure.io/389-ds-base/issue/49845
Reviewed by: firstyear(Thanks!)
diff --git a/README.md b/README.md
index a4422329b..2cfe5f441 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,70 @@ license agreement file called LICENSE.
Late-breaking news and information on the 389 Directory Server is
available on our wiki page:
- http://www.port389.org/
+ https://www.port389.org/
+
+Build Requirements (as of 2020-02-12)
+-------------------------------------
+
+nspr-devel
+nss-devel
+perl-generators
+openldap-devel
+libdb-devel
+cyrus-sasl-devel
+icu
+libicu-devel
+pcre-devel
+cracklib-devel
+libatomic
+clang
+gcc
+gcc-c++
+net-snmp-devel
+lm_sensors-devel
+bzip2-devel
+zlib-devel
+openssl-devel
+pam-devel
+systemd-units
+systemd-devel
+libasan
+cargo
+rust
+pkgconfig
+pkgconfig(systemd)
+pkgconfig(krb5)
+autoconf
+automake
+libtool
+doxygen
+libcmocka-devel
+libevent-devel
+python3-devel
+python3-setuptools
+python3-ldap
+python3-six
+python3-pyasn1
+python3-pyasn1-modules
+python3-dateutil
+python3-argcomplete
+python3-argparse-manpage
+python3-libselinux
+python3-policycoreutils
+rsync
+npm
+nodejs
+nspr-devel
+nss-devel
+openldap-devel
+libdb-devel
+cyrus-sasl-devel
+libicu-devel
+pcre-devel
+libtalloc-devel
+libevent-devel
+libtevent-devel
+systemd-devel
Building
--------
@@ -30,6 +93,10 @@ Building
sudo make install
sudo make lib389-install
+Note: **--enable-asan** is optional, and it should only be used for debugging/development purposes.
+
+See also: <https://www.port389.org/docs/389ds/development/building.html>
+
Testing
-------
@@ -45,5 +112,5 @@ More information
Please see our contributing guide online:
- http://www.port389.org/docs/389ds/contributing.html
+ https://www.port389.org/docs/389ds/contributing.html
diff --git a/configure.ac b/configure.ac
index 95772d763..78803cc9b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -150,6 +150,7 @@ AC_ARG_ENABLE(asan, AS_HELP_STRING([--enable-asan], [Enable gcc/clang address sa
[], [ enable_asan=no ])
AC_MSG_RESULT($enable_asan)
if test "$enable_asan" = yes ; then
+ PKG_CHECK_MODULES([ASAN], [libasan])
asan_cflags="-fsanitize=address -fno-omit-frame-pointer -lasan"
asan_rust_defs="-Z sanitizer=address"
else
| 0 |
027e8a4fbd4761a5c7ae4a9cc82befe4741e2dd5
|
389ds/389-ds-base
|
529909 - Update SELinux policy for SASL GSSAPI
The dirsrv SELinux policy needs some changes to allow SASL GSSAPI
authentication to work. We need to allow ns-slapd to read the
krb5.conf file and to create the in memory credentials cache. The
kerberos libraries also attempt to open the krb5.conf in write mode,
so we need to prevent those attempts from being audited.
|
commit 027e8a4fbd4761a5c7ae4a9cc82befe4741e2dd5
Author: Nathan Kinder <[email protected]>
Date: Fri Oct 30 08:44:34 2009 -0700
529909 - Update SELinux policy for SASL GSSAPI
The dirsrv SELinux policy needs some changes to allow SASL GSSAPI
authentication to work. We need to allow ns-slapd to read the
krb5.conf file and to create the in memory credentials cache. The
kerberos libraries also attempt to open the krb5.conf in write mode,
so we need to prevent those attempts from being audited.
diff --git a/selinux/dirsrv.te b/selinux/dirsrv.te
index 6dcabe1f1..60901f284 100644
--- a/selinux/dirsrv.te
+++ b/selinux/dirsrv.te
@@ -85,7 +85,7 @@ libs_use_shared_libs(dirsrv_t)
allow dirsrv_t self:fifo_file { read write };
# process stuff
-allow dirsrv_t self:process { getsched setsched signal_perms};
+allow dirsrv_t self:process { getsched setsched setfscreate signal_perms};
allow dirsrv_t self:capability { sys_nice setuid setgid chown dac_override fowner };
# semaphores
@@ -132,6 +132,10 @@ files_tmp_filetrans(dirsrv_t, dirsrv_tmp_t, { file dir })
fs_getattr_all_fs(dirsrv_t)
kernel_read_system_state(dirsrv_t)
+# kerberos config for SASL GSSAPI
+kerberos_read_config(dirsrv_t)
+kerberos_dontaudit_write_config(dirsrv_t)
+
# Networking basics
sysnet_dns_name_resolve(dirsrv_t)
corenet_all_recvfrom_unlabeled(dirsrv_t)
| 0 |
c3a69bb19ee0733027bdea5da9e4bcbb9b0cd0ba
|
389ds/389-ds-base
|
Issue 5761 - Worker thread dynamic management (#5796)
* Issue 5761 - Worker thread dynamic management
Objectives:
Allow to configure the number of worker threads without having to restart the server
Decrease the worker thread global mutex contention but removing the associated condition variable
==> Increase the "searchrate" performance
Solution: See https://github.com/389ds/389ds.github.io/blob/main/docs/389ds/design/worker-threads.md
Issue: 5761
Reviewed by: @tbordaz (Thanks!)
|
commit c3a69bb19ee0733027bdea5da9e4bcbb9b0cd0ba
Author: progier389 <[email protected]>
Date: Mon Sep 18 12:48:41 2023 +0200
Issue 5761 - Worker thread dynamic management (#5796)
* Issue 5761 - Worker thread dynamic management
Objectives:
Allow to configure the number of worker threads without having to restart the server
Decrease the worker thread global mutex contention but removing the associated condition variable
==> Increase the "searchrate" performance
Solution: See https://github.com/389ds/389ds.github.io/blob/main/docs/389ds/design/worker-threads.md
Issue: 5761
Reviewed by: @tbordaz (Thanks!)
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index d8bc58dd9..145d69575 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -9,6 +9,8 @@
import logging
import pytest
+import os
+from lib389 import pid_from_file
from lib389.tasks import *
from lib389.topologies import topology_m2, topology_st as topo
from lib389.utils import *
@@ -16,15 +18,16 @@ from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
from lib389.idm.group import Groups
from lib389.backend import *
-from lib389.config import LDBMConfig, BDB_LDBMConfig
+from lib389.config import LDBMConfig, BDB_LDBMConfig, Config
from lib389.cos import CosPointerDefinitions, CosTemplates
from lib389.backend import Backends
-from lib389.monitor import MonitorLDBM
+from lib389.monitor import MonitorLDBM, Monitor
from lib389.plugins import ReferentialIntegrityPlugin
pytestmark = pytest.mark.tier0
USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX
+PSTACK_CMD = '/usr/bin/pstack'
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
@@ -590,6 +593,78 @@ def test_require_internal_index(topo):
user.delete()
+def get_pstack(pid):
+ """Get a pstack of the pid."""
+ res = subprocess.run((PSTACK_CMD, str(pid)), stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, encoding='utf-8')
+ return str(res.stdout)
+
+def check_number_of_threads(cfgnbthreads, monitor, pid):
+ monresults = monitor.get_threads()
+ # Add waitingthreads and busythreads
+ waiting = int(monresults[3][0])
+ busy = int(monresults[4][0])
+ log.info('Number of threads: configured={cfgnbthreads} waiting={waiting} busy={busy}')
+
+ monnbthreads = int(monresults[3][0]) + int(monresults[4][0]);
+ assert monnbthreads == cfgnbthreads
+ if os.path.isfile(PSTACK_CMD):
+ pstackresult = get_pstack(pid)
+ assert pstackresult.count('connection_threadmain') == cfgnbthreads
+ else:
+ log.info('pstack is not installed ==> skipping pstack test.')
+
+def test_changing_threadnumber(topo):
+ """Test nsslapd-ignore-virtual-attrs configuration attribute
+
+ :id: 11bcf426-061c-11ee-8c22-482ae39447e5
+ :setup: Standalone instance
+ :steps:
+ 1. Check that feature is supported
+ 2 Get nsslapd-threadnumber original value
+ 3. Change nsslapd-threadnumber to 40
+ 4. Check that monitoring and pstack shows the same number than configured number of threads
+ 5. Create a user and add it a group
+ 6. Change nsslapd-threadnumber to 10
+ 7. Check that monitoring and pstack shows the same number than configured number of threads
+ 8. Set back the number of threads to the original value
+ 9. Check that monitoring and pstack shows the same number than configured number of threads
+ :expectedresults:
+ 1. Skip the test if monitoring result does not have the new attributes.
+ 2. Success
+ 3. Success
+ 4. Success
+ 5. Success
+ 6. Success
+ 7. Success
+ 8. Success
+ 9. Success
+ """
+ inst = topo.standalone
+ pid = pid_from_file(inst.pid_file())
+ assert pid != 0 and pid != None
+
+ config = Config(inst)
+ cfgattr = 'nsslapd-threadnumber'
+ cfgnbthreads = config.get_attr_vals_utf8(cfgattr)[0]
+
+ monitor = Monitor(inst)
+ monresults = monitor.get_threads()
+ if len(monresults) < 5:
+ pytest.skip("This version does not support dynamic change of nsslapd-threadnumber without restart.")
+
+ config.replace(cfgattr, '40');
+ time.sleep(3)
+ check_number_of_threads(40, monitor, pid)
+
+ config.replace(cfgattr, '10');
+ # No need to wait here (threads are closed before config change result is returned)
+ check_number_of_threads(10, monitor, pid)
+
+ config.replace(cfgattr, cfgnbthreads);
+ time.sleep(3)
+ check_number_of_threads(int(cfgnbthreads), monitor, pid)
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/sync/sync_persist.c b/ldap/servers/plugins/sync/sync_persist.c
index 298a39b39..7e5377d95 100644
--- a/ldap/servers/plugins/sync/sync_persist.c
+++ b/ldap/servers/plugins/sync/sync_persist.c
@@ -639,6 +639,8 @@ sync_persist_add(Slapi_PBlock *pb)
assert(req); /* avoid gcc_analyzer warning */
assert(pb); /* avoid gcc_analyzer warning */
slapi_pblock_get(pb, SLAPI_OPERATION, &req->req_orig_op); /* neede to access original op */
+ /* Prevent worker thread to reuse the operation as sync_send_results thread need it */
+ g_pc_do_not_reuse_operation();
req->req_pblock = sync_pblock_copy(pb);
slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &base);
req->req_orig_base = slapi_ch_strdup(base);
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index a30511c97..797c389f7 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -23,12 +23,14 @@
#include "prlog.h" /* for PR_ASSERT */
#include "fe.h"
#include <sasl/sasl.h>
+#include "wthreads.h"
+#include "snmp_collator.h"
#if defined(LINUX)
#include <netinet/tcp.h> /* for TCP_CORK */
#endif
typedef Connection work_q_item;
-static void connection_threadmain(void *arg);
+static void *connection_threadmain(void *arg);
static void connection_add_operation(Connection *conn, Operation *op);
static void connection_free_private_buffer(Connection *conn);
static void op_copy_identity(Connection *conn, Operation *op);
@@ -38,99 +40,91 @@ static void log_ber_too_big_error(const Connection *conn,
ber_len_t ber_len,
ber_len_t maxbersize);
-static PRStack *op_stack; /* stack of Slapi_Operation * objects so we don't have to malloc/free every time */
-static PRInt32 op_stack_size; /* size of op_stack */
+static void add_work_q(work_q_item *);
-struct Slapi_op_stack
-{
- PRStackElem stackelem; /* must be first in struct for PRStack to work */
- Slapi_Operation *op;
-};
-static void add_work_q(work_q_item *, struct Slapi_op_stack *);
-static work_q_item *get_work_q(struct Slapi_op_stack **);
+static time_t conn_next_warning_time;
/*
- * We maintain a global work queue of items that have not yet
- * been handed off to an operation thread.
+ * The number of jobs that we may queue before leting the listener threads wait.
+ * Does need to queue more than OPS_RATE_MAX*FLOW_CONTROL_DELAY jobs
+ * because it would requires more than FLOW_CONTROL_DELAY to empty the queue anyway.
+ * The same way, cannot have more jobs than the number of connections
+ * (because we need to read the ber)
*/
-struct Slapi_work_q
-{
- PRStackElem stackelem; /* must be first in struct for PRStack to work */
- work_q_item *work_item;
- struct Slapi_op_stack *op_stack_obj;
- struct Slapi_work_q *next_work_item;
-};
+#define OPS_RATE_MAX 100 /* Maximum number of kilo operations per seconds */
+#define FLOW_CONTROL_DELAY 20 /* sleep delay in ms */
+#define MAX_QUEUED_JOBS min(the_connection_table->size, OPS_RATE_MAX*FLOW_CONTROL_DELAY)
-static struct Slapi_work_q *head_work_q = NULL; /* global work queue head */
-static struct Slapi_work_q *tail_work_q = NULL; /* global work queue tail */
-static pthread_mutex_t work_q_lock; /* protects head_conn_q and tail_conn_q */
-static pthread_cond_t work_q_cv; /* used by operation threads to wait for work -
- * when there is a conn in the queue waiting
- * to be processed */
-static PRInt32 work_q_size; /* size of conn_q */
-static PRInt32 work_q_size_max; /* high water mark of work_q_size */
-#define WORK_Q_EMPTY (work_q_size == 0)
-static PRStack *work_q_stack; /* stack of work_q structs so we don't have to malloc/free every time */
-static PRInt32 work_q_stack_size; /* size of work_q_stack */
-static PRInt32 work_q_stack_size_max; /* max size of work_q_stack */
-static PRInt32 op_shutdown = 0; /* if non-zero, server is shutting down */
+#define CONN_FLOW_CONTROL_MSG_TIMEOUT 60 /* Delay between warning message */
#define LDAP_SOCKET_IO_BUFFER_SIZE 512 /* Size of the buffer we give to the I/O system for reads */
-static struct Slapi_work_q *
-create_work_q(void)
+
+static inline void __attribute__((always_inline))
+ll_init(ll_list_t *elmt, void *data)
{
- struct Slapi_work_q *work_q = (struct Slapi_work_q *)PR_StackPop(work_q_stack);
- if (!work_q) {
- work_q = (struct Slapi_work_q *)slapi_ch_malloc(sizeof(struct Slapi_work_q));
- } else {
- PR_AtomicDecrement(&work_q_stack_size);
- }
- return work_q;
+ elmt->data = data;
+ elmt->head = NULL;
+ elmt->next = elmt->prev = elmt;
}
-static void
-destroy_work_q(struct Slapi_work_q **work_q)
-{
- if (work_q && *work_q) {
- (*work_q)->op_stack_obj = NULL;
- (*work_q)->work_item = NULL;
- PR_StackPush(work_q_stack, (PRStackElem *)*work_q);
- PR_AtomicIncrement(&work_q_stack_size);
- if (work_q_stack_size > work_q_stack_size_max) {
- work_q_stack_size_max = work_q_stack_size;
- }
+static inline void __attribute__((always_inline))
+ll_headinit(ll_head_t *head)
+{
+ ll_init(&head->h, NULL);
+ head->h.head = head;
+ head->size = head->hwm = 0;
+}
+
+static inline void __attribute__((always_inline))
+ll_link_before(ll_list_t *list, ll_list_t *elmt)
+{
+ elmt->prev = list->prev;
+ elmt->next = list;
+ elmt->head = list->head;
+ list->prev->next = elmt;
+ list->prev = elmt;
+ elmt->head->size++;
+ if (elmt->head->size > elmt->head->hwm) {
+ elmt->head->hwm = elmt->head->size;
}
}
-static struct Slapi_op_stack *
-connection_get_operation(void)
+static inline void __attribute__((always_inline))
+ll_link_after(ll_list_t *list, ll_list_t *elmt)
{
- struct Slapi_op_stack *stack_obj = (struct Slapi_op_stack *)PR_StackPop(op_stack);
- if (!stack_obj) {
- stack_obj = (struct Slapi_op_stack *)slapi_ch_calloc(1, sizeof(struct Slapi_op_stack));
- stack_obj->op = operation_new(plugin_build_operation_action_bitmap(0,
- plugin_get_server_plg()));
- } else {
- PR_AtomicDecrement(&op_stack_size);
- if (!stack_obj->op) {
- stack_obj->op = operation_new(plugin_build_operation_action_bitmap(0,
- plugin_get_server_plg()));
- } else {
- operation_init(stack_obj->op,
- plugin_build_operation_action_bitmap(0, plugin_get_server_plg()));
- }
+ elmt->prev = list;
+ elmt->next = list->next;
+ elmt->head = list->head;
+ list->next->prev = elmt;
+ list->next = elmt;
+ elmt->head->size++;
+ if (elmt->head->size > elmt->head->hwm) {
+ elmt->head->hwm = elmt->head->size;
}
- return stack_obj;
}
-static void
-connection_done_operation(Connection *conn, struct Slapi_op_stack *stack_obj)
+static inline void __attribute__((always_inline))
+ll_unlink(ll_list_t *elmt)
{
- operation_done(&(stack_obj->op), conn);
- PR_StackPush(op_stack, (PRStackElem *)stack_obj);
- PR_AtomicIncrement(&op_stack_size);
+ elmt->prev->next = elmt->next;
+ elmt->next->prev = elmt->prev;
+ elmt->next = elmt->prev = elmt;
+ elmt->head->size--;
+ elmt->head = NULL;
+}
+
+static inline int __attribute__((always_inline))
+ll_is_empty(ll_head_t *head)
+{
+ return head->h.next == &head->h;
+}
+
+static inline int __attribute__((always_inline))
+min(int x, int y)
+{
+ return x<y ? x : y;
}
/*
@@ -425,71 +419,194 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib
conn->c_serveripaddr = slapi_ch_strdup(str_destip);
}
-/* Create a pool of threads for handling the operations */
+/*
+ * worker thread context destructor.
+ */
+void
+op_thread_tinfo_destroy(pc_tinfo_t *tinfo)
+{
+ /* Removing context from snmp slot */
+ pthread_mutex_lock(&g_pc.snmp.mutex);
+ if (tinfo->idx <= g_pc.snmp.nbthreads &&
+ g_pc.snmp.threads[tinfo->idx] == tinfo) {
+ g_pc.snmp.threads[tinfo->idx] = NULL;
+ }
+ pthread_mutex_unlock(&g_pc.snmp.mutex);
+ /* Unqueing context from waiting/busy queues */
+ pthread_mutex_lock(&g_pc.mutex);
+ ll_unlink(&tinfo->q);
+ pthread_mutex_unlock(&g_pc.mutex);
+ /* Clear the snmp context */
+ snmp_thread_counters_cleanup(&tinfo->snmp_vars);
+ /* Free the operation */
+ operation_free(&tinfo->op, NULL);
+ /* And destroy the context */
+ slapi_ch_free((void**)&tinfo);
+}
+
+/*
+ * Alloc and initialize a worker thread context
+ * The caller should hold g_pc.snmp.mutex
+ */
+pc_tinfo_t *
+op_thread_tinfo_init(int thread_idx)
+{
+ pc_tinfo_t *tinfo = (pc_tinfo_t*) slapi_ch_calloc(1, sizeof(*tinfo));
+ int rc = 0;
+
+ ll_init(&tinfo->q, tinfo);
+ tinfo->idx = thread_idx;
+ snmp_thread_counters_init(&tinfo->snmp_vars);
+ tinfo->op = operation_new(plugin_build_operation_action_bitmap(0, plugin_get_server_plg()));
+ if (!tinfo->op) {
+ slapi_log_err(SLAPI_LOG_ERR, "op_thread_tinfo_init",
+ "Failed to create worker thread [%d] operation.\n", thread_idx);
+ } else if ((rc = pthread_mutex_init(&tinfo->mutex, NULL))) {
+ slapi_log_err(SLAPI_LOG_ERR, "op_thread_tinfo_init",
+ "Failed to initialize worker thread [%d] lock.\n", thread_idx);
+ } else if ((rc = pthread_cond_init(&tinfo->cv, NULL))) {
+ slapi_log_err(SLAPI_LOG_ERR, "op_thread_tinfo_init",
+ "Failed to initialize worker thread [%d] condition variable.\n", thread_idx);
+ } else {
+ return tinfo;
+ }
+ op_thread_tinfo_destroy(tinfo);
+ return NULL;
+}
+
+/*
+ * Increase or decrease the number of worker threads
+ */
+void
+op_thread_set_threads_number(int threadsnumber)
+{
+ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+ int oldnbthreads = 0;
+ pc_tinfo_t **oldthreads = NULL;
+ int len = 0;
+ int rc = 0;
+
+ pthread_mutex_lock(&mutex);
+ /* Adjust snmp variables */
+ pthread_mutex_lock(&g_pc.snmp.mutex);
+ oldnbthreads = g_pc.snmp.nbthreads;
+ oldthreads = g_pc.snmp.threads;
+ if (threadsnumber>0 && !g_pc.shutdown) {
+ g_pc.snmp.threads = (pc_tinfo_t **) slapi_ch_calloc(threadsnumber+1, sizeof(pc_tinfo_t*));
+ if (oldthreads) {
+ len = min(oldnbthreads, threadsnumber)+1;
+ memcpy(g_pc.snmp.threads, oldthreads, len*sizeof(pc_tinfo_t*));
+ }
+ } else {
+ g_pc.snmp.threads = NULL;
+ threadsnumber = 0;
+ }
+ g_pc.snmp.nbthreads = threadsnumber;
+ /* Create tinfo for new threads and start the thread */
+ for (size_t i=oldnbthreads+1; i<=threadsnumber; i++) {
+ pc_tinfo_t *tinfo = op_thread_tinfo_init(i);
+ g_pc.snmp.threads[i] = tinfo;
+ rc = pthread_create(&tinfo->tid, NULL, connection_threadmain, tinfo);
+ if (rc) {
+ slapi_log_err(SLAPI_LOG_ERR, "init_op_threads",
+ "Worker thread creation failed, error %d (%s)\n",
+ rc, strerror(rc));
+ op_thread_tinfo_destroy(tinfo);
+ } else {
+ g_incr_active_threadcnt();
+ }
+ }
+ pthread_mutex_unlock(&g_pc.snmp.mutex);
+
+ if (threadsnumber<oldnbthreads) {
+ /*
+ * Need to close some threads
+ * Lets save the tids to join the threads
+ * (tinfo may be freed at any time once the closing flag is set
+ */
+ pthread_t *tids = (pthread_t *)slapi_ch_calloc(oldnbthreads+1, sizeof (*tids));
+ /* Mark the removed threads as closing */
+ for (size_t i=threadsnumber+1; i<=oldnbthreads; i++) {
+ pc_tinfo_t *tinfo = oldthreads[i];
+ if (tinfo) {
+ tids[i] = tinfo->tid;
+ pthread_mutex_lock(&tinfo->mutex);
+ tinfo->closing = 1;
+ pthread_cond_signal(&tinfo->cv);
+ pthread_mutex_unlock(&tinfo->mutex);
+ }
+ }
+ /* Wait until the removed threads are really finished */
+ for (size_t i=threadsnumber+1; i<=oldnbthreads; i++) {
+ pthread_join(tids[i], NULL);
+ }
+ slapi_ch_free((void**)&tids);
+ }
+ /* lets reset hwm statistics */
+ pthread_mutex_lock(&g_pc.snmp.mutex);
+ g_pc.waiting_threads.hwm = 0;
+ g_pc.busy_threads.hwm = 0;
+ g_pc.waiting_jobs.hwm = 0;
+ g_pc.jobs_free_list.hwm = 0;
+ pthread_mutex_unlock(&g_pc.snmp.mutex);
+ pthread_mutex_unlock(&mutex);
+ slapi_ch_free((void**)&oldthreads);
+}
+
+/*
+ * Collect op_threads monitoring statistics
+ */
+void
+op_thread_get_stat(op_thread_stats_t *stats)
+{
+ pthread_mutex_lock(&g_pc.mutex);
+ stats->waitingthreads = g_pc.waiting_threads.size;
+ stats->busythreads = g_pc.busy_threads.size;
+ stats->maxbusythreads = g_pc.busy_threads.hwm;
+ stats->waitingjobs = g_pc.waiting_jobs.size;
+ stats->maxwaitingjobs = g_pc.waiting_jobs.hwm;
+ pthread_mutex_unlock(&g_pc.mutex);
+}
+
+/*
+ * Create a pool of threads for handling the operations
+ */
void
init_op_threads()
{
- pthread_condattr_t condAttr;
int32_t max_threads = config_get_threadnumber();
+ size_t nb_free_slots = MAX_QUEUED_JOBS;
+ ll_list_t *free_slots;
int32_t rc;
- int32_t *threads_indexes;
/* Initialize the locks and cv */
- if ((rc = pthread_mutex_init(&work_q_lock, NULL)) != 0) {
+ if ((rc = pthread_mutex_init(&g_pc.mutex, NULL)) != 0 ||
+ (rc = pthread_mutex_init(&g_pc.snmp.mutex, NULL)) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "init_op_threads",
- "Cannot create new lock. error %d (%s)\n",
+ "Failed to initialize a lock. error %d (%s)\n",
rc, strerror(rc));
exit(-1);
}
- if ((rc = pthread_condattr_init(&condAttr)) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads",
- "Cannot create new condition attribute variable. error %d (%s)\n",
- rc, strerror(rc));
- exit(-1);
- } else if ((rc = pthread_condattr_setclock(&condAttr, CLOCK_MONOTONIC)) != 0) {
- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads",
- "Cannot set condition attr clock. error %d (%s)\n",
- rc, strerror(rc));
- exit(-1);
- } else if ((rc = pthread_cond_init(&work_q_cv, &condAttr)) != 0) {
+ ll_headinit(&g_pc.waiting_threads);
+ ll_headinit(&g_pc.busy_threads);
+ ll_headinit(&g_pc.waiting_jobs);
+ ll_headinit(&g_pc.jobs_free_list);
+ free_slots = (ll_list_t *)slapi_ch_calloc(nb_free_slots, sizeof(ll_list_t));
+ g_pc.jobs_free_list.h.data = free_slots;
+ for (size_t i=0; i<nb_free_slots; i++) {
+ ll_link_before(&g_pc.jobs_free_list.h, &free_slots[i]);
+ }
+ g_pc.threadnumber_cb = op_thread_set_threads_number;
+ g_pc.getstats_cb = op_thread_get_stat;
+ rc = pthread_key_create(&g_pc.tinfo_key, (void (*)(void*))op_thread_tinfo_destroy);
+ if (rc) {
slapi_log_err(SLAPI_LOG_ERR, "init_op_threads",
- "Cannot create new condition variable. error %d (%s)\n",
+ "Failed to initialize thread specific data key. error %d (%s)\n",
rc, strerror(rc));
exit(-1);
}
- pthread_condattr_destroy(&condAttr); /* no longer needed */
-
- work_q_stack = PR_CreateStack("connection_work_q");
- op_stack = PR_CreateStack("connection_operation");
- alloc_per_thread_snmp_vars(max_threads);
- init_thread_private_snmp_vars();
-
-
- threads_indexes = (int32_t *) slapi_ch_calloc(max_threads, sizeof(int32_t));
- for (size_t i = 0; i < max_threads; i++) {
- threads_indexes[i] = i + 1; /* idx 0 is reserved for global snmp_vars */
- }
-
- /* start the operation threads */
- for (size_t i = 0; i < max_threads; i++) {
- PR_SetConcurrency(4);
- if (PR_CreateThread(PR_USER_THREAD,
- (VFP)(void *)connection_threadmain, (void *) &threads_indexes[i],
- PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
- PR_UNJOINABLE_THREAD,
- SLAPD_DEFAULT_THREAD_STACKSIZE) == NULL) {
- int prerr = PR_GetError();
- slapi_log_err(SLAPI_LOG_ERR, "init_op_threads",
- "PR_CreateThread failed, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
- prerr, slapd_pr_strerror(prerr));
- } else {
- g_incr_active_threadcnt();
- }
- }
- /* Here we should free thread_indexes, but because of the dynamic of the new
- * threads (connection_threadmain) we are not sure when it can be freed.
- * Let's accept that unique initialization leak (typically 32 to 64 bytes)
- */
+ g_pc.nbcpus = util_get_capped_hardware_threads(1, MAX_THREADS);
+ op_thread_set_threads_number(max_threads);
}
static void
@@ -947,14 +1064,6 @@ connection_free_private_buffer(Connection *conn)
*/
-/* Connection status values returned by
- connection_wait_for_new_work(), connection_read_operation(), etc. */
-
-#define CONN_FOUND_WORK_TO_DO 0
-#define CONN_SHUTDOWN 1
-#define CONN_NOWORK 2
-#define CONN_DONE 3
-#define CONN_TIMEDOUT 4
#define CONN_TURBO_TIMEOUT_INTERVAL 100 /* milliseconds */
#define CONN_TURBO_TIMEOUT_MAXIMUM 5 /* attempts * interval IE 2000ms with 400 * 5 */
@@ -963,57 +1072,81 @@ connection_free_private_buffer(Connection *conn)
#define CONN_TURBO_HYSTERESIS 0 /* avoid flip flopping in and out of turbo mode */
void
-connection_make_new_pb(Slapi_PBlock *pb, Connection *conn)
+connection_set_new_op_in_pb(Slapi_PBlock *pb, pc_tinfo_t *tinfo, Connection *conn)
{
- struct Slapi_op_stack *stack_obj = NULL;
- /* we used to malloc/free the pb for each operation - now, just use a local stack pb
- * in connection_threadmain, and just clear it out
+ /*
+ * Init the pblock with conn and a fresh operation
+ * Operation no more realloced or got from a stack but
+ * it is stored in worker thread context and reused afterwards
+ * Caller should held conn->c_mutex
*/
- /* *ppb = (Slapi_PBlock *) slapi_ch_calloc( 1, sizeof(Slapi_PBlock) ); */
- /* *ppb = slapi_pblock_new(); */
+ Slapi_Operation *op = tinfo->op;
+ operation_init(op, plugin_build_operation_action_bitmap(0, plugin_get_server_plg()));
slapi_pblock_set(pb, SLAPI_CONNECTION, conn);
- stack_obj = connection_get_operation();
- slapi_pblock_set(pb, SLAPI_OPERATION, stack_obj->op);
- slapi_pblock_set_op_stack_elem(pb, stack_obj);
- connection_add_operation(conn, stack_obj->op);
+
+ slapi_pblock_set(pb, SLAPI_OPERATION, op);
+ connection_add_operation(conn, op);
}
-int
-connection_wait_for_new_work(Slapi_PBlock *pb, int32_t interval)
+conn_status_t
+connection_wait_for_new_work(Slapi_PBlock *pb, pc_tinfo_t *tinfo)
{
- int ret = CONN_FOUND_WORK_TO_DO;
- work_q_item *wqitem = NULL;
- struct Slapi_op_stack *op_stack_obj = NULL;
-
- pthread_mutex_lock(&work_q_lock);
-
- while (!op_shutdown && WORK_Q_EMPTY) {
- if (interval == 0 ) {
- pthread_cond_wait(&work_q_cv, &work_q_lock);
- } else {
- struct timespec current_time = {0};
- clock_gettime(CLOCK_MONOTONIC, &current_time);
- current_time.tv_sec += interval;
- pthread_cond_timedwait(&work_q_cv, &work_q_lock, &current_time);
+ struct conn *conn = NULL;
+ ll_list_t *job = NULL;
+
+ pthread_mutex_lock(&tinfo->mutex);
+ for (;;) {
+ if (tinfo->conn) {
+ /* A job have been specifically assigned for this thread */
+ conn = tinfo->conn;
+ tinfo->conn = NULL;
+ pthread_mutex_unlock(&tinfo->mutex);
+ pthread_mutex_lock(&(conn->c_mutex));
+ connection_set_new_op_in_pb(pb, tinfo, conn);
+ pthread_mutex_unlock(&(conn->c_mutex));
+ return CONN_FOUND_WORK_TO_DO;
}
+ /* Nothing specifically assigned for this thread */
+ if (tinfo->closing) {
+ pthread_mutex_unlock(&tinfo->mutex);
+ return CONN_SHUTDOWN;
+ }
+ pthread_mutex_unlock(&tinfo->mutex);
+ pthread_mutex_lock(&g_pc.mutex);
+ if (ll_is_empty(&g_pc.waiting_jobs)) {
+ /* And nothing either in the global queue ==> lets wait. */
+ ll_unlink(&tinfo->q);
+ ll_link_after(&g_pc.waiting_threads.h, &tinfo->q);
+ pthread_mutex_unlock(&g_pc.mutex);
+ pthread_mutex_lock(&tinfo->mutex);
+ if (tinfo->conn) {
+ continue;
+ }
+ pthread_cond_wait(&tinfo->cv, &tinfo->mutex);
+ continue;
+ }
+ /*
+ * Lets be sure that thread is in busy mode
+ * It is already the case if add_q_work has set tinfo->conn
+ * but not if pthread_cond_wait get interrupted ( by SIGPIPE ? )
+ */
+ ll_unlink(&tinfo->q);
+ ll_link_after(&g_pc.busy_threads.h, &tinfo->q);
+
+ /* Lets pick first job in waiting queue */
+ job = g_pc.waiting_jobs.h.next;
+ conn = job->data;
+ job->data = NULL;
+ ll_unlink(job);
+ ll_link_after(&g_pc.jobs_free_list.h, job);
+ pthread_mutex_unlock(&g_pc.mutex);
+ pthread_mutex_lock(&(conn->c_mutex));
+ connection_set_new_op_in_pb(pb, tinfo, conn);
+ pthread_mutex_unlock(&(conn->c_mutex));
+ conn= NULL;
+ job = NULL;
+ return CONN_FOUND_WORK_TO_DO;
}
-
- if (op_shutdown) {
- slapi_log_err(SLAPI_LOG_TRACE, "connection_wait_for_new_work", "shutdown\n");
- ret = CONN_SHUTDOWN;
- } else if (NULL == (wqitem = get_work_q(&op_stack_obj))) {
- /* not sure how this can happen */
- slapi_log_err(SLAPI_LOG_TRACE, "connection_wait_for_new_work", "no work to do\n");
- ret = CONN_NOWORK;
- } else {
- /* make new pb */
- slapi_pblock_set(pb, SLAPI_CONNECTION, wqitem);
- slapi_pblock_set_op_stack_elem(pb, op_stack_obj);
- slapi_pblock_set(pb, SLAPI_OPERATION, op_stack_obj->op);
- }
-
- pthread_mutex_unlock(&work_q_lock);
- return ret;
}
#include "openldapber.h"
@@ -1322,7 +1455,7 @@ connection_read_operation(Connection *conn, Operation *op, ber_tag_t *tag, int *
/* Did we time out ? */
if (0 == ret) {
/* We timed out, should the server shutdown ? */
- if (op_shutdown) {
+ if (g_pc.shutdown) {
ret = CONN_SHUTDOWN;
goto done;
}
@@ -1591,18 +1724,17 @@ connection_enter_leave_turbo(Connection *conn, int current_turbo_flag, int *new_
*new_turbo_flag = new_mode;
}
-static void
+static void *
connection_threadmain(void *arg)
{
Slapi_PBlock *pb = slapi_pblock_new();
- int32_t *snmp_vars_idx = (int32_t *) arg;
+ pc_tinfo_t *tinfo = arg;
/* wait forever for new pb until one is available or shutdown */
- int32_t interval = 0; /* used be 10 seconds */
Connection *conn = NULL;
Operation *op;
ber_tag_t tag = 0;
int thread_turbo_flag = 0;
- int ret = 0;
+ conn_status_t ret = 0;
int more_data = 0;
int replication_connection = 0; /* If this connection is from a replication supplier, we want to ensure that operation processing is serialized */
int doshutdown = 0;
@@ -1613,18 +1745,22 @@ connection_threadmain(void *arg)
/* Arrange to ignore SIGPIPE signals. */
SIGNAL(SIGPIPE, SIG_IGN);
#endif
- thread_private_snmp_vars_set_idx(*snmp_vars_idx);
+
+ pthread_setspecific(g_pc.tinfo_key, tinfo);
+ pthread_mutex_lock(&g_pc.mutex);
+ ll_link_before(&g_pc.waiting_threads.h, &tinfo->q);
+ pthread_mutex_unlock(&g_pc.mutex);
while (1) {
int is_timedout = 0;
time_t curtime = 0;
- if (op_shutdown) {
+ if (tinfo->closing) {
slapi_log_err(SLAPI_LOG_TRACE, "connection_threadmain",
"op_thread received shutdown signal\n");
slapi_pblock_destroy(pb);
g_decr_active_threadcnt();
- return;
+ return NULL;
}
if (!thread_turbo_flag && !more_data) {
@@ -1634,18 +1770,17 @@ connection_threadmain(void *arg)
we should finish the op now. Client might be thinking it's
done sending the request and wait for the response forever.
[blackflag 624234] */
- ret = connection_wait_for_new_work(pb, interval);
+ ret = connection_wait_for_new_work(pb, tinfo);
switch (ret) {
case CONN_NOWORK:
- PR_ASSERT(interval != 0); /* this should never happen */
continue;
case CONN_SHUTDOWN:
slapi_log_err(SLAPI_LOG_TRACE, "connection_threadmain",
- "op_thread received shutdown signal\n");
+ "op_thread is closing\n");
slapi_pblock_destroy(pb);
g_decr_active_threadcnt();
- return;
+ return NULL;
case CONN_FOUND_WORK_TO_DO:
/* note - don't need to lock here - connection should only
be used by this thread - since c_gettingber is set to 1
@@ -1657,7 +1792,7 @@ connection_threadmain(void *arg)
slapi_log_err(SLAPI_LOG_ERR, "connection_threadmain", "pb_conn is NULL\n");
slapi_pblock_destroy(pb);
g_decr_active_threadcnt();
- return;
+ return NULL;
}
pthread_mutex_lock(&(pb_conn->c_mutex));
@@ -1708,7 +1843,7 @@ connection_threadmain(void *arg)
pthread_mutex_lock(&(conn->c_mutex));
/* Make our own pb in turbo mode */
- connection_make_new_pb(pb, conn);
+ connection_set_new_op_in_pb(pb, tinfo, conn);
if (connection_call_io_layer_callbacks(conn)) {
slapi_log_err(SLAPI_LOG_ERR, "connection_threadmain",
"Could not add/remove IO layers from connection\n");
@@ -1726,7 +1861,7 @@ connection_threadmain(void *arg)
slapi_log_err(SLAPI_LOG_ERR, "connection_threadmain", "NULL param: conn (0x%p) op (0x%p)\n", conn, op);
slapi_pblock_destroy(pb);
g_decr_active_threadcnt();
- return;
+ return NULL;
}
maxthreads = conn->c_max_threads_per_conn;
more_data = 0;
@@ -1762,12 +1897,15 @@ connection_threadmain(void *arg)
}
}
- /* turn off turbo mode immediately if any pb waiting in global queue */
- if (thread_turbo_flag && !WORK_Q_EMPTY) {
+ /* turn off turbo mode immediately if any pb waiting in global queue
+ * or if all cpus are busy
+ */
+ if (thread_turbo_flag &&
+ (!ll_is_empty(&g_pc.waiting_jobs) || g_pc.busy_threads.size >= g_pc.nbcpus)) {
thread_turbo_flag = 0;
slapi_log_err(SLAPI_LOG_CONNS, "connection_threadmain",
"conn %" PRIu64 " leaving turbo mode - pb_q is not empty %d\n",
- conn->c_connid, work_q_size);
+ conn->c_connid, g_pc.waiting_jobs.size);
}
#endif
@@ -1913,7 +2051,7 @@ connection_threadmain(void *arg)
pthread_mutex_unlock(&(conn->c_mutex));
signal_listner(conn->c_ct_list);
slapi_pblock_destroy(pb);
- return;
+ return NULL;
}
/*
* done with this operation. delete it from the op
@@ -2012,8 +2150,6 @@ connection_threadmain(void *arg)
int
connection_activity(Connection *conn, int maxthreads)
{
- struct Slapi_op_stack *op_stack_obj;
-
if (connection_acquire_nolock(conn) == -1) {
slapi_log_err(SLAPI_LOG_CONNS,
"connection_activity", "Could not acquire lock in connection_activity as conn %" PRIu64 " closing fd=%d\n",
@@ -2034,11 +2170,9 @@ connection_activity(Connection *conn, int maxthreads)
slapi_counter_increment(g_get_per_thread_snmp_vars()->ops_tbl.dsConnectionsInMaxThreads);
slapi_counter_increment(g_get_per_thread_snmp_vars()->ops_tbl.dsMaxThreadsHits);
}
- op_stack_obj = connection_get_operation();
- connection_add_operation(conn, op_stack_obj->op);
/* Add conn to the end of the work queue. */
/* have to do this last - add_work_q will signal waiters in connection_wait_for_new_work */
- add_work_q((work_q_item *)conn, op_stack_obj);
+ add_work_q((work_q_item *)conn);
if (!config_check_referral_mode()) {
slapi_counter_increment(g_get_per_thread_snmp_vars()->server_tbl.dsOpInitiated);
@@ -2050,117 +2184,85 @@ connection_activity(Connection *conn, int maxthreads)
/* add_work_q(): will add a work_q_item to the end of the global work queue. The work queue
is implemented as a single link list. */
-static void
-add_work_q(work_q_item *wqitem, struct Slapi_op_stack *op_stack_obj)
-{
- struct Slapi_work_q *new_work_q = NULL;
-
- slapi_log_err(SLAPI_LOG_TRACE, "add_work_q", "=>\n");
-
- new_work_q = create_work_q();
- new_work_q->work_item = wqitem;
- new_work_q->op_stack_obj = op_stack_obj;
- new_work_q->next_work_item = NULL;
-
- pthread_mutex_lock(&work_q_lock);
- if (tail_work_q == NULL) {
- tail_work_q = new_work_q;
- head_work_q = new_work_q;
- } else {
- tail_work_q->next_work_item = new_work_q;
- tail_work_q = new_work_q;
- }
- PR_AtomicIncrement(&work_q_size); /* increment q size */
- if (work_q_size > work_q_size_max) {
- work_q_size_max = work_q_size;
- }
- pthread_cond_signal(&work_q_cv); /* notify waiters in connection_wait_for_new_work */
- pthread_mutex_unlock(&work_q_lock);
-}
-
-/* get_work_q(): will get a work_q_item from the beginning of the work queue, return NULL if
- the queue is empty. This should only be called from connection_wait_for_new_work
- with the work_q_lock held */
-
-static work_q_item *
-get_work_q(struct Slapi_op_stack **op_stack_obj)
+void
+add_work_q(work_q_item *wqitem)
{
- struct Slapi_work_q *tmp = NULL;
- work_q_item *wqitem;
-
- slapi_log_err(SLAPI_LOG_TRACE, "get_work_q", "=>\n");
- if (head_work_q == NULL) {
- slapi_log_err(SLAPI_LOG_TRACE, "get_work_q", "The work queue is empty.\n");
- return NULL;
- }
-
- tmp = head_work_q;
- if (head_work_q == tail_work_q) {
- tail_work_q = NULL;
+ ll_list_t *elmt = NULL;
+ pc_tinfo_t *tinfo = NULL;
+ time_t curtime = 0;
+
+ pthread_mutex_lock(&g_pc.mutex);
+ for (;;) {
+ if (!ll_is_empty(&g_pc.waiting_threads)) {
+ /* A thread is waiting ==> wake up that thread after
+ * having assigned the job
+ */
+ elmt = g_pc.waiting_threads.h.next;
+ tinfo = elmt->data;
+ ll_unlink(elmt);
+ ll_link_before(&g_pc.busy_threads.h, elmt);
+ pthread_mutex_unlock(&g_pc.mutex);
+ pthread_mutex_lock(&tinfo->mutex);
+ tinfo->conn = wqitem;
+ pthread_cond_signal(&tinfo->cv);
+ pthread_mutex_unlock(&tinfo->mutex);
+ return;
+ }
+ if (!ll_is_empty(&g_pc.jobs_free_list)) {
+ /* No threads are available ==> Queue the job */
+ elmt = g_pc.jobs_free_list.h.next;
+ ll_unlink(elmt);
+ elmt->data = wqitem;
+ ll_link_before(&g_pc.waiting_jobs.h, elmt);
+ pthread_mutex_unlock(&g_pc.mutex);
+ return;
+ }
+ /* Job queue is full ==> wait a bit */
+ curtime = slapi_current_rel_time_t();
+ if (curtime > conn_next_warning_time) {
+ conn_next_warning_time = curtime + CONN_FLOW_CONTROL_MSG_TIMEOUT;
+ slapi_log_err(SLAPI_LOG_WARNING, "Listening threads",
+ "Warning: server may be unresponsive because the threads "
+ "are exhausted and too many operations have been queued.\n");
+ }
+ pthread_mutex_unlock(&g_pc.mutex);
+ usleep(1000*FLOW_CONTROL_DELAY);
+ pthread_mutex_lock(&g_pc.mutex);
}
- head_work_q = tmp->next_work_item;
-
- wqitem = tmp->work_item;
- *op_stack_obj = tmp->op_stack_obj;
- PR_AtomicDecrement(&work_q_size); /* decrement q size */
- /* Free the memory used by the item found. */
- destroy_work_q(&tmp);
-
- return (wqitem);
}
/* Helper functions common to both varieties of connection code: */
+
/* op_thread_cleanup() : This function is called by daemon thread when it gets
- the slapd_shutdown signal. It will set op_shutdown to 1 and notify
+ the slapd_shutdown signal. It will set g_pc.shutdown to 1 and notify
all thread waiting on op_thread_cv to terminate. */
void
op_thread_cleanup()
{
slapi_log_err(SLAPI_LOG_INFO, "op_thread_cleanup",
- "slapd shutting down - signaling operation threads - op stack size %d max work q size %d max work q stack size %d\n",
- op_stack_size, work_q_size_max, work_q_stack_size_max);
+ "slapd shutting down - signaling operation threads\n");
- PR_AtomicIncrement(&op_shutdown);
- pthread_mutex_lock(&work_q_lock);
- pthread_cond_broadcast(&work_q_cv); /* tell any thread waiting in connection_wait_for_new_work to shutdown */
- pthread_mutex_unlock(&work_q_lock);
+ pthread_mutex_lock(&g_pc.mutex);
+ g_pc.shutdown = 1;
+ pthread_mutex_unlock(&g_pc.mutex);
+ op_thread_set_threads_number(0);
}
/* do this after all worker threads have terminated */
void
connection_post_shutdown_cleanup()
{
- struct Slapi_op_stack *stack_obj;
- int stack_cnt = 0;
- struct Slapi_work_q *work_q;
- int work_cnt = 0;
-
- while ((work_q = (struct Slapi_work_q *)PR_StackPop(work_q_stack))) {
- Connection *conn = (Connection *)work_q->work_item;
- stack_obj = work_q->op_stack_obj;
- if (stack_obj) {
- if (conn) {
- connection_remove_operation(conn, stack_obj->op);
- }
- connection_done_operation(conn, stack_obj);
- }
- slapi_ch_free((void **)&work_q);
- work_cnt++;
- }
- PR_DestroyStack(work_q_stack);
- work_q_stack = NULL;
- while ((stack_obj = (struct Slapi_op_stack *)PR_StackPop(op_stack))) {
- operation_free(&stack_obj->op, NULL);
- slapi_ch_free((void **)&stack_obj);
- stack_cnt++;
- }
- PR_DestroyStack(op_stack);
- op_stack = NULL;
+ /* Free the job queue free list */
+ slapi_ch_free(&g_pc.jobs_free_list.h.data);
+ /* Cleanup snmp global variables and thread array */
+ /* remove the snmp threads array */
+ slapi_ch_free((void*)&g_pc.snmp.threads);
+ /* and snmp global counters */
+ free_global_snmp_vars();
slapi_log_err(SLAPI_LOG_INFO, "connection_post_shutdown_cleanup",
- "slapd shutting down - freed %d work q stack objects - freed %d op stack objects\n",
- work_cnt, stack_cnt);
+ "slapd shutting down\n");
}
static void
@@ -2208,8 +2310,7 @@ void
connection_remove_operation_ext(Slapi_PBlock *pb, Connection *conn, Operation *op)
{
connection_remove_operation(conn, op);
- void *op_stack_elem = slapi_pblock_get_op_stack_elem(pb);
- connection_done_operation(conn, op_stack_elem);
+ operation_done(&op, conn);
slapi_pblock_set(pb, SLAPI_OPERATION, NULL);
slapi_pblock_init(pb);
}
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index a06cad4ba..ce95eb208 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -134,6 +134,8 @@
#endif
#include <sys/resource.h>
#include <rust-slapi-private.h>
+#include "snmp_collator.h"
+#include "wthreads.h"
#define REMOVE_CHANGELOG_CMD "remove"
@@ -2033,130 +2035,75 @@ g_get_slapd_security_on(void)
return config_get_security();
}
-static struct snmp_vars_t global_snmp_vars;
-static PRUintn thread_private_snmp_vars_idx;
-/*
- * https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSPR/Reference/PR_NewThreadPrivateIndex
- * It is called each time:
- * - PR_SetThreadPrivate is called with a not NULL private value
- * - on thread exit
- */
-static void
-snmp_vars_idx_free(void *ptr)
+
+/* Worker thread framework (is needed for get snmp per thread counters) */
+pc_t g_pc; /* Worker thread framework context */
+static struct snmp_vars_t global_snmp_vars; /* snmp var used if not a worker thread */
+
+/* Get worker thread context from thread specific data */
+pc_tinfo_t *
+g_get_thread_info()
{
- int *idx = ptr;
- if (idx) {
- slapi_ch_free((void **)&idx);
+ if (g_pc.snmp.threads) {
+ return pthread_getspecific(g_pc.tinfo_key);
}
+ return NULL;
}
-/* Define a per thread private area that is used to store
- * in the (workers) thread the index in per_thread_snmp_vars
- * of the set of counters
- */
+
+/* Tell the worker to not reuse current operation */
void
-init_thread_private_snmp_vars()
+g_pc_do_not_reuse_operation()
{
- if (PR_NewThreadPrivateIndex(&thread_private_snmp_vars_idx, snmp_vars_idx_free) != PR_SUCCESS) {
- slapi_log_err(SLAPI_LOG_ALERT,
- "init_thread_private_snmp_vars", "Failure to per thread snmp counters !\n");
- PR_ASSERT(0);
+ pc_tinfo_t *tinfo = g_get_thread_info();
+ if (tinfo) {
+ tinfo->op = operation_new(plugin_build_operation_action_bitmap(0, plugin_get_server_plg()));
}
}
-int
-thread_private_snmp_vars_get_idx(void)
-{
- int *idx;
- idx = (int *) PR_GetThreadPrivate(thread_private_snmp_vars_idx);
- if (idx == NULL) {
- /* if it was not initialized set it to zero */
- return 0;
- }
- return *idx;
-}
+
+
+/* Allocated the first slot of arrays of counters
+ * The first slot contains counters that are not specific to counters
+ */
void
-thread_private_snmp_vars_set_idx(int32_t idx)
+alloc_global_snmp_vars()
{
- int *val;
- val = (int32_t *) PR_GetThreadPrivate(thread_private_snmp_vars_idx);
- if (val == NULL) {
- /* if it was not initialized set it to zero */
- val = (int *) slapi_ch_calloc(1, sizeof(int32_t));
- PR_SetThreadPrivate(thread_private_snmp_vars_idx, (void *) val);
- }
- *val = idx;
+ snmp_thread_counters_init(&global_snmp_vars);
}
-static struct snmp_vars_t *per_thread_snmp_vars = NULL; /* array of counters */
-static int max_slots_snmp_vars = 0; /* no slots array of counters */
-struct snmp_vars_t *
-g_get_per_thread_snmp_vars(void)
+void
+free_global_snmp_vars()
{
- int thread_vars = thread_private_snmp_vars_get_idx();
- if (thread_vars < 0 || thread_vars >= max_slots_snmp_vars) {
- /* fallback to the global one */
- thread_vars = 0;
- }
- return &per_thread_snmp_vars[thread_vars];
+ snmp_thread_counters_cleanup(&global_snmp_vars);
}
-
struct snmp_vars_t *
g_get_first_thread_snmp_vars(int *cookie)
{
- *cookie = 0;
- if (max_slots_snmp_vars == 0) {
- /* not yet initialized */
- return NULL;
- }
- return &per_thread_snmp_vars[0];
+ *cookie = 1;
+ return &global_snmp_vars;
}
struct snmp_vars_t *
g_get_next_thread_snmp_vars(int *cookie)
{
+ /* Caller should held g_pc.snmp.mutex */
int index = *cookie;
- if (index < 0 || index >= (max_slots_snmp_vars - 1)) {
+ if (index < 0 || index >= g_pc.snmp.nbthreads) {
return NULL;
}
- *cookie = index + 1;
- return &per_thread_snmp_vars[index + 1];
-}
-
-/* Allocated the first slot of arrays of counters
- * The first slot contains counters that are not specific to counters
- */
-void
-alloc_global_snmp_vars()
-{
- PR_ASSERT(max_slots_snmp_vars == 0);
- if (max_slots_snmp_vars == 0) {
- max_slots_snmp_vars = 1;
- per_thread_snmp_vars = (struct snmp_vars_t *) slapi_ch_calloc(max_slots_snmp_vars, sizeof(struct snmp_vars_t));
- }
-
-}
-
-/* Allocated the next slots of the arrays of counters
- * with a slot per worker thread
- */
-void
-alloc_per_thread_snmp_vars(int32_t maxthread)
-{
- PR_ASSERT(max_slots_snmp_vars == 1);
- if (max_slots_snmp_vars == 1) {
- max_slots_snmp_vars += maxthread; /* one extra slot for the global counters */
- per_thread_snmp_vars = (struct snmp_vars_t *) slapi_ch_realloc((char *) per_thread_snmp_vars,
- max_slots_snmp_vars * sizeof (struct snmp_vars_t));
-
- /* make sure to zeroed the new alloacted counters */
- memset(&per_thread_snmp_vars[1], 0, (max_slots_snmp_vars - 1) * sizeof (struct snmp_vars_t));
- }
+ *cookie = index+1;
+ return (index == 0) ? &global_snmp_vars : &g_pc.snmp.threads[index]->snmp_vars;
}
struct snmp_vars_t *
-g_get_global_snmp_vars(void)
+g_get_per_thread_snmp_vars(void)
{
- return &global_snmp_vars;
+ pc_tinfo_t *tinfo = g_get_thread_info();
+ if (tinfo) {
+ return &tinfo->snmp_vars;
+ } else {
+ return &global_snmp_vars;
+ }
}
static slapdEntryPoints *sep = NULL;
@@ -4971,7 +4918,7 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
"set it to \"-1\" and the server will tune it according to the "
"system hardware\n",
threadnum, hw_threadnum);
- }
+ }
}
if (*endp != '\0' || errno == ERANGE || threadnum < 1 || threadnum > 65535) {
@@ -4981,6 +4928,9 @@ config_set_threadnumber(const char *attrname, char *value, char *errorbuf, int a
}
if (apply) {
slapi_atomic_store_32(&(slapdFrontendConfig->threadnumber), threadnum, __ATOMIC_RELAXED);
+ if (g_pc.threadnumber_cb) {
+ g_pc.threadnumber_cb(threadnum);
+ }
}
return retVal;
}
diff --git a/ldap/servers/slapd/monitor.c b/ldap/servers/slapd/monitor.c
index a0781a848..f601bfb94 100644
--- a/ldap/servers/slapd/monitor.c
+++ b/ldap/servers/slapd/monitor.c
@@ -30,6 +30,7 @@
#include <sys/socket.h>
#include "slap.h"
#include "fe.h"
+#include "wthreads.h"
int32_t
monitor_info(Slapi_PBlock *pb __attribute__((unused)),
@@ -46,6 +47,7 @@ monitor_info(Slapi_PBlock *pb __attribute__((unused)),
struct tm utm;
Slapi_Backend *be;
char *cookie;
+ op_thread_stats_t wthreads;
vals[0] = &val;
vals[1] = NULL;
@@ -94,6 +96,30 @@ monitor_info(Slapi_PBlock *pb __attribute__((unused)),
val.bv_val = buf;
attrlist_replace(&e->e_attrs, "nbackends", vals);
+ if (g_pc.getstats_cb) {
+ g_pc.getstats_cb(&wthreads);
+
+ val.bv_len = snprintf(buf, sizeof(buf), "%d", wthreads.waitingthreads);
+ val.bv_val = buf;
+ attrlist_replace(&e->e_attrs, "waitingthreads", vals);
+
+ val.bv_len = snprintf(buf, sizeof(buf), "%d", wthreads.busythreads);
+ val.bv_val = buf;
+ attrlist_replace(&e->e_attrs, "busythreads", vals);
+
+ val.bv_len = snprintf(buf, sizeof(buf), "%d", wthreads.maxbusythreads);
+ val.bv_val = buf;
+ attrlist_replace(&e->e_attrs, "maxbusythreads", vals);
+
+ val.bv_len = snprintf(buf, sizeof(buf), "%d", wthreads.waitingjobs);
+ val.bv_val = buf;
+ attrlist_replace(&e->e_attrs, "waitingjobs", vals);
+
+ val.bv_len = snprintf(buf, sizeof(buf), "%d", wthreads.maxwaitingjobs);
+ val.bv_val = buf;
+ attrlist_replace(&e->e_attrs, "maxwaitingjobs", vals);
+ }
+
/*
* Loop through the backends, and stuff the monitor dn's
* into the entry we're sending back
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index adb914a05..e569e0cc3 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1309,8 +1309,10 @@ int g_get_global_lastmod(void);
/* Ref_Array *g_get_global_referrals(void); */
struct snmp_vars_t *g_get_global_snmp_vars(void);
void alloc_global_snmp_vars(void);
+void free_global_snmp_vars(void);
void alloc_per_thread_snmp_vars(int32_t maxthread);
void thread_private_snmp_vars_set_idx(int32_t idx);
+void g_pc_do_not_reuse_operation(void);
struct snmp_vars_t *g_get_per_thread_snmp_vars(void);
struct snmp_vars_t *g_get_first_thread_snmp_vars(int *cookie);
struct snmp_vars_t *g_get_next_thread_snmp_vars(int *cookie);
diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c
index 9059d141a..414c69078 100644
--- a/ldap/servers/slapd/psearch.c
+++ b/ldap/servers/slapd/psearch.c
@@ -164,6 +164,12 @@ ps_add(Slapi_PBlock *pb, ber_int_t changetypes, int send_entchg_controls)
if (!ps) {
return; /* Error is logged by psearch_alloc */
}
+ /*
+ * The new thread use the operation so tell worker thread
+ * not to reuse it.
+ */
+ g_pc_do_not_reuse_operation();
+
ps->ps_pblock = slapi_pblock_clone(pb);
ps->ps_changetypes = changetypes;
ps->ps_send_entchg_controls = send_entchg_controls;
@@ -413,6 +419,13 @@ ps_send_results(void *arg)
conn->c_connid, pb_op ? pb_op->o_opid : -1);
/* Delete this op from the connection's list */
connection_remove_operation_ext(ps->ps_pblock, conn, pb_op);
+ /*
+ * Then free the operation: connection_remove_operation_ext
+ * calls operation_done and unlink op from pblock
+ * so operation should be explictly freed
+ */
+ operation_free(&pb_op, NULL);
+
/* Decrement the connection refcnt */
if (conn_acq_flag == 0) { /* we acquired it, so release it */
diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c
index 3dc84b7e6..f36761a38 100644
--- a/ldap/servers/slapd/snmp_collator.c
+++ b/ldap/servers/slapd/snmp_collator.c
@@ -30,6 +30,7 @@
#include "prerror.h"
#include "prcvar.h"
#include "plstr.h"
+#include "wthreads.h"
#ifdef HPUX
/* HP-UX doesn't define SEM_FAILED like other platforms, so
@@ -87,71 +88,110 @@ static sem_t *stats_sem = NULL;
*
************************************************************************************/
-static int
-snmp_collator_init(void)
+void
+snmp_thread_counters_cleanup(struct snmp_vars_t *snmp_vars)
{
- int i;
- int cookie;
- struct snmp_vars_t *snmp_vars;
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsAnonymousBinds);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsUnAuthBinds);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsSimpleAuthBinds);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsStrongAuthBinds);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsBindSecurityErrors);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsInOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsReadOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsCompareOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsAddEntryOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsRemoveEntryOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsModifyEntryOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsModifyRDNOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsListOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsSearchOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsOneLevelSearchOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsWholeSubtreeSearchOps);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsReferrals);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsChainings);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsSecurityErrors);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsErrors);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsConnections);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsConnectionSeq);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsBytesRecv);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsBytesSent);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsEntriesReturned);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsReferralsReturned);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsConnectionsInMaxThreads);
+ slapi_counter_destroy(&snmp_vars->ops_tbl.dsMaxThreadsHits);
+ slapi_counter_destroy(&snmp_vars->entries_tbl.dsSupplierEntries);
+ slapi_counter_destroy(&snmp_vars->entries_tbl.dsCopyEntries);
+ slapi_counter_destroy(&snmp_vars->entries_tbl.dsCacheEntries);
+ slapi_counter_destroy(&snmp_vars->entries_tbl.dsCacheHits);
+ slapi_counter_destroy(&snmp_vars->entries_tbl.dsConsumerHits);
+ slapi_counter_destroy(&snmp_vars->server_tbl.dsOpInitiated);
+ slapi_counter_destroy(&snmp_vars->server_tbl.dsOpCompleted);
+ slapi_counter_destroy(&snmp_vars->server_tbl.dsEntriesSent);
+ slapi_counter_destroy(&snmp_vars->server_tbl.dsBytesSent);
+}
- /*
- * Create the per threads SNMP counters
- */
- for (snmp_vars = g_get_first_thread_snmp_vars(&cookie); snmp_vars; snmp_vars = g_get_next_thread_snmp_vars(&cookie)) {
- snmp_vars->ops_tbl.dsAnonymousBinds = slapi_counter_new();
- snmp_vars->ops_tbl.dsUnAuthBinds = slapi_counter_new();
- snmp_vars->ops_tbl.dsSimpleAuthBinds = slapi_counter_new();
- snmp_vars->ops_tbl.dsStrongAuthBinds = slapi_counter_new();
- snmp_vars->ops_tbl.dsBindSecurityErrors = slapi_counter_new();
- snmp_vars->ops_tbl.dsInOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsReadOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsCompareOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsAddEntryOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsRemoveEntryOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsModifyEntryOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsModifyRDNOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsListOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsSearchOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsOneLevelSearchOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsWholeSubtreeSearchOps = slapi_counter_new();
- snmp_vars->ops_tbl.dsReferrals = slapi_counter_new();
- snmp_vars->ops_tbl.dsChainings = slapi_counter_new();
- snmp_vars->ops_tbl.dsSecurityErrors = slapi_counter_new();
- snmp_vars->ops_tbl.dsErrors = slapi_counter_new();
- snmp_vars->ops_tbl.dsConnections = slapi_counter_new();
- snmp_vars->ops_tbl.dsConnectionSeq = slapi_counter_new();
- snmp_vars->ops_tbl.dsBytesRecv = slapi_counter_new();
- snmp_vars->ops_tbl.dsBytesSent = slapi_counter_new();
- snmp_vars->ops_tbl.dsEntriesReturned = slapi_counter_new();
- snmp_vars->ops_tbl.dsReferralsReturned = slapi_counter_new();
- snmp_vars->ops_tbl.dsConnectionsInMaxThreads = slapi_counter_new();
- snmp_vars->ops_tbl.dsMaxThreadsHits = slapi_counter_new();
- snmp_vars->entries_tbl.dsSupplierEntries = slapi_counter_new();
- snmp_vars->entries_tbl.dsCopyEntries = slapi_counter_new();
- snmp_vars->entries_tbl.dsCacheEntries = slapi_counter_new();
- snmp_vars->entries_tbl.dsCacheHits = slapi_counter_new();
- snmp_vars->entries_tbl.dsConsumerHits = slapi_counter_new();
- snmp_vars->server_tbl.dsOpInitiated = slapi_counter_new();
- snmp_vars->server_tbl.dsOpCompleted = slapi_counter_new();
- snmp_vars->server_tbl.dsEntriesSent = slapi_counter_new();
- snmp_vars->server_tbl.dsBytesSent = slapi_counter_new();
-
- /* Initialize the global interaction table */
- for (i = 0; i < NUM_SNMP_INT_TBL_ROWS; i++) {
- snmp_vars->int_tbl[i].dsIntIndex = i + 1;
- PL_strncpyz(snmp_vars->int_tbl[i].dsName, "Not Available",
- sizeof (snmp_vars->int_tbl[i].dsName));
- snmp_vars->int_tbl[i].dsTimeOfCreation = 0;
- snmp_vars->int_tbl[i].dsTimeOfLastAttempt = 0;
- snmp_vars->int_tbl[i].dsTimeOfLastSuccess = 0;
- snmp_vars->int_tbl[i].dsFailuresSinceLastSuccess = 0;
- snmp_vars->int_tbl[i].dsFailures = 0;
- snmp_vars->int_tbl[i].dsSuccesses = 0;
- PL_strncpyz(snmp_vars->int_tbl[i].dsURL, "Not Available",
- sizeof (snmp_vars->int_tbl[i].dsURL));
- }
+void
+snmp_thread_counters_init(struct snmp_vars_t *snmp_vars)
+{
+ snmp_vars->ops_tbl.dsAnonymousBinds = slapi_counter_new();
+ snmp_vars->ops_tbl.dsUnAuthBinds = slapi_counter_new();
+ snmp_vars->ops_tbl.dsSimpleAuthBinds = slapi_counter_new();
+ snmp_vars->ops_tbl.dsStrongAuthBinds = slapi_counter_new();
+ snmp_vars->ops_tbl.dsBindSecurityErrors = slapi_counter_new();
+ snmp_vars->ops_tbl.dsInOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsReadOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsCompareOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsAddEntryOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsRemoveEntryOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsModifyEntryOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsModifyRDNOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsListOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsSearchOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsOneLevelSearchOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsWholeSubtreeSearchOps = slapi_counter_new();
+ snmp_vars->ops_tbl.dsReferrals = slapi_counter_new();
+ snmp_vars->ops_tbl.dsChainings = slapi_counter_new();
+ snmp_vars->ops_tbl.dsSecurityErrors = slapi_counter_new();
+ snmp_vars->ops_tbl.dsErrors = slapi_counter_new();
+ snmp_vars->ops_tbl.dsConnections = slapi_counter_new();
+ snmp_vars->ops_tbl.dsConnectionSeq = slapi_counter_new();
+ snmp_vars->ops_tbl.dsBytesRecv = slapi_counter_new();
+ snmp_vars->ops_tbl.dsBytesSent = slapi_counter_new();
+ snmp_vars->ops_tbl.dsEntriesReturned = slapi_counter_new();
+ snmp_vars->ops_tbl.dsReferralsReturned = slapi_counter_new();
+ snmp_vars->ops_tbl.dsConnectionsInMaxThreads = slapi_counter_new();
+ snmp_vars->ops_tbl.dsMaxThreadsHits = slapi_counter_new();
+ snmp_vars->entries_tbl.dsSupplierEntries = slapi_counter_new();
+ snmp_vars->entries_tbl.dsCopyEntries = slapi_counter_new();
+ snmp_vars->entries_tbl.dsCacheEntries = slapi_counter_new();
+ snmp_vars->entries_tbl.dsCacheHits = slapi_counter_new();
+ snmp_vars->entries_tbl.dsConsumerHits = slapi_counter_new();
+ snmp_vars->server_tbl.dsOpInitiated = slapi_counter_new();
+ snmp_vars->server_tbl.dsOpCompleted = slapi_counter_new();
+ snmp_vars->server_tbl.dsEntriesSent = slapi_counter_new();
+ snmp_vars->server_tbl.dsBytesSent = slapi_counter_new();
+
+ /* Initialize the global interaction table */
+ for (size_t i = 0; i < NUM_SNMP_INT_TBL_ROWS; i++) {
+ snmp_vars->int_tbl[i].dsIntIndex = i + 1;
+ PL_strncpyz(snmp_vars->int_tbl[i].dsName, "Not Available",
+ sizeof (snmp_vars->int_tbl[i].dsName));
+ snmp_vars->int_tbl[i].dsTimeOfCreation = 0;
+ snmp_vars->int_tbl[i].dsTimeOfLastAttempt = 0;
+ snmp_vars->int_tbl[i].dsTimeOfLastSuccess = 0;
+ snmp_vars->int_tbl[i].dsFailuresSinceLastSuccess = 0;
+ snmp_vars->int_tbl[i].dsFailures = 0;
+ snmp_vars->int_tbl[i].dsSuccesses = 0;
+ PL_strncpyz(snmp_vars->int_tbl[i].dsURL, "Not Available",
+ sizeof (snmp_vars->int_tbl[i].dsURL));
}
+}
+
+
+static int
+snmp_collator_init(void)
+{
/* Get the semaphore */
snmp_collator_sem_wait();
@@ -596,6 +636,7 @@ snmp_update_ops_table(void)
struct snmp_vars_t *snmp_vars;
int32_t total;
+ pthread_mutex_lock(&g_pc.snmp.mutex);
for (total = 0, snmp_vars = g_get_first_thread_snmp_vars(&cookie); snmp_vars; snmp_vars = g_get_next_thread_snmp_vars(&cookie)) {
total += slapi_counter_get_value(snmp_vars->ops_tbl.dsAnonymousBinds);
}
@@ -735,6 +776,7 @@ snmp_update_ops_table(void)
total += slapi_counter_get_value(snmp_vars->ops_tbl.dsReferralsReturned);
}
stats->ops_stats.dsReferralsReturned = total;
+ pthread_mutex_unlock(&g_pc.snmp.mutex);
}
/*
@@ -750,6 +792,7 @@ snmp_update_entries_table(void)
struct snmp_vars_t *snmp_vars;
int32_t total;
+ pthread_mutex_lock(&g_pc.snmp.mutex);
for (total = 0, snmp_vars = g_get_first_thread_snmp_vars(&cookie); snmp_vars; snmp_vars = g_get_next_thread_snmp_vars(&cookie)) {
total += slapi_counter_get_value(snmp_vars->entries_tbl.dsSupplierEntries);
}
@@ -774,6 +817,7 @@ snmp_update_entries_table(void)
total += slapi_counter_get_value(snmp_vars->entries_tbl.dsConsumerHits);
}
stats->entries_stats.dsConsumerHits = total;
+ pthread_mutex_unlock(&g_pc.snmp.mutex);
}
/*
@@ -889,7 +933,8 @@ snmp_as_entry(Slapi_Entry *e)
int cookie;
uint64_t total;
struct snmp_vars_t *snmp_vars;
-
+
+ pthread_mutex_lock(&g_pc.snmp.mutex);
for (total = 0, snmp_vars = g_get_first_thread_snmp_vars(&cookie); snmp_vars; snmp_vars = g_get_next_thread_snmp_vars(&cookie)) {
total += slapi_counter_get_value(snmp_vars->ops_tbl.dsAnonymousBinds);
}
@@ -1054,6 +1099,7 @@ snmp_as_entry(Slapi_Entry *e)
total += slapi_counter_get_value(snmp_vars->entries_tbl.dsConsumerHits);
}
add_counter_to_value(e, "ConsumerHits", total);
+ pthread_mutex_unlock(&g_pc.snmp.mutex);
}
/*
diff --git a/ldap/servers/slapd/snmp_collator.h b/ldap/servers/slapd/snmp_collator.h
index c368347dc..3f41538e6 100644
--- a/ldap/servers/slapd/snmp_collator.h
+++ b/ldap/servers/slapd/snmp_collator.h
@@ -33,3 +33,5 @@ int snmp_collator_start(void);
int snmp_collator_stop(void);
void set_snmp_interaction_row(char *host, int port, int error);
void snmp_collator_update(time_t, void *);
+void snmp_thread_counters_cleanup(struct snmp_vars_t *);
+void snmp_thread_counters_init(struct snmp_vars_t *);
diff --git a/ldap/servers/slapd/wthreads.h b/ldap/servers/slapd/wthreads.h
new file mode 100644
index 000000000..583a5c136
--- /dev/null
+++ b/ldap/servers/slapd/wthreads.h
@@ -0,0 +1,89 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2023 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+/* This file contains the data about the worker threads handling the operations */
+
+#ifndef _WTHREADS_H_
+#define _WTHREADS_H_
+
+#include "slap.h"
+
+
+
+/* List element */
+typedef struct ll_list_t {
+ struct ll_head_t *head;
+ void *data;
+ struct ll_list_t *prev;
+ struct ll_list_t *next;
+} ll_list_t;
+
+/* List header */
+typedef struct ll_head_t {
+ ll_list_t h;
+ int size;
+ int hwm; /* Size high water mark */
+} ll_head_t;
+
+/* Worker thread context */
+typedef struct {
+ pthread_mutex_t mutex;
+ pthread_cond_t cv;
+ pthread_t tid;
+ ll_list_t q; /* The element chained in waiting_threads/busy_threads */
+ struct conn *conn; /* The connection on which there are job to process */
+ Slapi_Operation *op;
+ int closing;
+ int idx;
+ struct snmp_vars_t snmp_vars; /* The snmp counters */
+} pc_tinfo_t;
+
+/* Monitoring statistics */
+typedef struct {
+ int waitingthreads;
+ int busythreads;
+ int maxbusythreads;
+ int waitingjobs;
+ int maxwaitingjobs;
+} op_thread_stats_t;
+
+/* Operation worker thread Producer/Consumer global context */
+typedef struct {
+ pthread_mutex_t mutex;
+ ll_head_t waiting_threads;
+ ll_head_t busy_threads;
+ ll_head_t waiting_jobs;
+ ll_head_t jobs_free_list;
+ int shutdown;
+ int nbcpus;
+ pthread_key_t tinfo_key;
+ void (*threadnumber_cb)(int);
+ void (*getstats_cb)(op_thread_stats_t*);
+ struct {
+ pthread_mutex_t mutex;
+ int nbthreads;
+ pc_tinfo_t **threads;
+ } snmp;
+} pc_t;
+
+/* Connection status values returned by
+ connection_wait_for_new_work(), connection_read_operation(), etc. */
+typedef enum {
+ CONN_FOUND_WORK_TO_DO,
+ CONN_SHUTDOWN,
+ CONN_NOWORK,
+ CONN_DONE,
+ CONN_TIMEDOUT,
+} conn_status_t;
+
+/* Defined in libglobs.c */
+extern pc_t g_pc;
+pc_tinfo_t *g_get_thread_info();
+
+
+#endif /* _WTHREADS_H_ */
diff --git a/src/lib389/lib389/monitor.py b/src/lib389/lib389/monitor.py
index 8f9d0001b..79273d6fe 100644
--- a/src/lib389/lib389/monitor.py
+++ b/src/lib389/lib389/monitor.py
@@ -46,13 +46,27 @@ class Monitor(DSLdapObject):
def get_threads(self):
"""Get thread related attributes value for cn=monitor
- :returns: Values of threads, currentconnectionsatmaxthreads, and
- maxthreadsperconnhits attributes of cn=monitor
+ :returns: Values of following attributes of cn=monitor:
+ threads,
+ currentconnectionsatmaxthreads,
+ maxthreadsperconnhits,
+ waitingthreads,
+ busythreads,
+ maxbusythreads,
+ waitingjobs,
+ maxwaitingjobs
"""
- threads = self.get_attr_vals_utf8('threads')
- currentconnectionsatmaxthreads = self.get_attr_vals_utf8('currentconnectionsatmaxthreads')
- maxthreadsperconnhits = self.get_attr_vals_utf8('maxthreadsperconnhits')
- return (threads, currentconnectionsatmaxthreads, maxthreadsperconnhits)
+ attrnames = (
+ 'threads',
+ 'currentconnectionsatmaxthreads',
+ 'maxthreadsperconnhits',
+ 'waitingthreads',
+ 'busythreads',
+ 'maxbusythreads',
+ 'waitingjobs',
+ 'maxwaitingjobs',
+ )
+ return tuple(self.get_attr_vals_utf8(attr) for attr in attrnames)
def get_backends(self):
"""Get backends related attributes value for cn=monitor
@@ -104,6 +118,11 @@ class Monitor(DSLdapObject):
'currenttime',
'starttime',
'nbackends',
+ 'waitingthreads',
+ 'busythreads',
+ 'maxbusythreads',
+ 'waitingjobs',
+ 'maxwaitingjobs',
])
| 0 |
f2eb45bdd4fa8a0ad56c398ea8c7f370da8870f5
|
389ds/389-ds-base
|
Ticket 47810 - investigate betxn plugins to ensure they
return the correct error code
Bug Description: Prior to backend transaction plugin types, postop error
codes didn't really have an effect, so often times a success
result was implicitly being returned.
Fix Description: Alawys return the correct error code for all backend transaction
plugins.
https://fedorahosted.org/389/ticket/47810
Jenkins: Passed
TET: Passed
Reviewed by: nhosoi(Thanks!)
|
commit f2eb45bdd4fa8a0ad56c398ea8c7f370da8870f5
Author: Mark Reynolds <[email protected]>
Date: Wed Jun 18 08:38:26 2014 -0400
Ticket 47810 - investigate betxn plugins to ensure they
return the correct error code
Bug Description: Prior to backend transaction plugin types, postop error
codes didn't really have an effect, so often times a success
result was implicitly being returned.
Fix Description: Alawys return the correct error code for all backend transaction
plugins.
https://fedorahosted.org/389/ticket/47810
Jenkins: Passed
TET: Passed
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/servers/plugins/linkedattrs/linked_attrs.c b/ldap/servers/plugins/linkedattrs/linked_attrs.c
index 73247e5fa..20bb9fa23 100644
--- a/ldap/servers/plugins/linkedattrs/linked_attrs.c
+++ b/ldap/servers/plugins/linkedattrs/linked_attrs.c
@@ -55,6 +55,8 @@ static Slapi_RWLock *g_config_lock;
static void *_PluginID = NULL;
static char *_PluginDN = NULL;
int plugin_is_betxn = 0;
+/* For future use - enforce all linked attribute operations succeed */
+static int strict_results = 0;
static Slapi_PluginDesc pdesc = { LINK_FEATURE_DESC,
VENDOR,
@@ -106,13 +108,13 @@ static int linked_attrs_config_exists_reverse(struct configEntry *entry);
static int linked_attrs_oktodo(Slapi_PBlock *pb);
void linked_attrs_load_array(Slapi_Value **array, Slapi_Attr *attr);
int linked_attrs_compare(const void *a, const void *b);
-static void linked_attrs_add_backpointers(char *linkdn, struct configEntry *config,
+static int linked_attrs_add_backpointers(char *linkdn, struct configEntry *config,
Slapi_Mod *smod);
-static void linked_attrs_del_backpointers(Slapi_PBlock *pb, char *linkdn,
+static int linked_attrs_del_backpointers(Slapi_PBlock *pb, char *linkdn,
struct configEntry *config, Slapi_Mod *smod);
-static void linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn,
+static int linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn,
struct configEntry *config, Slapi_Mod *smod);
-static void linked_attrs_mod_backpointers(char *linkdn, char *type, char *scope,
+static int linked_attrs_mod_backpointers(char *linkdn, char *type, char *scope,
int modop, Slapi_ValueSet *targetvals);
/*
@@ -1247,17 +1249,19 @@ linked_attrs_compare(const void *a, const void *b)
* Adds backpointers pointing to dn to the entries referred to
* by the values in smod.
*/
-static void
+static int
linked_attrs_add_backpointers(char *linkdn, struct configEntry *config,
Slapi_Mod *smod)
{
Slapi_ValueSet *vals = slapi_valueset_new();
+ int rc = LDAP_SUCCESS;
slapi_valueset_set_from_smod(vals, smod);
- linked_attrs_mod_backpointers(linkdn, config->managedtype, config->scope,
+ rc = linked_attrs_mod_backpointers(linkdn, config->managedtype, config->scope,
LDAP_MOD_ADD, vals);
slapi_valueset_free(vals);
+ return rc;
}
/*
@@ -1266,11 +1270,12 @@ linked_attrs_add_backpointers(char *linkdn, struct configEntry *config,
* Remove backpointers pointing to linkdn in the entries referred
* to by the values in smod.
*/
-static void
+static int
linked_attrs_del_backpointers(Slapi_PBlock *pb, char *linkdn,
struct configEntry *config, Slapi_Mod *smod)
{
Slapi_ValueSet *vals = NULL;
+ int rc = LDAP_SUCCESS;
/* If no values are listed in the smod, we need to get
* a list of all of the values that were deleted by
@@ -1287,10 +1292,11 @@ linked_attrs_del_backpointers(Slapi_PBlock *pb, char *linkdn,
slapi_valueset_set_from_smod(vals, smod);
}
- linked_attrs_mod_backpointers(linkdn, config->managedtype, config->scope,
+ rc = linked_attrs_mod_backpointers(linkdn, config->managedtype, config->scope,
LDAP_MOD_DELETE, vals);
-
slapi_valueset_free(vals);
+
+ return rc;
}
/*
@@ -1301,7 +1307,7 @@ linked_attrs_del_backpointers(Slapi_PBlock *pb, char *linkdn,
* for any new values that were added as a part of the
* replace operation.
*/
-static void
+static int
linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn,
struct configEntry *config, Slapi_Mod *smod)
{
@@ -1309,6 +1315,7 @@ linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn,
Slapi_Entry *post_e = NULL;
Slapi_Attr *pre_attr = 0;
Slapi_Attr *post_attr = 0;
+ int rc = LDAP_SUCCESS;
/* Get the pre and post copy of the entry to see
* what values have been added and removed. */
@@ -1404,13 +1411,13 @@ linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn,
/* Perform the actual updates to the target entries. */
if (delvals) {
- linked_attrs_mod_backpointers(linkdn, config->managedtype,
+ rc = linked_attrs_mod_backpointers(linkdn, config->managedtype,
config->scope, LDAP_MOD_DELETE, delvals);
slapi_valueset_free(delvals);
}
- if (addvals) {
- linked_attrs_mod_backpointers(linkdn, config->managedtype,
+ if (rc == LDAP_SUCCESS && addvals) {
+ rc = linked_attrs_mod_backpointers(linkdn, config->managedtype,
config->scope, LDAP_MOD_ADD, addvals);
slapi_valueset_free(addvals);
}
@@ -1418,6 +1425,8 @@ linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn,
slapi_ch_free((void **)&pre_array);
slapi_ch_free((void **)&post_array);
}
+
+ return rc;
}
/*
@@ -1425,7 +1434,7 @@ linked_attrs_replace_backpointers(Slapi_PBlock *pb, char *linkdn,
*
* Performs backpointer management.
*/
-static void
+static int
linked_attrs_mod_backpointers(char *linkdn, char *type,
char *scope, int modop, Slapi_ValueSet *targetvals)
{
@@ -1435,6 +1444,7 @@ linked_attrs_mod_backpointers(char *linkdn, char *type,
LDAPMod mod;
LDAPMod *mods[2];
Slapi_Value *targetval = NULL;
+ int rc = LDAP_SUCCESS;
/* Setup the modify operation. Only the target will
* change, so we only need to do this once. */
@@ -1481,7 +1491,14 @@ linked_attrs_mod_backpointers(char *linkdn, char *type,
slapi_modify_internal_set_pb_ext(mod_pb, targetsdn, mods, 0, 0,
linked_attrs_get_plugin_id(), 0);
slapi_modify_internal_pb(mod_pb);
-
+ if (strict_results){
+ /* we are enforcing strict results, so return the error */
+ slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if(rc != LDAP_SUCCESS){
+ slapi_sdn_free(&targetsdn);
+ break;
+ }
+ }
/* Initialize the pblock so we can reuse it. */
slapi_pblock_init(mod_pb);
}
@@ -1491,6 +1508,8 @@ linked_attrs_mod_backpointers(char *linkdn, char *type,
}
slapi_pblock_destroy(mod_pb);
+
+ return rc;
}
@@ -1607,6 +1626,7 @@ linked_attrs_mod_post_op(Slapi_PBlock *pb)
char *dn = NULL;
struct configEntry *config = NULL;
void *caller_id = NULL;
+ int rc = SLAPI_PLUGIN_SUCCESS;
slapi_log_error(SLAPI_LOG_TRACE, LINK_PLUGIN_SUBSYSTEM,
"--> linked_attrs_mod_post_op\n");
@@ -1662,27 +1682,36 @@ linked_attrs_mod_post_op(Slapi_PBlock *pb)
case LDAP_MOD_ADD:
/* Find the entries pointed to by the new
* values and add the backpointers. */
- linked_attrs_add_backpointers(dn, config, smod);
+ rc = linked_attrs_add_backpointers(dn, config, smod);
break;
case LDAP_MOD_DELETE:
/* Find the entries pointed to by the deleted
* values and remove the backpointers. */
- linked_attrs_del_backpointers(pb, dn, config, smod);
+ rc = linked_attrs_del_backpointers(pb, dn, config, smod);
break;
case LDAP_MOD_REPLACE:
/* Find the entries pointed to by the deleted
* values and remove the backpointers. If
* any new values are being added, find those
* entries and add the backpointers. */
- linked_attrs_replace_backpointers(pb, dn, config, smod);
+ rc = linked_attrs_replace_backpointers(pb, dn, config, smod);
break;
default:
slapi_log_error(SLAPI_LOG_PLUGIN, LINK_PLUGIN_SUBSYSTEM,
"linked_attrs_mod_post_op: unknown mod type\n" );
+ rc = SLAPI_PLUGIN_FAILURE;
break;
}
slapi_unlock_mutex(config->lock);
+ if(rc != LDAP_SUCCESS){
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
+ "linked_attrs_mod_post_op - update failed (%d)\n",rc);
+ linked_attrs_unlock();
+ slapi_mod_done(next_mod);
+ break;
+ }
+
}
config = NULL;
@@ -1695,10 +1724,14 @@ linked_attrs_mod_post_op(Slapi_PBlock *pb)
slapi_mods_free(&smods);
}
+ if (rc) {
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ }
slapi_log_error(SLAPI_LOG_TRACE, LINK_PLUGIN_SUBSYSTEM,
- "<-- linked_attrs_mod_post_op\n");
+ "<-- linked_attrs_mod_post_op (%d)\n", rc);
- return SLAPI_PLUGIN_SUCCESS;
+ return rc;
}
static int
@@ -1706,6 +1739,7 @@ linked_attrs_add_post_op(Slapi_PBlock *pb)
{
Slapi_Entry *e = NULL;
char *dn = NULL;
+ int rc = SLAPI_PLUGIN_SUCCESS;
slapi_log_error(SLAPI_LOG_TRACE, LINK_PLUGIN_SUBSYSTEM,
"--> linked_attrs_add_post_op\n");
@@ -1748,15 +1782,19 @@ linked_attrs_add_post_op(Slapi_PBlock *pb)
Slapi_ValueSet *vals = NULL;
slapi_attr_get_valueset(attr, &vals);
-
slapi_lock_mutex(config->lock);
- linked_attrs_mod_backpointers(dn, config->managedtype,
+ rc = linked_attrs_mod_backpointers(dn, config->managedtype,
config->scope, LDAP_MOD_ADD, vals);
slapi_unlock_mutex(config->lock);
-
slapi_valueset_free(vals);
+ if(rc != LDAP_SUCCESS){
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
+ "linked_attrs_add_post_op: update failed (%d)\n",rc);
+ linked_attrs_unlock();
+ break;
+ }
}
config = NULL;
@@ -1770,10 +1808,14 @@ linked_attrs_add_post_op(Slapi_PBlock *pb)
"retrieving post-op entry %s\n", dn);
}
+ if (rc) {
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ }
slapi_log_error(SLAPI_LOG_TRACE, LINK_PLUGIN_SUBSYSTEM,
"<-- linked_attrs_add_post_op\n");
- return SLAPI_PLUGIN_SUCCESS;
+ return rc;
}
static int
@@ -1781,13 +1823,14 @@ linked_attrs_del_post_op(Slapi_PBlock *pb)
{
char *dn = NULL;
Slapi_Entry *e = NULL;
+ int rc = SLAPI_PLUGIN_SUCCESS;
slapi_log_error(SLAPI_LOG_TRACE, LINK_PLUGIN_SUBSYSTEM,
"--> linked_attrs_del_post_op\n");
/* Just bail if we aren't ready to service requests yet. */
if (!linked_attrs_oktodo(pb)){
- return SLAPI_PLUGIN_SUCCESS;
+ return rc;
}
/* Reload config if a config entry was deleted. */
@@ -1818,7 +1861,7 @@ linked_attrs_del_post_op(Slapi_PBlock *pb)
/* Bail out if the plug-in close function was just called. */
if (!slapi_plugin_running(pb)) {
linked_attrs_unlock();
- return SLAPI_PLUGIN_SUCCESS;
+ return rc;
}
linked_attrs_find_config(dn, type, &config);
@@ -1828,15 +1871,19 @@ linked_attrs_del_post_op(Slapi_PBlock *pb)
Slapi_ValueSet *vals = NULL;
slapi_attr_get_valueset(attr, &vals);
-
slapi_lock_mutex(config->lock);
- linked_attrs_mod_backpointers(dn, config->managedtype,
+ rc = linked_attrs_mod_backpointers(dn, config->managedtype,
config->scope, LDAP_MOD_DELETE, vals);
slapi_unlock_mutex(config->lock);
-
slapi_valueset_free(vals);
+ if (rc != LDAP_SUCCESS){
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
+ "linked_attrs_del_post_op - update failed (%d)\n",rc);
+ linked_attrs_unlock();
+ break;
+ }
}
config = NULL;
@@ -1855,18 +1902,23 @@ linked_attrs_del_post_op(Slapi_PBlock *pb)
if (config) {
Slapi_ValueSet *vals = slapi_valueset_new();
- slapi_valueset_add_value(vals, val);
+ slapi_valueset_add_value(vals, val);
slapi_lock_mutex(config->lock);
/* Delete forward link value. */
- linked_attrs_mod_backpointers(dn, config->linktype,
- config->scope, LDAP_MOD_DELETE, vals);
+ rc = linked_attrs_mod_backpointers(dn, config->linktype,
+ config->scope, LDAP_MOD_DELETE, vals);
slapi_unlock_mutex(config->lock);
-
slapi_valueset_free(vals);
config = NULL;
+ if(rc != LDAP_SUCCESS){
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
+ "linked_attrs_del_post_op: update failed (%d)\n",rc);
+ linked_attrs_unlock();
+ goto bail;
+ }
}
hint = slapi_attr_next_value(attr, hint, &val);
@@ -1878,15 +1930,21 @@ linked_attrs_del_post_op(Slapi_PBlock *pb)
slapi_entry_next_attr(e, attr, &attr);
}
} else {
- slapi_log_error(SLAPI_LOG_PLUGIN, LINK_PLUGIN_SUBSYSTEM,
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
"linked_attrs_del_post_op: Error "
"retrieving pre-op entry %s\n", dn);
+ rc = SLAPI_PLUGIN_FAILURE;
}
+bail:
+ if (rc) {
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ }
slapi_log_error(SLAPI_LOG_TRACE, LINK_PLUGIN_SUBSYSTEM,
"<-- linked_attrs_del_post_op\n");
- return SLAPI_PLUGIN_SUCCESS;
+ return rc;
}
static int
@@ -1955,17 +2013,21 @@ linked_attrs_modrdn_post_op(Slapi_PBlock *pb)
Slapi_ValueSet *vals = NULL;
slapi_attr_get_valueset(attr, &vals);
-
slapi_lock_mutex(config->lock);
/* Delete old dn value. */
- linked_attrs_mod_backpointers(old_dn, config->managedtype,
+ rc = linked_attrs_mod_backpointers(old_dn, config->managedtype,
config->scope, LDAP_MOD_DELETE, vals);
slapi_unlock_mutex(config->lock);
-
slapi_valueset_free(vals);
config = NULL;
+ if(rc != LDAP_SUCCESS){
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
+ "linked_attrs_modrdn_post_op: update failed(old type) (%d)\n",rc);
+ linked_attrs_unlock();
+ break;
+ }
}
linked_attrs_find_config(new_dn, type, &config);
@@ -1978,17 +2040,21 @@ linked_attrs_modrdn_post_op(Slapi_PBlock *pb)
Slapi_ValueSet *vals = NULL;
slapi_attr_get_valueset(attr, &vals);
-
slapi_lock_mutex(config->lock);
/* Add new dn value. */
- linked_attrs_mod_backpointers(new_dn, config->managedtype,
- config->scope, LDAP_MOD_ADD, vals);
+ rc = linked_attrs_mod_backpointers(new_dn, config->managedtype,
+ config->scope, LDAP_MOD_ADD, vals);
slapi_unlock_mutex(config->lock);
-
slapi_valueset_free(vals);
config = NULL;
+ if(rc != LDAP_SUCCESS){
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
+ "linked_attrs_modrdn_post_op: update failed(new type) (%d)\n",rc);
+ linked_attrs_unlock();
+ break;
+ }
}
/* See if any of the values for this attribute are managed
@@ -2006,22 +2072,35 @@ linked_attrs_modrdn_post_op(Slapi_PBlock *pb)
/* If the new DN is within scope, we should fixup the forward links. */
if (config && slapi_dn_issuffix(new_dn, (config->scope))) {
Slapi_ValueSet *vals = slapi_valueset_new();
- slapi_valueset_add_value(vals, val);
+ slapi_valueset_add_value(vals, val);
slapi_lock_mutex(config->lock);
/* Delete old dn value. */
- linked_attrs_mod_backpointers(old_dn, config->linktype,
+ rc = linked_attrs_mod_backpointers(old_dn, config->linktype,
config->scope, LDAP_MOD_DELETE, vals);
+ if(rc != LDAP_SUCCESS){
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
+ "linked_attrs_modrdn_post_op: update failed(old dn) (%d)\n",rc);
+ slapi_unlock_mutex(config->lock);
+ slapi_valueset_free(vals);
+ linked_attrs_unlock();
+ goto done;
+ }
/* Add new dn value. */
- linked_attrs_mod_backpointers(new_dn, config->linktype,
- config->scope, LDAP_MOD_ADD, vals);
+ rc = linked_attrs_mod_backpointers(new_dn, config->linktype,
+ config->scope, LDAP_MOD_ADD, vals);
slapi_unlock_mutex(config->lock);
-
slapi_valueset_free(vals);
config = NULL;
+ if(rc != LDAP_SUCCESS){
+ slapi_log_error(SLAPI_LOG_FATAL, LINK_PLUGIN_SUBSYSTEM,
+ "linked_attrs_modrdn_post_op: update failed(new dn) (%d)\n",rc);
+ linked_attrs_unlock();
+ goto done;
+ }
}
hint = slapi_attr_next_value(attr, hint, &val);
@@ -2032,6 +2111,7 @@ linked_attrs_modrdn_post_op(Slapi_PBlock *pb)
slapi_entry_next_attr(post_e, attr, &attr);
}
+
done:
slapi_log_error(SLAPI_LOG_TRACE, LINK_PLUGIN_SUBSYSTEM,
"<-- linked_attrs_modrdn_post_op\n");
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 1073b8e37..11ac96585 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -147,7 +147,7 @@ static void memberof_set_plugin_id(void * plugin_id);
static int memberof_compare(MemberOfConfig *config, const void *a, const void *b);
static int memberof_qsort_compare(const void *a, const void *b);
static void memberof_load_array(Slapi_Value **array, Slapi_Attr *attr);
-static void memberof_del_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN *sdn);
+static int memberof_del_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN *sdn);
static int memberof_call_foreach_dn(Slapi_PBlock *pb, Slapi_DN *sdn,
char **types, plugin_search_entry_callback callback, void *callback_data);
static int memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn,
@@ -162,7 +162,7 @@ static int memberof_test_membership(Slapi_PBlock *pb, MemberOfConfig *config,
static int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data);
static int memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data);
static int memberof_replace_dn_type_callback(Slapi_Entry *e, void *callback_data);
-static void memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config,
+static int memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config,
Slapi_DN *pre_sdn, Slapi_DN *post_sdn);
static int memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config,
int mod_op, Slapi_DN *group_sdn, Slapi_DN *op_this_sdn,
@@ -556,7 +556,14 @@ int memberof_postop_del(Slapi_PBlock *pb)
/* remove this DN from the
* membership lists of groups
*/
- memberof_del_dn_from_groups(pb, &configCopy, sdn);
+ if((ret = memberof_del_dn_from_groups(pb, &configCopy, sdn))){
+ slapi_log_error(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_del: error deleting dn (%s) from group. Error (%d)",
+ slapi_sdn_get_dn(sdn),ret);
+ memberof_unlock();
+ memberof_free_config(&configCopy);
+ goto bail;
+ }
/* is the entry of interest as a group? */
if(e && configCopy.group_filter && !slapi_filter_test_simple(e, configCopy.group_filter))
@@ -565,20 +572,28 @@ int memberof_postop_del(Slapi_PBlock *pb)
Slapi_Attr *attr = 0;
/* Loop through to find each grouping attribute separately. */
- for (i = 0; configCopy.groupattrs[i]; i++)
+ for (i = 0; configCopy.groupattrs[i] && ret == LDAP_SUCCESS; i++)
{
if (0 == slapi_entry_attr_find(e, configCopy.groupattrs[i], &attr))
{
- memberof_del_attr_list(pb, &configCopy, sdn, attr);
+ if((ret = memberof_del_attr_list(pb, &configCopy, sdn, attr))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_del: error deleting attr list - dn (%s). Error (%d)",
+ slapi_sdn_get_dn(sdn),ret);
+ }
+
}
}
}
-
memberof_unlock();
-
memberof_free_config(&configCopy);
}
+bail:
+ if(ret){
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ret);
+ ret = SLAPI_PLUGIN_FAILURE;
+ }
slapi_log_error( SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
"<-- memberof_postop_del\n" );
return ret;
@@ -591,28 +606,32 @@ typedef struct _memberof_del_dn_data
} memberof_del_dn_data;
/* Deletes a member dn from all groups that refer to it. */
-static void
+static int
memberof_del_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, Slapi_DN *sdn)
{
int i = 0;
char *groupattrs[2] = {0, 0};
+ int rc = LDAP_SUCCESS;
/* Loop through each grouping attribute to find groups that have
* dn as a member. For any matches, delete the dn value from the
* same grouping attribute. */
- for (i = 0; config->groupattrs && config->groupattrs[i]; i++)
+ for (i = 0; config->groupattrs && config->groupattrs[i] && rc == LDAP_SUCCESS; i++)
{
memberof_del_dn_data data = {(char *)slapi_sdn_get_dn(sdn),
config->groupattrs[i]};
groupattrs[0] = config->groupattrs[i];
- memberof_call_foreach_dn(pb, sdn, groupattrs,
- memberof_del_dn_type_callback, &data);
+ rc = memberof_call_foreach_dn(pb, sdn, groupattrs,
+ memberof_del_dn_type_callback, &data);
}
+
+ return rc;
}
-int memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data)
+int
+memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data)
{
int rc = 0;
LDAPMod mod;
@@ -654,7 +673,8 @@ int memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data)
* is a user, you'd want "type" to be "member". If "dn" is a group, you
* could want type to be either "member" or "memberOf" depending on the case.
*/
-int memberof_call_foreach_dn(Slapi_PBlock *pb, Slapi_DN *sdn,
+int
+memberof_call_foreach_dn(Slapi_PBlock *pb, Slapi_DN *sdn,
char **types, plugin_search_entry_callback callback, void *callback_data)
{
Slapi_PBlock *search_pb = NULL;
@@ -763,6 +783,11 @@ int memberof_call_foreach_dn(Slapi_PBlock *pb, Slapi_DN *sdn,
slapi_search_internal_set_pb(search_pb, slapi_sdn_get_dn(base_sdn),
LDAP_SCOPE_SUBTREE, filter_str, 0, 0, 0, 0, memberof_get_plugin_id(), 0);
slapi_search_internal_callback_pb(search_pb, callback_data, 0, callback, 0);
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if(rc != LDAP_SUCCESS){
+ break;
+ }
+
if(!all_backends){
break;
@@ -841,8 +866,12 @@ int memberof_postop_modrdn(Slapi_PBlock *pb)
{
if(0 == slapi_entry_attr_find(post_e, configCopy.groupattrs[i], &attr))
{
- if(memberof_moddn_attr_list(pb, &configCopy, pre_sdn,
- post_sdn, attr) != 0){
+ if((ret = memberof_moddn_attr_list(pb, &configCopy, pre_sdn,
+ post_sdn, attr) != 0))
+ {
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_modrdn - update failed for (%s), error (%d)\n",
+ slapi_sdn_get_dn(pre_sdn), ret);
break;
}
}
@@ -852,13 +881,25 @@ int memberof_postop_modrdn(Slapi_PBlock *pb)
/* It's possible that this is an entry who is a member
* of other group entries. We need to update any member
* attributes to refer to the new name. */
- if (pre_sdn && post_sdn) {
+ if (ret == LDAP_SUCCESS && pre_sdn && post_sdn) {
if (entry_scope && !slapi_sdn_issuffix(post_sdn, entry_scope)) {
memberof_del_dn_data del_data = {0, configCopy.memberof_attr};
- memberof_del_dn_from_groups(pb, &configCopy, pre_sdn);
- memberof_del_dn_type_callback(post_e, &del_data);
+ if((ret = memberof_del_dn_from_groups(pb, &configCopy, pre_sdn))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_modrdn - delete dn failed for (%s), error (%d)\n",
+ slapi_sdn_get_dn(pre_sdn), ret);
+ }
+ if(ret == LDAP_SUCCESS && (ret = memberof_del_dn_type_callback(post_e, &del_data))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_modrdn - delete dn callback failed for (%s), error (%d)\n",
+ slapi_entry_get_dn(post_e), ret);
+ }
} else {
- memberof_replace_dn_from_groups(pb, &configCopy, pre_sdn, post_sdn);
+ if((ret = memberof_replace_dn_from_groups(pb, &configCopy, pre_sdn, post_sdn))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_modrdn - replace dne failed for (%s), error (%d)\n",
+ slapi_sdn_get_dn(pre_sdn), ret);
+ }
}
}
@@ -866,6 +907,10 @@ int memberof_postop_modrdn(Slapi_PBlock *pb)
memberof_free_config(&configCopy);
}
+ if(ret){
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ret);
+ ret = SLAPI_PLUGIN_FAILURE;
+ }
slapi_log_error( SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
"<-- memberof_postop_modrdn\n" );
return ret;
@@ -881,12 +926,13 @@ typedef struct _replace_dn_data
/* Finds any groups that have pre_dn as a member and modifies them to
* to use post_dn instead. */
-static void
+static int
memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config,
Slapi_DN *pre_sdn, Slapi_DN *post_sdn)
{
int i = 0;
char *groupattrs[2] = {0, 0};
+ int ret = LDAP_SUCCESS;
/* Loop through each grouping attribute to find groups that have
* pre_dn as a member. For any matches, replace pre_dn with post_dn
@@ -899,9 +945,15 @@ memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config,
groupattrs[0] = config->groupattrs[i];
- memberof_call_foreach_dn(pb, pre_sdn, groupattrs,
- memberof_replace_dn_type_callback, &data);
+ if((ret = memberof_call_foreach_dn(pb, pre_sdn, groupattrs,
+ memberof_replace_dn_type_callback,
+ &data)))
+ {
+ break;
+ }
}
+
+ return ret;
}
@@ -1068,7 +1120,14 @@ int memberof_postop_modify(Slapi_PBlock *pb)
case LDAP_MOD_ADD:
{
/* add group DN to targets */
- memberof_add_smod_list(pb, &configCopy, sdn, smod);
+ if((ret = memberof_add_smod_list(pb, &configCopy, sdn, smod))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_modify: failed to add dn (%s) to target. "
+ "Error (%d)\n", slapi_sdn_get_dn(sdn), ret );
+ slapi_mod_done(next_mod);
+ memberof_unlock();
+ goto bail;
+ }
break;
}
@@ -1080,12 +1139,26 @@ int memberof_postop_modify(Slapi_PBlock *pb)
* entry, which the replace code deals with. */
if (slapi_mod_get_num_values(smod) == 0)
{
- memberof_replace_list(pb, &configCopy, sdn);
+ if((ret = memberof_replace_list(pb, &configCopy, sdn))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_modify: failed to replace list (%s). "
+ "Error (%d)\n", slapi_sdn_get_dn(sdn), ret );
+ slapi_mod_done(next_mod);
+ memberof_unlock();
+ goto bail;
+ }
}
else
{
/* remove group DN from target values in smod*/
- memberof_del_smod_list(pb, &configCopy, sdn, smod);
+ if((ret = memberof_del_smod_list(pb, &configCopy, sdn, smod))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_modify: failed to remove dn (%s). "
+ "Error (%d)\n", slapi_sdn_get_dn(sdn), ret );
+ slapi_mod_done(next_mod);
+ memberof_unlock();
+ goto bail;
+ }
}
break;
}
@@ -1093,16 +1166,24 @@ int memberof_postop_modify(Slapi_PBlock *pb)
case LDAP_MOD_REPLACE:
{
/* replace current values */
- memberof_replace_list(pb, &configCopy, sdn);
+ if((ret = memberof_replace_list(pb, &configCopy, sdn))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_modify: failed to replace values in dn (%s). "
+ "Error (%d)\n", slapi_sdn_get_dn(sdn), ret );
+ slapi_mod_done(next_mod);
+ memberof_unlock();
+ goto bail;
+ }
break;
}
default:
{
slapi_log_error(
- SLAPI_LOG_PLUGIN,
+ SLAPI_LOG_FATAL,
MEMBEROF_PLUGIN_SUBSYSTEM,
"memberof_postop_modify: unknown mod type\n" );
+ ret = SLAPI_PLUGIN_FAILURE;
break;
}
}
@@ -1114,6 +1195,7 @@ int memberof_postop_modify(Slapi_PBlock *pb)
smod = slapi_mods_get_next_smod(smods, next_mod);
}
+bail:
if (config_copied)
{
memberof_free_config(&configCopy);
@@ -1124,6 +1206,11 @@ int memberof_postop_modify(Slapi_PBlock *pb)
}
done:
+ if(ret){
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ret);
+ ret = SLAPI_PLUGIN_FAILURE;
+ }
+
slapi_log_error( SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
"<-- memberof_postop_modify\n" );
return ret;
@@ -1186,7 +1273,12 @@ int memberof_postop_add(Slapi_PBlock *pb)
{
if(0 == slapi_entry_attr_find(e, configCopy.groupattrs[i], &attr))
{
- memberof_add_attr_list(pb, &configCopy, sdn, attr);
+ if((ret = memberof_add_attr_list(pb, &configCopy, sdn, attr))){
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_postop_add: failed to add dn(%s), error (%d)",
+ slapi_sdn_get_dn(sdn), ret);
+ break;
+ }
}
}
@@ -1196,6 +1288,11 @@ int memberof_postop_add(Slapi_PBlock *pb)
}
}
+ if(ret){
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ret);
+ ret = SLAPI_PLUGIN_FAILURE;
+ }
+
slapi_log_error( SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
"<-- memberof_postop_add\n" );
@@ -1542,7 +1639,7 @@ memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config,
if(LDAP_MOD_DELETE == mod_op || LDAP_MOD_ADD == mod_op)
{
/* find parent groups and replace our member attr */
- memberof_fix_memberof_callback(e, config);
+ rc = memberof_fix_memberof_callback(e, config);
} else {
/* single entry - do mod */
mod_pb = slapi_pblock_new();
@@ -1664,7 +1761,9 @@ int memberof_mod_smod_list(Slapi_PBlock *pb, MemberOfConfig *config, int mod,
strncpy(dn_str, bv->bv_val, (size_t)bv->bv_len);
slapi_sdn_set_dn_byref(sdn, dn_str);
- memberof_modop_one(pb, config, mod, group_sdn, sdn);
+ if((rc = memberof_modop_one(pb, config, mod, group_sdn, sdn))){
+ break;
+ }
bv = slapi_mod_get_next_value(smod);
}
@@ -1743,7 +1842,7 @@ int memberof_mod_attr_list_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod,
op_this_val = slapi_value_new_string(slapi_sdn_get_ndn(op_this_sdn));
slapi_value_set_flags(op_this_val, SLAPI_ATTR_FLAG_NORMALIZED_CIS);
- while(val)
+ while(val && rc == 0)
{
char *dn_str = 0;
struct berval *bv = 0;
@@ -1779,13 +1878,13 @@ int memberof_mod_attr_list_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod,
slapi_sdn_set_normdn_byref(sdn, dn_str); /* dn_str is normalized */
if(mod == LDAP_MOD_REPLACE)
{
- memberof_modop_one_replace_r(pb, config, mod, group_sdn,
+ rc = memberof_modop_one_replace_r(pb, config, mod, group_sdn,
op_this_sdn, group_sdn,
sdn, stack);
}
else
{
- memberof_modop_one_r(pb, config, mod, group_sdn,
+ rc = memberof_modop_one_r(pb, config, mod, group_sdn,
op_this_sdn, sdn, stack);
}
}
diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c
index 76ba827e6..f0b9baefd 100644
--- a/ldap/servers/plugins/mep/mep.c
+++ b/ldap/servers/plugins/mep/mep.c
@@ -108,7 +108,7 @@ static Slapi_Entry *mep_create_managed_entry(struct configEntry *config,
Slapi_Entry *origin);
static int mep_add_managed_entry(struct configEntry *config,
Slapi_Entry *origin);
-static void mep_rename_managed_entry(Slapi_Entry *origin,
+static int mep_rename_managed_entry(Slapi_Entry *origin,
Slapi_DN *new_dn, Slapi_DN *old_dn);
static Slapi_Mods *mep_get_mapped_mods(struct configEntry *config,
Slapi_Entry *origin, char **mapped_dn);
@@ -1450,9 +1450,8 @@ mep_add_managed_entry(struct configEntry *config,
slapi_pblock_init(mod_pb);
/*
- * Add the origin entry objectclass. Do not check the result
- * as we could be here because of a modrdn operation - in which
- * case the objectclass already exists.
+ * Add the origin entry objectclass. A modrdn might result in
+ * an err 20 (type or value exists), in which case just ignore it.
*/
oc_vals[0] = MEP_ORIGIN_OC;
oc_vals[1] = 0;
@@ -1517,7 +1516,7 @@ bail:
* Renames a managed entry and updates the pointer in the
* origin entry.
*/
-static void
+static int
mep_rename_managed_entry(Slapi_Entry *origin,
Slapi_DN *new_dn, Slapi_DN *old_dn)
{
@@ -1582,6 +1581,8 @@ mep_rename_managed_entry(Slapi_Entry *origin,
bail:
slapi_rdn_free(&srdn);
slapi_pblock_destroy(mep_pb);
+
+ return result;
}
/*
@@ -2311,7 +2312,7 @@ mep_mod_post_op(Slapi_PBlock *pb)
char *mapped_dn = NULL;
Slapi_DN *mapped_sdn = NULL;
struct configEntry *config = NULL;
- int result = 0;
+ int result = SLAPI_PLUGIN_SUCCESS;
LDAPMod **mods = NULL;
int i, abort_mod = 1;
@@ -2333,8 +2334,9 @@ mep_mod_post_op(Slapi_PBlock *pb)
* backend, so don't treat the message as fatal. */
slapi_pblock_get(pb, SLAPI_ENTRY_POST_OP, &e);
if (e == NULL) {
- slapi_log_error(SLAPI_LOG_PLUGIN, MEP_PLUGIN_SUBSYSTEM,
+ slapi_log_error(SLAPI_LOG_FATAL, MEP_PLUGIN_SUBSYSTEM,
"mep_mod_post_op: Unable to fetch postop entry.\n");
+ result = SLAPI_PLUGIN_FAILURE;
goto bail;
}
@@ -2408,12 +2410,12 @@ mep_mod_post_op(Slapi_PBlock *pb)
}
/* Check if we need to rename the managed entry. */
- if (mapped_dn) {
+ if (result == SLAPI_PLUGIN_SUCCESS && mapped_dn) {
mapped_sdn = slapi_sdn_new_normdn_passin(mapped_dn);
managed_sdn = slapi_sdn_new_normdn_byref(managed_dn);
if (slapi_sdn_compare(managed_sdn, mapped_sdn) != 0) {
- mep_rename_managed_entry(e, mapped_sdn, managed_sdn);
+ result = mep_rename_managed_entry(e, mapped_sdn, managed_sdn);
}
slapi_sdn_free(&mapped_sdn);
@@ -2428,12 +2430,16 @@ mep_mod_post_op(Slapi_PBlock *pb)
}
}
- bail:
+bail:
+ if(result){
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
+ result = SLAPI_PLUGIN_FAILURE;
+ }
slapi_ch_free_string(&managed_dn);
slapi_log_error(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM,
"<-- mep_mod_post_op\n");
- return SLAPI_PLUGIN_SUCCESS;
+ return result;
}
static int
@@ -2514,6 +2520,7 @@ mep_del_post_op(Slapi_PBlock *pb)
{
Slapi_Entry *e = NULL;
Slapi_DN *sdn = NULL;
+ int result = SLAPI_PLUGIN_SUCCESS;
slapi_log_error(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM,
"--> mep_del_post_op\n");
@@ -2557,7 +2564,12 @@ mep_del_post_op(Slapi_PBlock *pb)
slapi_delete_internal_set_pb(mep_pb, managed_dn, NULL,
NULL, mep_get_plugin_id(), 0);
slapi_delete_internal_pb(mep_pb);
-
+ slapi_pblock_get(mep_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ if(result){
+ slapi_log_error(SLAPI_LOG_FATAL, MEP_PLUGIN_SUBSYSTEM,
+ "mep_del_post_op: failed to delete managed entry "
+ "(%s) - error (%d)\n",managed_dn, result);
+ }
slapi_ch_free_string(&managed_dn);
slapi_pblock_destroy(mep_pb);
}
@@ -2567,10 +2579,15 @@ mep_del_post_op(Slapi_PBlock *pb)
"retrieving pre-op entry %s\n", slapi_sdn_get_dn(sdn));
}
+ if(result){
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
+ result = SLAPI_PLUGIN_FAILURE;
+ }
+
slapi_log_error(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM,
"<-- mep_del_post_op\n");
- return SLAPI_PLUGIN_SUCCESS;
+ return result;
}
static int
@@ -2599,10 +2616,10 @@ mep_modrdn_post_op(Slapi_PBlock *pb)
new_sdn = slapi_entry_get_sdn(post_e);
new_dn = slapi_sdn_get_dn(new_sdn);
} else {
- slapi_log_error(SLAPI_LOG_PLUGIN, MEP_PLUGIN_SUBSYSTEM,
+ slapi_log_error(SLAPI_LOG_FATAL, MEP_PLUGIN_SUBSYSTEM,
"mep_modrdn_post_op: Error "
"retrieving post-op entry\n");
- return SLAPI_PLUGIN_SUCCESS;
+ return SLAPI_PLUGIN_FAILURE;
}
if ((old_sdn = mep_get_sdn(pb))) {
@@ -2660,7 +2677,13 @@ mep_modrdn_post_op(Slapi_PBlock *pb)
slapi_delete_internal_set_pb (mep_pb, managed_dn, NULL, NULL,
mep_get_plugin_id(), 0);
slapi_delete_internal_pb(mep_pb);
-
+ slapi_pblock_get(mep_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+ if(result){
+ slapi_log_error(SLAPI_LOG_FATAL, MEP_PLUGIN_SUBSYSTEM,
+ "mep_modrdn_post_op: failed to delete managed entry "
+ "(%s) - error (%d)\n",managed_dn, result);
+ goto bailmod;
+ }
/* Clear out the pblock for reuse. */
slapi_pblock_init(mep_pb);
@@ -2760,9 +2783,12 @@ mep_modrdn_post_op(Slapi_PBlock *pb)
"entry \"%s\".\n ", managed_dn,
slapi_entry_get_dn(new_managed_entry),
slapi_sdn_get_dn(old_sdn));
- mep_rename_managed_entry(post_e,
+ if((result = mep_rename_managed_entry(post_e,
slapi_entry_get_sdn(new_managed_entry),
- managed_sdn);
+ managed_sdn)))
+ {
+ goto bailmod;
+ }
}
/* Update all of the mapped attributes
@@ -2824,18 +2850,22 @@ bailmod:
if (config) {
if(mep_add_managed_entry(config, post_e)){
char errtxt[SLAPI_DSE_RETURNTEXT_SIZE];
- int rc = LDAP_UNWILLING_TO_PERFORM;
+ result = LDAP_UNWILLING_TO_PERFORM;
PR_snprintf(errtxt, SLAPI_DSE_RETURNTEXT_SIZE,
"Managed Entry Plugin rejected modrdn operation (see errors log).\n");
slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
- slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
- result = SLAPI_PLUGIN_FAILURE;
+
}
}
mep_config_unlock();
}
+
+ if(result){
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &result);
+ result = SLAPI_PLUGIN_FAILURE;
+ }
slapi_log_error(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM,
"<-- mep_modrdn_post_op\n");
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
index a25b9fa2e..476d83413 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptpreop.c
@@ -673,6 +673,7 @@ pam_passthru_postop(Slapi_PBlock *pb)
/* Make sure the operation succeeded and bail if it didn't. */
slapi_pblock_get(pb, SLAPI_PLUGIN_OPRETURN, &oprc);
if (oprc != 0) {
+ ret = oprc;
goto bail;
}
@@ -681,6 +682,7 @@ pam_passthru_postop(Slapi_PBlock *pb)
if (!sdn) {
slapi_log_error(SLAPI_LOG_FATAL, PAM_PASSTHRU_PLUGIN_SUBSYSTEM,
"pam_passthru_postop: unale to fetch target SDN.\n");
+ ret = SLAPI_PLUGIN_FAILURE;
goto bail;
}
@@ -695,6 +697,7 @@ pam_passthru_postop(Slapi_PBlock *pb)
slapi_log_error(SLAPI_LOG_FATAL, PAM_PASSTHRU_PLUGIN_SUBSYSTEM,
"pam_passthru_postop: unable to fetch post-op "
"entry for rename operation.\n");
+ ret = SLAPI_PLUGIN_FAILURE;
goto bail;
}
}
diff --git a/ldap/servers/plugins/retrocl/retrocl_po.c b/ldap/servers/plugins/retrocl/retrocl_po.c
index 0e32d061a..04687da60 100644
--- a/ldap/servers/plugins/retrocl/retrocl_po.c
+++ b/ldap/servers/plugins/retrocl/retrocl_po.c
@@ -150,11 +150,11 @@ static lenstr *make_changes_string(LDAPMod **ldm, const char **includeattrs)
* log_m - pointer to the actual change operation on a modify
* flag - only used by modrdn operations - value of deleteoldrdn
* curtime - the current time
- * Returns: nothing
+ * Returns: error code
* Description: Given a change, construct an entry which is to be added to the
* changelog database.
*/
-static void
+static int
write_replog_db(
Slapi_PBlock *pb,
int optype,
@@ -168,20 +168,21 @@ write_replog_db(
const char *newsuperior
)
{
- char *edn;
- struct berval *vals[ 2 ];
- struct berval val;
- Slapi_Entry *e;
- char chnobuf[ 20 ];
- int err;
- Slapi_PBlock *newPb = NULL;
+ Slapi_PBlock *newPb = NULL;
changeNumber changenum;
- int i;
- int extensibleObject = 0;
+ struct berval *vals[ 2 ];
+ struct berval val;
+ Slapi_Entry *e;
+ char chnobuf[ 20 ];
+ char *edn;
+ int extensibleObject = 0;
+ int err = 0;
+ int ret = LDAP_SUCCESS;
+ int i;
if (!dn) {
slapi_log_error( SLAPI_LOG_PLUGIN, RETROCL_PLUGIN_NAME, "write_replog_db: NULL dn\n");
- return;
+ return ret;
}
PR_Lock(retrocl_internal_lock);
@@ -321,78 +322,79 @@ write_replog_db(
* Finish constructing the entry. How to do it depends on the type
* of modification being logged.
*/
- err = 0;
switch ( optype ) {
case OP_ADD:
- if ( entry2reple( e, log_e, OP_ADD ) != 0 ) {
- err = 1;
- }
- break;
+ if ( entry2reple( e, log_e, OP_ADD ) != 0 ) {
+ err = SLAPI_PLUGIN_FAILURE;
+ }
+ break;
case OP_MODIFY:
- if ( mods2reple( e, log_m ) != 0 ) {
- err = 1;
- }
- break;
+ if ( mods2reple( e, log_m ) != 0 ) {
+ err = SLAPI_PLUGIN_FAILURE;
+ }
+ break;
case OP_MODRDN:
- if ( modrdn2reple( e, newrdn, flag, modrdn_mods, newsuperior ) != 0 ) {
- err = 1;
- }
- break;
+ if ( modrdn2reple( e, newrdn, flag, modrdn_mods, newsuperior ) != 0 ) {
+ err = SLAPI_PLUGIN_FAILURE;
+ }
+ break;
case OP_DELETE:
- if (log_e) {
- /* we have to log the full entry */
- if ( entry2reple( e, log_e, OP_DELETE ) != 0 ) {
- err = 1;
- }
- } else {
- /* Set the changetype attribute */
- val.bv_val = "delete";
- val.bv_len = 6;
- slapi_entry_add_values( e, attr_changetype, vals );
- }
- break;
+ if (log_e) {
+ /* we have to log the full entry */
+ if ( entry2reple( e, log_e, OP_DELETE ) != 0 ) {
+ err = SLAPI_PLUGIN_FAILURE;
+ }
+ } else {
+ /* Set the changetype attribute */
+ val.bv_val = "delete";
+ val.bv_len = 6;
+ slapi_entry_add_values( e, attr_changetype, vals );
+ }
+ break;
+
default:
- slapi_log_error( SLAPI_LOG_FATAL, RETROCL_PLUGIN_NAME, "replog: Unknown LDAP operation type "
- "%d.\n", optype );
- err = 1;
+ slapi_log_error( SLAPI_LOG_FATAL, RETROCL_PLUGIN_NAME,
+ "replog: Unknown LDAP operation type %d.\n", optype );
+ err = SLAPI_PLUGIN_FAILURE;
}
/* Call the repl backend to add this entry */
if ( 0 == err ) {
- int rc;
-
- newPb = slapi_pblock_new ();
- slapi_add_entry_internal_set_pb( newPb, e, NULL /* controls */,
- g_plg_identity[PLUGIN_RETROCL],
- /* dont leave entry in cache if main oparation is aborted */
- SLAPI_OP_FLAG_NEVER_CACHE);
- slapi_add_internal_pb (newPb);
- slapi_pblock_get( newPb, SLAPI_PLUGIN_INTOP_RESULT, &rc );
- slapi_pblock_destroy(newPb);
- if ( 0 != rc ) {
- slapi_log_error( SLAPI_LOG_FATAL, RETROCL_PLUGIN_NAME,
- "replog: an error occured while adding change "
- "number %lu, dn = %s: %s. \n",
- changenum, edn, ldap_err2string( rc ));
- retrocl_release_changenumber();
- } else {
- /* Tell the change numbering system this one's committed to disk */
- retrocl_commit_changenumber();
- }
+ newPb = slapi_pblock_new ();
+ slapi_add_entry_internal_set_pb( newPb, e, NULL /* controls */,
+ g_plg_identity[PLUGIN_RETROCL],
+ /* dont leave entry in cache if main oparation is aborted */
+ SLAPI_OP_FLAG_NEVER_CACHE);
+ slapi_add_internal_pb (newPb);
+ slapi_pblock_get( newPb, SLAPI_PLUGIN_INTOP_RESULT, &ret );
+ slapi_pblock_destroy(newPb);
+ if ( 0 != ret ) {
+ slapi_log_error( SLAPI_LOG_FATAL, RETROCL_PLUGIN_NAME,
+ "replog: an error occured while adding change "
+ "number %lu, dn = %s: %s. \n",
+ changenum, edn, ldap_err2string( ret ));
+ retrocl_release_changenumber();
+
+ } else {
+ /* Tell the change numbering system this one's committed to disk */
+ retrocl_commit_changenumber();
+ }
} else {
- slapi_log_error( SLAPI_LOG_FATAL, RETROCL_PLUGIN_NAME,
- "An error occurred while constructing "
- "change record number %ld.\n", changenum );
- retrocl_release_changenumber();
+ slapi_log_error( SLAPI_LOG_FATAL, RETROCL_PLUGIN_NAME,
+ "An error occurred while constructing "
+ "change record number %ld.\n", changenum );
+ retrocl_release_changenumber();
+ ret = err;
}
PR_Unlock(retrocl_internal_lock);
if ( NULL != edn ) {
- slapi_ch_free((void **) &edn);
+ slapi_ch_free((void **) &edn);
}
+ return ret;
}
@@ -585,7 +587,7 @@ int retrocl_postob (Slapi_PBlock *pb, int optype)
Slapi_DN *newsuperior = NULL;
Slapi_Backend *be = NULL;
time_t curtime;
- int rc;
+ int rc = SLAPI_PLUGIN_SUCCESS;
/*
* Check to see if the change was made to the replication backend db.
@@ -608,10 +610,10 @@ int retrocl_postob (Slapi_PBlock *pb, int optype)
if (rc != LDAP_SUCCESS) {
LDAPDebug1Arg(LDAP_DEBUG_TRACE,"not applying change if op failed %d\n",rc);
- /* this could also mean that the changenumber is no longer correct
- * set a flag to check at next assignment
- */
- retrocl_set_check_changenumber();
+ /* this could also mean that the changenumber is no longer correct
+ * set a flag to check at next assignment
+ */
+ retrocl_set_check_changenumber();
return SLAPI_PLUGIN_SUCCESS;
}
@@ -642,35 +644,44 @@ int retrocl_postob (Slapi_PBlock *pb, int optype)
switch ( optype ) {
case OP_MODIFY:
- (void)slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &log_m );
- break;
+ (void)slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &log_m );
+ break;
case OP_ADD:
- /*
- * For adds, we want the unnormalized dn, so we can preserve
- * spacing, case, when replicating it.
- */
- (void)slapi_pblock_get( pb, SLAPI_ADD_ENTRY, &te );
- if ( NULL != te ) {
- dn = slapi_entry_get_dn( te );
- }
- break;
+ /*
+ * For adds, we want the unnormalized dn, so we can preserve
+ * spacing, case, when replicating it.
+ */
+ (void)slapi_pblock_get( pb, SLAPI_ADD_ENTRY, &te );
+ if ( NULL != te ) {
+ dn = slapi_entry_get_dn( te );
+ }
+ break;
case OP_DELETE:
- if (retrocl_log_deleted)
- (void)slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &te);
+ if (retrocl_log_deleted)
+ (void)slapi_pblock_get(pb, SLAPI_ENTRY_PRE_OP, &te);
break;
case OP_MODRDN:
- /* newrdn is used just for logging; no need to be normalized */
- (void)slapi_pblock_get( pb, SLAPI_MODRDN_NEWRDN, &newrdn );
- (void)slapi_pblock_get( pb, SLAPI_MODRDN_DELOLDRDN, &flag );
- (void)slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &modrdn_mods );
- (void)slapi_pblock_get( pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, &newsuperior );
- break;
+ /* newrdn is used just for logging; no need to be normalized */
+ (void)slapi_pblock_get( pb, SLAPI_MODRDN_NEWRDN, &newrdn );
+ (void)slapi_pblock_get( pb, SLAPI_MODRDN_DELOLDRDN, &flag );
+ (void)slapi_pblock_get( pb, SLAPI_MODIFY_MODS, &modrdn_mods );
+ (void)slapi_pblock_get( pb, SLAPI_MODRDN_NEWSUPERIOR_SDN, &newsuperior );
+ break;
}
/* check if we should log change to retro changelog, and
* if so, do it here */
- write_replog_db( pb, optype, dn, log_m, flag, curtime, te,
- newrdn, modrdn_mods, slapi_sdn_get_dn(newsuperior) );
+ if((rc = write_replog_db( pb, optype, dn, log_m, flag, curtime, te,
+ newrdn, modrdn_mods, slapi_sdn_get_dn(newsuperior) )))
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, "retrocl-plugin",
+ "retrocl_postob: operation failure [%d]\n", rc);
+ if(rc < 0){
+ rc = LDAP_OPERATIONS_ERROR;
+ }
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ rc = SLAPI_PLUGIN_FAILURE;
+ }
- return 0;
+ return rc;
}
| 0 |
7afaf4974625c0d80d81cfbedbfe9635f21d5a57
|
389ds/389-ds-base
|
Resolves: #253069
Summary: cyclic dependency from getpwnam() in log rotation code
Description: Moved getpwnam call to the startup time, store the info in
slapdFrontendConfig to reuse.
|
commit 7afaf4974625c0d80d81cfbedbfe9635f21d5a57
Author: Noriko Hosoi <[email protected]>
Date: Fri Aug 17 02:12:37 2007 +0000
Resolves: #253069
Summary: cyclic dependency from getpwnam() in log rotation code
Description: Moved getpwnam call to the startup time, store the info in
slapdFrontendConfig to reuse.
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 2e03e2972..f91d15516 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -2500,9 +2500,20 @@ config_set_localuser( const char *attrname, char *value, char *errorbuf, int app
}
if (apply) {
+ struct passwd *pw = NULL;
CFG_LOCK_WRITE(slapdFrontendConfig);
slapi_ch_free ( (void **) &slapdFrontendConfig->localuser );
slapdFrontendConfig->localuser = slapi_ch_strdup ( value );
+ if (slapdFrontendConfig->localuserinfo != NULL) {
+ slapi_ch_free ( (void **) &(slapdFrontendConfig->localuserinfo) );
+ }
+ pw = getpwnam( value );
+ if ( pw ) {
+ slapdFrontendConfig->localuserinfo =
+ (struct passwd *)slapi_ch_malloc(sizeof(struct passwd));
+ memcpy(slapdFrontendConfig->localuserinfo, pw, sizeof(struct passwd));
+ }
+
CFG_UNLOCK_WRITE(slapdFrontendConfig);
}
return retVal;
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 1e2efa858..4480bf020 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -3480,9 +3480,9 @@ log__open_errorlogfile(int logfile_state, int locked)
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
#ifndef _WIN32
- if ( slapdFrontendConfig->localuser != NULL ) {
- if ( (pw = getpwnam( slapdFrontendConfig->localuser )) == NULL )
- return LOG_UNABLE_TO_OPENFILE;
+ if ( slapdFrontendConfig->localuser != NULL &&
+ slapdFrontendConfig->localuserinfo != NULL ) {
+ pw = slapdFrontendConfig->localuserinfo;
}
else {
return LOG_UNABLE_TO_OPENFILE;
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 9a5987dcf..5723db446 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -247,8 +247,8 @@ chown_dir_files(char *name, struct passwd *pw, PRBool strip_fn)
/* change the owner for each of the files in the dir */
while( (entry = PR_ReadDir(dir , PR_SKIP_BOTH )) !=NULL )
{
- PR_snprintf(file,MAXPATHLEN+1,"%s/%s",log,entry->name);
- slapd_chown_if_not_owner( file, pw->pw_uid, -1 );
+ PR_snprintf(file,MAXPATHLEN+1,"%s/%s",log,entry->name);
+ slapd_chown_if_not_owner( file, pw->pw_uid, -1 );
}
PR_CloseDir( dir );
}
@@ -267,14 +267,23 @@ fix_ownership()
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
-
- if ( slapdFrontendConfig->localuser != NULL ) {
- if ( (pw = getpwnam( slapdFrontendConfig->localuser )) == NULL )
- return;
- }
- else {
- return;
+ if (slapdFrontendConfig->localuser != NULL) {
+ if (slapdFrontendConfig->localuserinfo == NULL) {
+ pw = getpwnam( slapdFrontendConfig->localuser );
+ if ( NULL == pw ) {
+ LDAPDebug(LDAP_DEBUG_ANY,
+ "Unable to find user %s in system account database, "
+ "errno %d (%s)\n",
+ slapdFrontendConfig->localuser, errno, strerror(errno));
+ return;
+ }
+ slapdFrontendConfig->localuserinfo =
+ (struct passwd *)slapi_ch_malloc(sizeof(struct passwd));
+ memcpy(slapdFrontendConfig->localuserinfo, pw, sizeof(struct passwd));
+ }
+ pw = slapdFrontendConfig->localuserinfo;
}
+
/* config directory needs to be owned by the local user */
if (slapdFrontendConfig->configdir) {
chown_dir_files(slapdFrontendConfig->configdir, pw, PR_FALSE);
diff --git a/ldap/servers/slapd/protect_db.c b/ldap/servers/slapd/protect_db.c
index e234450a1..ccef1bb3a 100644
--- a/ldap/servers/slapd/protect_db.c
+++ b/ldap/servers/slapd/protect_db.c
@@ -201,17 +201,16 @@ make_sure_dir_exists(char *dir)
}
/* Make sure it's owned by the correct user */
- if (slapdFrontendConfig->localuser != NULL) {
- if ( (pw = getpwnam(slapdFrontendConfig->localuser)) == NULL ) {
- LDAPDebug(LDAP_DEBUG_ANY, GETPWNAM_WARNING, slapdFrontendConfig->localuser, errno, strerror(errno));
- } else {
+ if (slapdFrontendConfig->localuser != NULL &&
+ slapdFrontendConfig->localuserinfo != NULL) {
+ pw = slapdFrontendConfig->localuserinfo;
if (chown(dir, pw->pw_uid, -1) == -1) {
stat(dir, &stat_buffer);
if (stat_buffer.st_uid != pw->pw_uid) {
LDAPDebug(LDAP_DEBUG_ANY, CHOWN_WARNING, dir, 0, 0);
+ return 1;
}
}
- } /* else */
}
return 0;
@@ -233,24 +232,23 @@ add_this_process_to(char *dir_name)
file_name[sizeof(file_name)-1] = (char)0;
if ((prfd = PR_Open(file_name, PR_RDWR | PR_CREATE_FILE, 0666)) == NULL) {
- LDAPDebug(LDAP_DEBUG_ANY, FILE_CREATE_WARNING, file_name, 0, 0);
- return;
+ LDAPDebug(LDAP_DEBUG_ANY, FILE_CREATE_WARNING, file_name, 0, 0);
+ return;
}
/* Make sure the owner is of the file is the user the server
* runs as. */
- if (slapdFrontendConfig->localuser != NULL) {
- if ( (pw = getpwnam(slapdFrontendConfig->localuser)) == NULL ) {
- LDAPDebug(LDAP_DEBUG_ANY, GETPWNAM_WARNING, slapdFrontendConfig->localuser, errno, strerror(errno));
- } else {
+ if (slapdFrontendConfig->localuser != NULL &&
+ slapdFrontendConfig->localuserinfo != NULL) {
+ pw = slapdFrontendConfig->localuserinfo;
if (chown(file_name, pw->pw_uid, -1) == -1) {
stat(file_name, &stat_buffer);
if (stat_buffer.st_uid != pw->pw_uid) {
LDAPDebug(LDAP_DEBUG_ANY, CHOWN_WARNING, file_name, 0, 0);
}
}
- } /* else */
}
+bail:
PR_Close(prfd);
}
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 4f9c91acd..2a9753b0b 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1925,6 +1925,9 @@ typedef struct _slapdFrontendConfig {
char *ldapi_gidnumber_type; /* type that contains gid number */
char *ldapi_search_base_dn; /* base dn to search for mapped entries */
char *ldapi_auto_dn_suffix; /* suffix to be appended to auto gen DNs */
+#ifndef _WIN32
+ struct passwd *localuserinfo; /* userinfo of localuser */
+#endif /* _WIN32 */
} slapdFrontendConfig_t;
#define SLAPD_FULL 0
| 0 |
e66c4cecc47eff659a72a51c1e1722fb41c1dfbc
|
389ds/389-ds-base
|
Ticket #47596 attrcrypt fails to find unlocked key
https://fedorahosted.org/389/ticket/47596
Reviewed by: nkinder (Thanks!)
Branch: master
Fix Description: Additional fix to the previous fix. As it turns out, the
function PK11_IsLoggedIn() only returns true if the slot has been unlocked
with a pin or password. If the slot does not need a login at all, because
the cert/key db has no password, PK11_IsLoggedIn will return false. The code
must check for PK11_NeedLogin too.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
|
commit e66c4cecc47eff659a72a51c1e1722fb41c1dfbc
Author: Rich Megginson <[email protected]>
Date: Tue Nov 26 08:14:07 2013 -0700
Ticket #47596 attrcrypt fails to find unlocked key
https://fedorahosted.org/389/ticket/47596
Reviewed by: nkinder (Thanks!)
Branch: master
Fix Description: Additional fix to the previous fix. As it turns out, the
function PK11_IsLoggedIn() only returns true if the slot has been unlocked
with a pin or password. If the slot does not need a login at all, because
the cert/key db has no password, PK11_IsLoggedIn will return false. The code
must check for PK11_NeedLogin too.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index 8b80acbc9..61809aa5f 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -1602,7 +1602,7 @@ slapd_get_unlocked_key_for_cert(CERTCertificate *cert, void *pin_arg)
slapi_log_error(SLAPI_LOG_TRACE, "slapd_get_unlocked_key_for_cert",
"Missing slot for slot list element for certificate [%s]\n",
certsubject);
- } else if (PK11_IsLoggedIn(slot, pin_arg)) {
+ } else if (!PK11_NeedLogin(slot) || PK11_IsLoggedIn(slot, pin_arg)) {
key = PK11_FindKeyByDERCert(slot, cert, pin_arg);
slapi_log_error(SLAPI_LOG_TRACE, "slapd_get_unlocked_key_for_cert",
"Found unlocked slot [%s] token [%s] for certificate [%s]\n",
| 0 |
bcbc3e9c3889d9df0f777c55d66e099d33a1ab52
|
389ds/389-ds-base
|
Ticket 49184 - adjust logging level in MO plugin
Description: Change logging level for benign message
https://pagure.io/389-ds-base/issue/49184
Reviewed by: mreynolds(one line commit ruile)
|
commit bcbc3e9c3889d9df0f777c55d66e099d33a1ab52
Author: Mark Reynolds <[email protected]>
Date: Tue Jun 6 10:50:19 2017 -0400
Ticket 49184 - adjust logging level in MO plugin
Description: Change logging level for benign message
https://pagure.io/389-ds-base/issue/49184
Reviewed by: mreynolds(one line commit ruile)
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index b37f1a1c1..46fbf27c0 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -3399,7 +3399,7 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data)
/* This is quite unexpected, after a call to memberof_get_groups
* ndn ancestors should be in the cache
*/
- slapi_log_err(SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_fix_memberof_callback: Weird, %s is not in the cache\n", ndn);
+ slapi_log_err(SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM, "memberof_fix_memberof_callback: Weird, %s is not in the cache\n", ndn);
}
}
}
| 0 |
a06cb4269613224e1454ed8c1ad6f702cc247b2b
|
389ds/389-ds-base
|
Ticket #48939 - nsslapd-workingdir is empty when ns-slapd is started by systemd
Description: If the Type of the service is notify in systemd, the server
process does not fork. Setting nsslapd-workingdir was missing in the not-
fork path. This patch adds it.
https://fedorahosted.org/389/ticket/48939
Reviewed by [email protected] (Thank you, William!!)
|
commit a06cb4269613224e1454ed8c1ad6f702cc247b2b
Author: Noriko Hosoi <[email protected]>
Date: Tue Jul 26 16:51:41 2016 -0700
Ticket #48939 - nsslapd-workingdir is empty when ns-slapd is started by systemd
Description: If the Type of the service is notify in systemd, the server
process does not fork. Setting nsslapd-workingdir was missing in the not-
fork path. This patch adds it.
https://fedorahosted.org/389/ticket/48939
Reviewed by [email protected] (Thank you, William!!)
diff --git a/ldap/servers/slapd/detach.c b/ldap/servers/slapd/detach.c
index 84a9eef0b..cd13a997a 100644
--- a/ldap/servers/slapd/detach.c
+++ b/ldap/servers/slapd/detach.c
@@ -44,16 +44,50 @@
#include <unistd.h>
#endif /* USE_SYSCONF */
-int
-detach( int slapd_exemode, int importexport_encrypt,
- int s_port, daemon_ports_t *ports_info )
+static int
+set_workingdir()
{
- int i, sd;
int rc = 0;
- char *workingdir = 0;
+ char *workingdir = config_get_workingdir();
char *errorlog = 0;
char *ptr = 0;
extern char *config_get_errorlog(void);
+ extern int config_set_workingdir(const char *attrname, char *value, char *errorbuf, int apply);
+ char errorbuf[SLAPI_DSE_RETURNTEXT_SIZE];
+
+ if ( NULL == workingdir ) {
+ errorlog = config_get_errorlog();
+ if (NULL == errorlog) {
+ rc = chdir("/");
+ } else {
+ ptr = strrchr(errorlog, '/');
+ if (ptr) {
+ *ptr = '\0';
+ }
+ rc = chdir(errorlog);
+ if (config_set_workingdir(CONFIG_WORKINGDIR_ATTRIBUTE, errorlog, errorbuf, 1) == LDAP_OPERATIONS_ERROR) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "detach: set workingdir failed with \"%s\"\n", errorbuf);
+ }
+ slapi_ch_free_string(&errorlog);
+ }
+ } else {
+ /* calling config_set_workingdir to check for validity of directory, don't apply */
+ if (config_set_workingdir(CONFIG_WORKINGDIR_ATTRIBUTE, workingdir, errorbuf, 0) == LDAP_OPERATIONS_ERROR) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "detach: set workingdir failed with \"%s\"\n", errorbuf);
+ rc = chdir("/");
+ } else {
+ rc = chdir(workingdir);
+ }
+ slapi_ch_free_string(&workingdir);
+ }
+ return rc;
+}
+
+int
+detach( int slapd_exemode, int importexport_encrypt,
+ int s_port, daemon_ports_t *ports_info )
+{
+ int i, sd;
if ( should_detach ) {
for ( i = 0; i < 5; i++ ) {
@@ -76,35 +110,12 @@ detach( int slapd_exemode, int importexport_encrypt,
}
/* call this right after the fork, but before closing stdin */
- if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt,
- s_port, ports_info)) {
+ if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) {
return 1;
}
- workingdir = config_get_workingdir();
- if ( NULL == workingdir ) {
- errorlog = config_get_errorlog();
- if ( NULL == errorlog ) {
- rc = chdir( "/" );
- PR_ASSERT(rc == 0);
- } else {
- if ((ptr = strrchr(errorlog, '/')) ||
- (ptr = strrchr(errorlog, '\\'))) {
- *ptr = 0;
- }
- rc = chdir( errorlog );
- PR_ASSERT(rc == 0);
- config_set_workingdir(CONFIG_WORKINGDIR_ATTRIBUTE, errorlog, NULL, 1);
- slapi_ch_free_string(&errorlog);
- }
- } else {
- /* calling config_set_workingdir to check for validity of directory, don't apply */
- if (config_set_workingdir(CONFIG_WORKINGDIR_ATTRIBUTE, workingdir, NULL, 0) == LDAP_OPERATIONS_ERROR) {
- return 1;
- }
- rc = chdir( workingdir );
- PR_ASSERT(rc == 0);
- slapi_ch_free_string(&workingdir);
+ if (set_workingdir()) {
+ LDAPDebug0Args(LDAP_DEBUG_ANY, "detach: chdir to workingdir failed.\n");
}
if ( (sd = open( "/dev/null", O_RDWR )) == -1 ) {
@@ -127,14 +138,16 @@ detach( int slapd_exemode, int importexport_encrypt,
g_set_detached(1);
} else { /* not detaching - call nss/ssl init */
- if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt,
- s_port, ports_info)) {
+ if (slapd_do_all_nss_ssl_init(slapd_exemode, importexport_encrypt, s_port, ports_info)) {
return 1;
}
+ if (set_workingdir()) {
+ LDAPDebug0Args(LDAP_DEBUG_ANY, "detach: chdir to workingdir failed.\n");
+ }
}
(void) SIGNAL( SIGPIPE, SIG_IGN );
- return rc;
+ return 0;
}
/*
| 0 |
71a891f0dcfd1aafeb3913279d42e33ed2355312
|
389ds/389-ds-base
|
Ticket 48445: keep alive entries can break replication
Bug Description:
On the consumer side, at the end of a total update the replica is enabled and the changelog recreated.
When the replica is enabled the keep alive entry (for that replica) is created .
There is a race condition (that look quite systematic in our tests) if the creation of the entry is added to the changelog
before the changelog is recreated.
In that case the ADD is erased from the CL and will never be replicated.
The keep alive entry is created (if it does not already exist) :
- during a total update (as supplier)
- when the keep alive is updated
- when the replica is enabled
Fix Description:
It is not strictly necessary to create the keep alive when the replica is enabled.
So we can skip the creation during that step.
https://fedorahosted.org/389/ticket/48445
Reviewed by: Mark Reynolds (thank you Mark)
Platforms tested: F23
Flag Day: no
Doc impact: no
|
commit 71a891f0dcfd1aafeb3913279d42e33ed2355312
Author: Thierry Bordaz <[email protected]>
Date: Wed Feb 10 15:17:02 2016 +0100
Ticket 48445: keep alive entries can break replication
Bug Description:
On the consumer side, at the end of a total update the replica is enabled and the changelog recreated.
When the replica is enabled the keep alive entry (for that replica) is created .
There is a race condition (that look quite systematic in our tests) if the creation of the entry is added to the changelog
before the changelog is recreated.
In that case the ADD is erased from the CL and will never be replicated.
The keep alive entry is created (if it does not already exist) :
- during a total update (as supplier)
- when the keep alive is updated
- when the replica is enabled
Fix Description:
It is not strictly necessary to create the keep alive when the replica is enabled.
So we can skip the creation during that step.
https://fedorahosted.org/389/ticket/48445
Reviewed by: Mark Reynolds (thank you Mark)
Platforms tested: F23
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/replication/repl5_replica.c b/ldap/servers/plugins/replication/repl5_replica.c
index 8b53f3c18..31c5f0ff9 100644
--- a/ldap/servers/plugins/replication/repl5_replica.c
+++ b/ldap/servers/plugins/replication/repl5_replica.c
@@ -3972,7 +3972,6 @@ replica_enable_replication (Replica *r)
/* What to do ? */
}
- replica_subentry_check(r->repl_root, replica_get_rid(r));
/* Replica came back online, Check if the total update was terminated.
If flag is still set, it was not terminated, therefore the data is
very likely to be incorrect, and we should not restart Replication threads...
| 0 |
f980ff61da122b39d33bf83bc20f2b7a81b6be24
|
389ds/389-ds-base
|
Bug 606920 - anonymous resource limit- nstimelimit -
also applied to "cn=directory manager"
https://bugzilla.redhat.com/show_bug.cgi?id=606920
Description: Client side sizelimit / timelimit request should
be honoured by the Directory Manager, too. Changing the time/
sizelimit evaluation so that if client side request exists,
the value is set even if the bind user is the directory manager.
|
commit f980ff61da122b39d33bf83bc20f2b7a81b6be24
Author: Noriko Hosoi <[email protected]>
Date: Wed Sep 22 16:28:59 2010 -0700
Bug 606920 - anonymous resource limit- nstimelimit -
also applied to "cn=directory manager"
https://bugzilla.redhat.com/show_bug.cgi?id=606920
Description: Client side sizelimit / timelimit request should
be honoured by the Directory Manager, too. Changing the time/
sizelimit evaluation so that if client side request exists,
the value is set even if the bind user is the directory manager.
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 858bc8f75..7aec7910e 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -1388,12 +1388,18 @@ compute_limits (Slapi_PBlock *pb)
}
}
- if ( isroot ) {
- timelimit = max_timelimit = -1; /* no limit */
- } else if ( requested_timelimit == 0 ) {
- timelimit = ( max_timelimit == -1 ) ? -1 : max_timelimit;
- } else if ( max_timelimit == -1 || requested_timelimit < max_timelimit ) {
- timelimit = requested_timelimit;
+ if ( requested_timelimit ) {
+ /* requested limit should be applied to all (including root) */
+ if ( isroot ) {
+ timelimit = requested_timelimit;
+ } else if ( (max_timelimit == -1) ||
+ (requested_timelimit < max_timelimit) ) {
+ timelimit = requested_timelimit;
+ } else {
+ timelimit = max_timelimit;
+ }
+ } else if ( isroot ) {
+ timelimit = -1; /* no limit */
} else {
timelimit = max_timelimit;
}
@@ -1419,12 +1425,18 @@ compute_limits (Slapi_PBlock *pb)
}
}
- if ( isroot ) {
- sizelimit = max_sizelimit = -1;
- } else if ( requested_sizelimit == 0 ) {
- sizelimit = ( max_sizelimit == -1 ) ? -1 : max_sizelimit;
- } else if ( max_sizelimit == -1 || requested_sizelimit < max_sizelimit ) {
- sizelimit = requested_sizelimit;
+ if ( requested_sizelimit ) {
+ /* requested limit should be applied to all (including root) */
+ if ( isroot ) {
+ sizelimit = requested_sizelimit;
+ } else if ( (max_sizelimit == -1) ||
+ (requested_sizelimit < max_sizelimit) ) {
+ sizelimit = requested_sizelimit;
+ } else {
+ sizelimit = max_sizelimit;
+ }
+ } else if ( isroot ) {
+ sizelimit = -1; /* no limit */
} else {
sizelimit = max_sizelimit;
}
| 0 |
b98d694978a0de93995a4186069f40409016bcf9
|
389ds/389-ds-base
|
Ticket 47819 - Add the new precise tombstone purging config attribute
Description: Add REPLICA_PRECISE_PURGING:
'nsds5ReplicaPreciseTombstonePurging'
Reviewed by: rmeggins(Thanks!)
|
commit b98d694978a0de93995a4186069f40409016bcf9
Author: Mark Reynolds <[email protected]>
Date: Mon Jul 7 12:15:02 2014 -0400
Ticket 47819 - Add the new precise tombstone purging config attribute
Description: Add REPLICA_PRECISE_PURGING:
'nsds5ReplicaPreciseTombstonePurging'
Reviewed by: rmeggins(Thanks!)
diff --git a/src/lib389/lib389/properties.py b/src/lib389/lib389/properties.py
index 73bfd296e..84dca3274 100644
--- a/src/lib389/lib389/properties.py
+++ b/src/lib389/lib389/properties.py
@@ -123,6 +123,7 @@ REPLICA_LEGACY_CONS = 'legacy'
REPLICA_BINDDN = 'binddn'
REPLICA_PURGE_INTERVAL = 'purge-interval'
REPLICA_PURGE_DELAY = 'purge-delay'
+REPLICA_PRECISE_PURGING = 'precise-purging'
REPLICA_REFERRAL = 'referral'
REPLICA_FLAGS = 'flags'
@@ -136,6 +137,7 @@ REPLICA_PROPNAME_TO_ATTRNAME = {
REPLICA_BINDDN: 'nsds5replicabinddn',
REPLICA_PURGE_INTERVAL: 'nsds5replicatombstonepurgeinterval',
REPLICA_PURGE_DELAY: 'nsds5ReplicaPurgeDelay',
+ REPLICA_PRECISE_PURGING: 'nsds5ReplicaPreciseTombstonePurging',
REPLICA_REFERRAL: 'nsds5ReplicaReferral',
REPLICA_FLAGS: 'nsds5flags'}
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index 318ba6a81..00d765f2f 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -160,7 +160,8 @@ class Replica(object):
REPLICA_LEGACY_CONS
REPLICA_BINDDN
REPLICA_PURGE_INTERVAL
- REPLICA_PURGE_DELAY
+ REPLICA_PURGE_DELAY
+ REPLICA_PRECISE_PURGING
REPLICA_REFERRAL
REPLICA_FLAGS
@@ -253,6 +254,7 @@ class Replica(object):
REPLICA_BINDDN [defaultProperties[REPLICATION_BIND_DN]]
REPLICA_PURGE_INTERVAL
REPLICA_PURGE_DELAY
+ REPLICA_PRECISE_PURGING
REPLICA_REFERRAL
REPLICA_FLAGS
| 0 |
538ff19416eaeb54e04c89202fda30f709127678
|
389ds/389-ds-base
|
fix MEMPOOL_EXPERIMENTAL build
|
commit 538ff19416eaeb54e04c89202fda30f709127678
Author: Rich Megginson <[email protected]>
Date: Mon Jun 15 12:17:11 2015 -0600
fix MEMPOOL_EXPERIMENTAL build
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index ddc6552ba..2499c5738 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1129,7 +1129,7 @@ static struct config_get_and_set {
CONFIG_ON_OFF, (ConfigGetFunc)config_get_enable_nunc_stans, &init_enable_nunc_stans},
#endif
#ifdef MEMPOOL_EXPERIMENTAL
- ,{CONFIG_MEMPOOL_SWITCH_ATTRIBUTE, config_set_mempool_switch,
+ {CONFIG_MEMPOOL_SWITCH_ATTRIBUTE, config_set_mempool_switch,
NULL, 0,
(void**)&global_slapdFrontendConfig.mempool_switch,
CONFIG_ON_OFF, (ConfigGetFunc)config_get_mempool_switch,
| 0 |
22ad5a9a938cf0698f8a2e096ff863670c4d03af
|
389ds/389-ds-base
|
Related: 214238
Summary: Make fallback SASL path work for 64-bit Linux default location.
|
commit 22ad5a9a938cf0698f8a2e096ff863670c4d03af
Author: Nathan Kinder <[email protected]>
Date: Tue Nov 7 04:42:55 2006 +0000
Related: 214238
Summary: Make fallback SASL path work for 64-bit Linux default location.
diff --git a/ldap/servers/slapd/charray.c b/ldap/servers/slapd/charray.c
index 855b24637..327de756e 100644
--- a/ldap/servers/slapd/charray.c
+++ b/ldap/servers/slapd/charray.c
@@ -269,10 +269,21 @@ charray_dup( char **a )
char **
str2charray( char *str, char *brkstr )
+{
+ return( str2charray_ext( str, brkstr, 1 ));
+}
+
+/*
+ * extended version of str2charray lets you disallow
+ * duplicate values into the array.
+ */
+char **
+str2charray_ext( char *str, char *brkstr, int allow_dups )
{
char **res;
char *s;
- int i;
+ int i, j;
+ int dup_found = 0;
char * iter = NULL;
i = 1;
@@ -284,9 +295,22 @@ str2charray( char *str, char *brkstr )
res = (char **) slapi_ch_malloc( (i + 1) * sizeof(char *) );
i = 0;
- for ( s = ldap_utf8strtok_r( str, brkstr , &iter); s != NULL;
+ for ( s = ldap_utf8strtok_r( str, brkstr , &iter); s != NULL;
s = ldap_utf8strtok_r( NULL, brkstr , &iter) ) {
- res[i++] = slapi_ch_strdup( s );
+ dup_found = 0;
+ /* Always copy the first value into the array */
+ if ( (!allow_dups) && (i != 0) ) {
+ /* Check for duplicates */
+ for ( j = 0; j < i; j++ ) {
+ if ( strncmp( res[j], s, strlen( s ) ) == 0 ) {
+ dup_found = 1;
+ break;
+ }
+ }
+ }
+
+ if ( !dup_found )
+ res[i++] = slapi_ch_strdup( s );
}
res[i] = NULL;
diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c
index 1c91fe924..8bd1a19ce 100644
--- a/ldap/servers/slapd/saslbind.c
+++ b/ldap/servers/slapd/saslbind.c
@@ -560,7 +560,7 @@ static int ids_sasl_getpluginpath(sasl_conn_t *conn, const char **path)
char *pluginpath = config_get_saslpath();
if ((!pluginpath) || (*pluginpath == '\0')) {
if (!(pluginpath = getenv("SASL_PATH"))) {
- pluginpath = "/usr/lib/sasl2";
+ pluginpath = "/usr/lib64/sasl2:/usr/lib/sasl2";
}
}
*path = pluginpath;
@@ -744,7 +744,7 @@ char **ids_sasl_listmech(Slapi_PBlock *pb)
LDAPDebug(LDAP_DEBUG_TRACE, "sasl library mechs: %s\n", str, 0, 0);
/* merge into result set */
dupstr = slapi_ch_strdup(str);
- others = str2charray(dupstr, ",");
+ others = str2charray_ext(dupstr, ",", 0 /* don't list duplicate mechanisms */);
charray_merge(&ret, others, 1);
charray_free(others);
slapi_ch_free((void**)&dupstr);
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 7cb6021c8..fdb883d1a 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -760,6 +760,7 @@ int charray_inlist( char **a, char *s );
int charray_utf8_inlist( char **a, char *s );
char ** charray_dup( char **a );
char ** str2charray( char *str, char *brkstr );
+char ** str2charray_ext( char *str, char *brkstr, int allow_dups );
int charray_remove(char **a,const char *s);
char ** cool_charray_dup( char **a );
void cool_charray_free( char **array );
| 0 |
318a3ce0c721ce217fc166b69e3457ad6ee0fb98
|
389ds/389-ds-base
|
Bump version to 1.4.4.4
|
commit 318a3ce0c721ce217fc166b69e3457ad6ee0fb98
Author: Mark Reynolds <[email protected]>
Date: Wed Jul 8 17:01:34 2020 -0400
Bump version to 1.4.4.4
diff --git a/VERSION.sh b/VERSION.sh
index f84f24b5d..648f6fd1a 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,7 +10,7 @@ vendor="389 Project"
# PACKAGE_VERSION is constructed from these
VERSION_MAJOR=1
VERSION_MINOR=4
-VERSION_MAINT=4.3
+VERSION_MAINT=4.4
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
VERSION_DATE=$(date -u +%Y%m%d)
| 0 |
0d8ca98d16f35452c5dfab07e67582df7b355f46
|
389ds/389-ds-base
|
Fix display names in pass sync to say Fedora
|
commit 0d8ca98d16f35452c5dfab07e67582df7b355f46
Author: David Boreham <[email protected]>
Date: Wed May 4 23:45:07 2005 +0000
Fix display names in pass sync to say Fedora
diff --git a/ldap/synctools/passwordsync/wix/PassSync.wxs b/ldap/synctools/passwordsync/wix/PassSync.wxs
index 36f1ae391..9080e6697 100644
--- a/ldap/synctools/passwordsync/wix/PassSync.wxs
+++ b/ldap/synctools/passwordsync/wix/PassSync.wxs
@@ -36,13 +36,13 @@
All rights reserved.
END COPYRIGHT BLOCK -->
<Wix xmlns='http://schemas.microsoft.com/wix/2003/01/wi'>
- <Product Name='Password Sync' Id='C1842CD5-4659-4E7C-A53C-37083B0CB59F'
+ <Product Name='Fedora Directory Password Sync' Id='C1842CD5-4659-4E7C-A53C-37083B0CB59F'
Language='1033' Codepage='1252'
- Version='1.0.0' Manufacturer='Brandx'>
+ Version='1.0.0' Manufacturer='Fedora'>
<Package Id='????????-????-????-????-????????????' Keywords='Installer'
- Description="Password Synchronization Installer"
- Manufacturer='Brandx'
+ Description="Fedora Directory Password Synchronization Installer"
+ Manufacturer='Fedora'
InstallerVersion='100' Languages='1033' Compressed='yes' SummaryCodepage='1252' />
<Media Id='1' Cabinet='Sample.cab' EmbedCab='yes' DiskPrompt="CD-ROM #1" />
@@ -98,7 +98,7 @@
<Directory Id='ProgramFilesFolder' Name='PFiles'>
- <Directory Id='INSTALLDIR' Name='PassSync' LongName='Password Synchronization'>
+ <Directory Id='INSTALLDIR' Name='PassSync' LongName='Fedora Directory Password Synchronization'>
<Component Id='MainExecutable' Guid='DCEECAA4-83F1-4F22-985B-FDB3C8ABD471'>
<File Id='PassSyncEXE' Name='PassSync.exe' LongName='passsync.exe' DiskId='1'
@@ -133,7 +133,7 @@
</Directory>
<!-- <Directory Id="ProgramMenuFolder" Name="PMenu" LongName="Programs">
- <Directory Id="ProgramMenuDir" Name='PassSync' LongName="Password Synchronization Service" />
+ <Directory Id="ProgramMenuDir" Name='PassSync' LongName="Fedora Directory Password Synchronization Service" />
</Directory>
-->
| 0 |
eb5fd6927601f6dbbf4758eb155acf752d199bc9
|
389ds/389-ds-base
|
[155137] SASL mapping online help not in correct format
The online help file did not inherit the right format.
Replaced the header part with the same one the rest is using.
|
commit eb5fd6927601f6dbbf4758eb155acf752d199bc9
Author: Noriko Hosoi <[email protected]>
Date: Mon Apr 18 16:17:22 2005 +0000
[155137] SASL mapping online help not in correct format
The online help file did not inherit the right format.
Replaced the header part with the same one the rest is using.
diff --git a/ldap/docs/dirhlp/help/configtab_rootnode8.htm b/ldap/docs/dirhlp/help/configtab_rootnode8.htm
index aaf5c6993..ab69053aa 100644
--- a/ldap/docs/dirhlp/help/configtab_rootnode8.htm
+++ b/ldap/docs/dirhlp/help/configtab_rootnode8.htm
@@ -1,4 +1,3 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<!--This html file is XHTML complaint, as set forth in the
@@ -7,52 +6,64 @@ Lists work as they do in older versions on HTML and not as
directed in XHTML.
The <a name=" "> tags have targets that use spaces. -->
<meta name="keywords" content="e-commerce, ecommerce, Internet software, e-commerce applications, electronic commerce, ebusiness, e-business, enterprise software, net economy, software, ecommerce solutions, e-commerce services, netscape, marketplace, digital marketplace, Red Hat, Fedora" />
- <meta http-equiv="content-type"
- content="text/html; charset=ISO-8859-1">
- <meta name="templatebase"
- content="Authored in FrameMaker. Converted to HTML in WebWorks Publisher. manual wdt 1.6">
- <meta name="LASTUPDATED" content="04/29/03 15:35:31">
- <title>Directory Server Help: Settings Tab</title>
+<meta http-equiv="content-type"
+content="text/html; charset=ISO-8859-1">
+<meta name="templatebase"
+content="Authored in FrameMaker. Converted to HTML in WebWorks Publisher. manual wdt 1.6">
+<meta name="LASTUPDATED" content="04/29/03 15:35:31">
+<title>Directory Server Help: Settings Tab</title>
<!--The following is a javascript which determines whether the client
is on a Windows machine, or is on another type of operating system. Once
the operating system is determined, either a windows or other operating
system cascading style sheet is used. -->
- <script type="text/JavaScript" src="help_files/sniffer.js">
+<script type="text/JavaScript" src="/manual/en/slapd/help/sniffer.js">
- </script>
+</script>
</head>
-<body text="#000000" link="#006666" vlink="#006666" alink="#333366"
- bgcolor="#ffffff">
+<body text="#000000" link="#006666" vlink="#006666" alink="#333366" bgcolor="#FFFFFF">
+
<!--maincontent defines everything between the body tags -->
-<!--start maincontent--><!--navigationcontent defines the top row of links and the banner --><!--start navigationcontent-->
+<!--start maincontent-->
+
+<!--navigationcontent defines the top row of links and the banner -->
+<!--start navigationcontent-->
+
<table border="0" cellspacing="0" cellpadding="0" width="100%">
- <tbody>
- <tr>
- <td>
- <table border="0" cellspacing="0" cellpadding="0">
- <tbody>
- <tr>
- <td valign="bottom" width="67"> <img alt=""></td>
- <td valign="middle"> <span class="product">
-Directory Server</span> <span class="booktitle">Console Help</span> </td>
- </tr>
- </tbody>
- </table>
- </td>
- </tr>
- <tr>
- <td>
- <hr size="1" noshade="noshade"><span class="navigation">
+<tr>
+<td><table border="0" cellspacing="0" cellpadding="0">
+<tr>
+<td valign="bottom" width="67">
+</td>
+<td valign="middle">
+<span class="product">Directory Server</span>
+<span class="booktitle">Console Help</span>
+</td>
+</tr>
+</table>
+</td>
+</tr>
+
+<tr>
+<td>
+<hr size="1" noshade="noshade" />
+
+<span class="navigation">
<!-- BEGIN DOC CONTROLLER --
-<a style="text-decoration: none; color: rgb(0, 102, 102);" href="/manual/en/slapd/index.htm">
+<a style="text-decoration: none; color:#006666" href="/manual/en/slapd/index.htm">
DocHome
</a>
-- END DOC CONTROLLER -->
- </span> </td>
- </tr>
- </tbody>
+</span>
+
+
+
+
+
+</td>
+</tr>
</table>
+
<!--end navigationcontent-->
<!--bookcontent defines the actual content of the file, sans headers and footers --><!--start bookcontent-->
<blockquote><br>
| 0 |
fbcaf84d590f71f31c8cb107728feb2e180fd3f2
|
389ds/389-ds-base
|
Issue 4866 - CLI - when enabling replication set changelog trimming by
default
Description: Enable changelog trimming by default when enabling
replication.
relates: https://github.com/389ds/389-ds-base/issues/4866
Reviewed by: spichugi(Thanks!)
|
commit fbcaf84d590f71f31c8cb107728feb2e180fd3f2
Author: Mark Reynolds <[email protected]>
Date: Mon Apr 18 12:15:49 2022 -0400
Issue 4866 - CLI - when enabling replication set changelog trimming by
default
Description: Enable changelog trimming by default when enabling
replication.
relates: https://github.com/389ds/389-ds-base/issues/4866
Reviewed by: spichugi(Thanks!)
diff --git a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
index 9eba8d373..8f9136229 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py
@@ -66,7 +66,7 @@ def set_changelog_trimming(instance):
inst_changelog = Changelog(instance, suffix=DEFAULT_SUFFIX)
log.info('Set nsslapd-changelogmaxage to 30d')
- inst_changelog.add('nsslapd-changelogmaxage', '30')
+ inst_changelog.set_max_age('30d')
@pytest.mark.ds50873
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index 0fd65908f..70b563caf 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -15,7 +15,7 @@ from lib389.topologies import topology_m4 as topo_m4
from lib389.topologies import topology_m2 as topo_m2
from . import get_repl_entries
from lib389.idm.user import UserAccount
-from lib389.replica import ReplicationManager
+from lib389.replica import ReplicationManager, Changelog
from lib389._constants import *
pytestmark = pytest.mark.tier0
@@ -644,6 +644,22 @@ def test_csngen_task(topo_m2):
assert m1.searchErrorsLog("_csngen_gen_tester_main")
+def test_default_cl_trimming_enabled(topo_m2):
+ """Check that changelog trimming was enabled by default
+
+ :id: c37b9a28-f961-4867-b8a1-e81edd7f9bf3
+ :setup: Supplier Instance
+ :steps:
+ 1. Check changelog has trimming set up by default
+ :expectedresults:
+ 1. Success
+ """
+
+ # Set up changelog trimming by default
+ cl = Changelog(topo_m2.ms["supplier1"], DEFAULT_SUFFIX)
+ assert cl.get_attr_val_utf8("nsslapd-changelogmaxage") == "7d"
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/src/lib389/lib389/replica.py b/src/lib389/lib389/replica.py
index 923415d4e..652630fa8 100644
--- a/src/lib389/lib389/replica.py
+++ b/src/lib389/lib389/replica.py
@@ -1745,6 +1745,19 @@ class Replicas(DSLdapObjects):
self._childobject = Replica
self._basedn = DN_MAPPING_TREE
+ def create(self, rdn=None, properties=None):
+ replica = super(Replicas, self).create(rdn, properties)
+
+ # Set up changelog trimming by default
+ if properties is not None:
+ for attr, val in properties.items():
+ if attr.lower() == 'nsds5replicaroot':
+ cl = Changelog(self._instance, val[0])
+ cl.set_max_age("7d")
+ break
+
+ return replica
+
def get(self, selector=[], dn=None):
"""Get a child entry (DSLdapObject, Replica, etc.) with dn or selector
using a base DN and objectClasses of our object (DSLdapObjects, Replicas, etc.)
@@ -2448,7 +2461,7 @@ class ReplicationManager(object):
def wait_while_replication_is_progressing(self, from_instance, to_instance, timeout=5):
""" Wait while replication is progressing
used by wait_for_replication to avoid timeout because of
- slow replication (typically when traces have been added)
+ slow replication (typically when traces have been added)
Returns true is repliaction is stalled.
:param from_instance: The instance whos state we we want to check from
| 0 |
b4e585fabf815801aa8e359d9fce240f81e6d7e3
|
389ds/389-ds-base
|
Issue: 48851 - investigate and port TET matching rules filter tests(match)
Investigate and port TET matching rules filter tests(match)
Relates: https://pagure.io/389-ds-base/issue/48851
Author: aborah
Reviewed by: Matus Honek, Simon Pichugin
|
commit b4e585fabf815801aa8e359d9fce240f81e6d7e3
Author: Anuj Borah <[email protected]>
Date: Wed Jun 12 15:58:49 2019 +0530
Issue: 48851 - investigate and port TET matching rules filter tests(match)
Investigate and port TET matching rules filter tests(match)
Relates: https://pagure.io/389-ds-base/issue/48851
Author: aborah
Reviewed by: Matus Honek, Simon Pichugin
diff --git a/dirsrvtests/tests/suites/filter/filter_match_test.py b/dirsrvtests/tests/suites/filter/filter_match_test.py
new file mode 100644
index 000000000..c7b1adabd
--- /dev/null
+++ b/dirsrvtests/tests/suites/filter/filter_match_test.py
@@ -0,0 +1,281 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ----
+
+
+"""
+Test the matching rules feature .
+"""
+
+import os
+import pytest
+
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.topologies import topology_st
+from lib389.cos import CosTemplates
+from lib389.schema import Schema
+
+import ldap
+
+pytestmark = pytest.mark.tier1
+
+
+ATTR = ["( 2.16.840.1.113730.3.1.999999.0 NAME 'attroctetStringMatch' "
+ "DESC 'for testing matching rules' EQUALITY octetStringMatch "
+ "ORDERING octetStringOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 "
+ "X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.1 NAME 'attrbitStringMatch' DESC "
+ "'for testing matching rules' EQUALITY bitStringMatch SYNTAX "
+ "1.3.6.1.4.1.1466.115.121.1.6 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.2 NAME 'attrcaseExactIA5Match' "
+ "DESC 'for testing matching rules' EQUALITY caseExactIA5Match "
+ "SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 "
+ "X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.3 NAME 'attrcaseExactMatch' DESC "
+ "'for testing matching rules' EQUALITY caseExactMatch ORDERING "
+ "caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX "
+ "1.3.6.1.4.1.1466.115.121.1.15 "
+ "X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.4 NAME 'attrgeneralizedTimeMatch' DESC "
+ "'for testing matching rules' EQUALITY generalizedTimeMatch ORDERING "
+ "generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 "
+ "X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.5 NAME 'attrbooleanMatch' DESC "
+ "'for testing matching rules' EQUALITY booleanMatch SYNTAX "
+ "1.3.6.1.4.1.1466.115.121.1.7 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.6 NAME 'attrcaseIgnoreIA5Match' DESC "
+ "'for testing matching rules' EQUALITY caseIgnoreIA5Match SUBSTR "
+ "caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 "
+ "X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.7 NAME 'attrcaseIgnoreMatch' DESC "
+ "'for testing matching rules' EQUALITY caseIgnoreMatch ORDERING "
+ "caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch "
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.8 NAME 'attrcaseIgnoreListMatch' DESC "
+ "'for testing matching rules' EQUALITY caseIgnoreListMatch SUBSTR "
+ "caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 "
+ "X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.9 NAME 'attrobjectIdentifierMatch' DESC "
+ "'for testing matching rules' EQUALITY objectIdentifierMatch SYNTAX "
+ "1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.10 NAME 'attrdistinguishedNameMatch' DESC "
+ "'for testing matching rules' EQUALITY distinguishedNameMatch SYNTAX "
+ "1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.11 NAME 'attrintegerMatch' DESC "
+ "'for testing matching rules' EQUALITY integerMatch ORDERING "
+ "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 "
+ "X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.12 NAME 'attruniqueMemberMatch' DESC "
+ "'for testing matching rules' EQUALITY uniqueMemberMatch SYNTAX "
+ "1.3.6.1.4.1.1466.115.121.1.34 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.13 NAME 'attrnumericStringMatch' DESC "
+ "'for testing matching rules' EQUALITY numericStringMatch ORDERING "
+ "numericStringOrderingMatch SUBSTR numericStringSubstringsMatch "
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.36 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.14 NAME 'attrtelephoneNumberMatch' DESC "
+ "'for testing matching rules' EQUALITY telephoneNumberMatch SUBSTR "
+ "telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50 "
+ "X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.15 NAME 'attrdirectoryStringFirstComponentMatch' "
+ "DESC 'for testing matching rules' EQUALITY directoryStringFirstComponentMatch "
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.16 NAME 'attrobjectIdentifierFirstComponentMatch' "
+ "DESC 'for testing matching rules' EQUALITY objectIdentifierFirstComponentMatch "
+ "SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )",
+ "( 2.16.840.1.113730.3.1.999999.17 NAME 'attrintegerFirstComponentMatch' "
+ "DESC 'for testing matching rules' EQUALITY integerFirstComponentMatch SYNTAX "
+ "1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'matching rule tests' )"]
+
+TESTED_MATCHING_RULES = ["bitStringMatch", "caseExactIA5Match", "caseExactMatch",
+ "caseExactOrderingMatch", "caseExactSubstringsMatch",
+ "caseExactIA5SubstringsMatch", "generalizedTimeMatch",
+ "generalizedTimeOrderingMatch", "booleanMatch", "caseIgnoreIA5Match",
+ "caseIgnoreIA5SubstringsMatch", "caseIgnoreMatch",
+ "caseIgnoreOrderingMatch", "caseIgnoreSubstringsMatch",
+ "caseIgnoreListMatch", "caseIgnoreListSubstringsMatch",
+ "objectIdentifierMatch", "directoryStringFirstComponentMatch",
+ "objectIdentifierFirstComponentMatch", "distinguishedNameMatch",
+ "integerMatch", "integerOrderingMatch", "integerFirstComponentMatch",
+ "uniqueMemberMatch", "numericStringMatch", "numericStringOrderingMatch",
+ "numericStringSubstringsMatch", "telephoneNumberMatch",
+ "telephoneNumberSubstringsMatch", "octetStringMatch",
+ "octetStringOrderingMatch"]
+
+
+MATCHING_RULES = [('addentrybitStringMatch', 'attrbitStringMatch',
+ ["'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"],
+ ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B",
+ "'0011'B", "'0100'B", "'0100'B", "'0101'B",
+ "'0101'B", "'0110'B", "'0110'B"]),
+ ('addentrycaseExactIA5Match', 'attrcaseExactIA5Match',
+ ['sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'],
+ ['Sprain', 'Sprain', 'Sprain', 'Sprain', 'SpRain',
+ 'SpRain', 'SprAin', 'SprAin', 'SpraIn', 'SpraIn',
+ 'Sprain', 'Sprain']),
+ ('addentrycaseExactMatch', 'attrcaseExactMatch',
+ ['ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'],
+ ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè',
+ 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè',
+ 'çÉliné Ändrè', 'çÉliné Ändrè']),
+ ('addentrygeneralizedTimeMatch', 'attrgeneralizedTimeMatch',
+ ['20100218171301Z', '20100218171302Z', '20100218171303Z',
+ '20100218171304Z', '20100218171305Z'],
+ ['20100218171300Z', '20100218171300Z', '20100218171301Z',
+ '20100218171301Z', '20100218171302Z', '20100218171302Z',
+ '20100218171303Z', '20100218171303Z', '20100218171304Z',
+ '20100218171304Z', '20100218171305Z', '20100218171305Z']),
+ ('addentrybooleanMatch', 'attrbooleanMatch',
+ ['FALSE'],
+ ['TRUE', 'TRUE', 'FALSE', 'FALSE']),
+ ('addentrycaseIgnoreIA5Match', 'attrcaseIgnoreIA5Match',
+ ['sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'],
+ ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3',
+ 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5',
+ 'sprain6', 'sprain6']),
+ ('addentrycaseIgnoreMatch', 'attrcaseIgnoreMatch',
+ ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4',
+ 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'],
+ ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2',
+ 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3',
+ 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5',
+ 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']),
+ ('addentrycaseIgnoreListMatch', 'attrcaseIgnoreListMatch',
+ ['foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'],
+ ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar',
+ 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar',
+ 'foo6$bar', 'foo6$bar']),
+ ('addentryobjectIdentifierMatch', 'attrobjectIdentifierMatch',
+ ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26',
+ '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41',
+ '1.3.6.1.4.1.1466.115.121.1.6'],
+ ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15',
+ '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24',
+ '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26',
+ '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40',
+ '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41',
+ '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']),
+ ('addentrydirectoryStringFirstComponentMatch',
+ 'attrdirectoryStringFirstComponentMatch',
+ ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5',
+ 'ÇélIné Ändrè6'],
+ ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2',
+ 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4',
+ 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']),
+ ('addentryobjectIdentifierFirstComponentMatch',
+ 'attrobjectIdentifierFirstComponentMatch',
+ ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26',
+ '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41',
+ '1.3.6.1.4.1.1466.115.121.1.6'],
+ ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15',
+ '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24',
+ '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26',
+ '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40',
+ '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41',
+ '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']),
+ ('addentrydistinguishedNameMatch', 'attrdistinguishedNameMatch',
+ ['cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar',
+ 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'],
+ ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar',
+ 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo3,cn=bar',
+ 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar',
+ 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']),
+ ('addentryintegerMatch', 'attrintegerMatch',
+ ['-1', '0', '1', '2', '3'],
+ ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']),
+ ('addentryintegerFirstComponentMatch', 'attrintegerFirstComponentMatch',
+ ['-1', '0', '1', '2', '3'],
+ ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']),
+ ('addentryuniqueMemberMatch', 'attruniqueMemberMatch',
+ ["cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B",
+ "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B",
+ "cn=foo6,cn=bar#'0110'B"],
+ ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B",
+ "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B",
+ "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B",
+ "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B",
+ "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B",
+ "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]),
+ ('addentrynumericStringMatch', 'attrnumericStringMatch',
+ ['00002', '00003', '00004', '00005', '00006'],
+ ['00001', '00001', '00002', '00002', '00003', '00003', '00004',
+ '00004', '00005', '00005', '00006', '00006']),
+ ('addentrytelephoneNumberMatch', 'attrtelephoneNumberMatch',
+ ['+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585',
+ '+1 408 555 9187', '+1 408 555 9423'],
+ ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625',
+ '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201',
+ '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187',
+ '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']),
+ ('addentryoctetStringMatch', 'attroctetStringMatch',
+ ['AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=',
+ 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='],
+ ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=',
+ 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=',
+ 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=',
+ 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY='])]
+
+
+def test_matching_rules(topology_st):
+ """Test matching rules.
+ :id: 8cb6e62a-8cfc-11e9-be9a-8c16451d917b
+ :setup: Standalone
+ :steps:
+ 1. Search for matching rule.
+ 2. Matching rule should be there in schema.
+ :expected results:
+ 1. Pass
+ 2. Pass
+ """
+ matchingrules = Schema(topology_st.standalone).get_matchingrules()
+ assert matchingrules
+ rules = set(matchingrule.names for matchingrule in matchingrules)
+ rules1 = [role[0] for role in rules if len(role) != 0]
+ for rule in TESTED_MATCHING_RULES:
+ assert rule in rules1
+
+
+def test_add_attribute_types(topology_st):
+ """Test add attribute types to schema
+ :id: 84d6dece-8cfc-11e9-89a3-8c16451d917b
+ :setup: Standalone
+ :steps:
+ 1. Add new attribute types to schema.
+ :expected results:
+ 1. Pass
+ """
+ for attribute in ATTR:
+ Schema(topology_st.standalone).add('attributetypes', attribute)
+
+
[email protected]("cn_cn, attr, positive, negative", MATCHING_RULES)
+def test_valid_invalid_attributes(topology_st, cn_cn, attr, positive, negative):
+ """Test valid and invalid values of attributes
+ :id: 7ec19eca-8cfc-11e9-a0df-8c16451d917b
+ :setup: Standalone
+ :steps:
+ 1. Create entry with an attribute that uses that matching rule
+ 2. Delete existing entry
+ 3. Create entry with an attribute that uses that matching rule providing duplicate
+ values that are duplicates according to the equality matching rule.
+ :expected results:
+ 1. Pass
+ 2. Pass
+ 3. Fail
+ """
+ cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX)
+ cos.create(properties={'cn': cn_cn,
+ attr: positive})
+ for entry in cos.list():
+ entry.delete()
+ with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS):
+ cos.create(properties={'cn': cn_cn,
+ attr: negative})
+
+
+if __name__ == '__main__':
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s -v %s" % CURRENT_FILE)
| 0 |
c4233ec14a82d1d4c0c71915be40913259d417fa
|
389ds/389-ds-base
|
Ticket #48305 - perl module conditional test is not conditional when checking SELinux policies
Description: commit 9fefc13c02c9ae037fad053152193794706aaa31 introduced
a regression:
Bug 1287547 - 389-ds-base-1.3.4.5-1.fc23.x86_64 leaves empty /NUL around
after ipa-server-install
To check the existence of a character special file "/dev/null", "-c" is
supposed to be used instead of "-f".
Reviewed by [email protected] (Thank you, Mark!!)
https://fedorahosted.org/389/ticket/48305
|
commit c4233ec14a82d1d4c0c71915be40913259d417fa
Author: Noriko Hosoi <[email protected]>
Date: Tue Dec 8 12:30:15 2015 -0800
Ticket #48305 - perl module conditional test is not conditional when checking SELinux policies
Description: commit 9fefc13c02c9ae037fad053152193794706aaa31 introduced
a regression:
Bug 1287547 - 389-ds-base-1.3.4.5-1.fc23.x86_64 leaves empty /NUL around
after ipa-server-install
To check the existence of a character special file "/dev/null", "-c" is
supposed to be used instead of "-f".
Reviewed by [email protected] (Thank you, Mark!!)
https://fedorahosted.org/389/ticket/48305
diff --git a/ldap/admin/src/scripts/DSCreate.pm.in b/ldap/admin/src/scripts/DSCreate.pm.in
index d449b023e..e62ae2c47 100644
--- a/ldap/admin/src/scripts/DSCreate.pm.in
+++ b/ldap/admin/src/scripts/DSCreate.pm.in
@@ -283,7 +283,7 @@ sub createInstanceScripts {
my $skip = shift;
my $perlexec = "@perlexec@" || "/usr/bin/env perl";
my $myperl = "!$perlexec";
- my $mydevnull = (-f "/dev/null" ? " /dev/null " : " NUL ");
+ my $mydevnull = (-c "/dev/null" ? " /dev/null " : " NUL ");
# If we have InstScriptsEnabled, we likely have setup.inf or the argument.
# However, during an upgrade, we need to know if we should upgrade the template files or not.
@@ -997,7 +997,7 @@ sub setDefaults {
sub updateSelinuxPolicy {
my $inf = shift;
- my $mydevnull = (-f "/dev/null" ? " /dev/null " : " NUL ");
+ my $mydevnull = (-c "/dev/null" ? " /dev/null " : " NUL ");
# if selinux is not available, do nothing
if ((getLogin() eq 'root') and "@with_selinux@" and
@@ -1451,7 +1451,7 @@ sub removeDSInstance {
}
# remove the selinux label from the ports if needed
- my $mydevnull = (-f "/dev/null" ? " /dev/null " : " NUL ");
+ my $mydevnull = (-c "/dev/null" ? " /dev/null " : " NUL ");
if ((getLogin() eq 'root') and "@with_selinux@" and
-f "@sbindir@/sestatus" and !system ("@sbindir@/sestatus | egrep -i \"selinux status:\\s*enabled\" > $mydevnull 2>&1")) {
foreach my $port (@{$entry->{"nsslapd-port"}})
| 0 |
9265113fa31adfc13cf2e30d4f362e25ada15582
|
389ds/389-ds-base
|
Bug 750625 - Fix Coverity (11109, 11110, 11111) Uninitialized pointer read
https://bugzilla.redhat.com/show_bug.cgi?id=750625
plugins/replication/cl5_config.c (changelog5_read_config)
Bug Description: Using uninitialized value "config.dir".
changelog config is set with the changelog config entry in
changelog5_read_config. If the search for the config entry
succeeds but there's no entry returned (actually, there is
no such case, though), the config structure is not initialized.
Fix Description: if changelog config entry search is success and
no entry is returned, initialize the config structure with NULLs.
|
commit 9265113fa31adfc13cf2e30d4f362e25ada15582
Author: Noriko Hosoi <[email protected]>
Date: Tue Nov 1 18:12:50 2011 -0700
Bug 750625 - Fix Coverity (11109, 11110, 11111) Uninitialized pointer read
https://bugzilla.redhat.com/show_bug.cgi?id=750625
plugins/replication/cl5_config.c (changelog5_read_config)
Bug Description: Using uninitialized value "config.dir".
changelog config is set with the changelog config entry in
changelog5_read_config. If the search for the config entry
succeeds but there's no entry returned (actually, there is
no such case, though), the config structure is not initialized.
Fix Description: if changelog config entry search is success and
no entry is returned, initialize the config structure with NULLs.
diff --git a/ldap/servers/plugins/replication/cl5_config.c b/ldap/servers/plugins/replication/cl5_config.c
index 09c5ecafc..980cb7fdb 100644
--- a/ldap/servers/plugins/replication/cl5_config.c
+++ b/ldap/servers/plugins/replication/cl5_config.c
@@ -125,29 +125,35 @@ int changelog5_read_config (changelog5Config *config)
int rc = LDAP_SUCCESS;
Slapi_PBlock *pb;
- pb = slapi_pblock_new ();
- slapi_search_internal_set_pb (pb, CONFIG_BASE, LDAP_SCOPE_BASE, CONFIG_FILTER, NULL, 0, NULL,
- NULL, repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
+ pb = slapi_pblock_new ();
+ slapi_search_internal_set_pb (pb, CONFIG_BASE, LDAP_SCOPE_BASE,
+ CONFIG_FILTER, NULL, 0, NULL, NULL,
+ repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
slapi_search_internal_pb (pb);
- slapi_pblock_get( pb, SLAPI_PLUGIN_INTOP_RESULT, &rc );
- if ( LDAP_SUCCESS == rc )
+ slapi_pblock_get( pb, SLAPI_PLUGIN_INTOP_RESULT, &rc );
+ if ( LDAP_SUCCESS == rc )
{
- Slapi_Entry **entries = NULL;
- slapi_pblock_get( pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries );
- if ( NULL != entries && NULL != entries[0])
+ Slapi_Entry **entries = NULL;
+ slapi_pblock_get( pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries );
+ if ( NULL != entries && NULL != entries[0])
{
- /* Extract the config info from the changelog entry */
+ /* Extract the config info from the changelog entry */
changelog5_extract_config(entries[0], config);
- }
- }
+ }
+ else
+ {
+ memset (config, 0, sizeof (*config));
+ rc = LDAP_SUCCESS;
+ }
+ }
else
{
memset (config, 0, sizeof (*config));
- rc = LDAP_SUCCESS;
+ rc = LDAP_SUCCESS;
}
- slapi_free_search_results_internal(pb);
- slapi_pblock_destroy(pb);
+ slapi_free_search_results_internal(pb);
+ slapi_pblock_destroy(pb);
return rc;
}
| 0 |
791b7e016853125fc759ee750959e6ce121533c0
|
389ds/389-ds-base
|
New net-snmp subagent
|
commit 791b7e016853125fc759ee750959e6ce121533c0
Author: Nathan Kinder <[email protected]>
Date: Fri Feb 25 22:47:13 2005 +0000
New net-snmp subagent
diff --git a/ldap/servers/slapd/agtmmap.c b/ldap/servers/slapd/agtmmap.c
index 65fbbe4b5..735366542 100644
--- a/ldap/servers/slapd/agtmmap.c
+++ b/ldap/servers/slapd/agtmmap.c
@@ -28,6 +28,7 @@
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <sys/stat.h>
#ifndef _WIN32
@@ -328,3 +329,69 @@ agt_mclose_stats (int hdl)
return EINVAL;
} /* agt_mclose_stats () */
+
+
+int
+agt_mread_stats (int hdl, struct hdr_stats_t *pHdrInfo, struct ops_stats_t *pDsOpsTbl,
+ struct entries_stats_t *pDsEntTbl) {
+ struct agt_stats_t *pfile_stats;
+
+ if ( (hdl > 1) || (hdl < 0) ) {
+ return (EINVAL);
+ }
+
+ if ((mmap_tbl [hdl].maptype != AGT_MAP_READ) && (mmap_tbl [hdl].maptype != AGT_MAP_RDWR)) {
+ return (EINVAL); /* Inavlid handle */
+ }
+
+ if (mmap_tbl [hdl].fp <= (caddr_t) 0) {
+ return (EFAULT); /* Something got corrupted */
+ }
+
+ pfile_stats = (struct agt_stats_t *) (mmap_tbl [hdl].fp);
+
+ if (pHdrInfo != NULL) {
+ /* Header */
+ pHdrInfo->restarted = pfile_stats->hdr_stats.restarted;
+ pHdrInfo->startTime = pfile_stats->hdr_stats.startTime;
+ pHdrInfo->updateTime = pfile_stats->hdr_stats.updateTime;
+ strncpy(pHdrInfo->dsVersion, pfile_stats->hdr_stats.dsVersion,
+ (sizeof(pHdrInfo->dsVersion)/sizeof(char)) - 1);
+ }
+
+ if (pDsOpsTbl != NULL) {
+ /* Ops Table */
+ pDsOpsTbl->dsAnonymousBinds = pfile_stats->ops_stats.dsAnonymousBinds;
+ pDsOpsTbl->dsUnAuthBinds = pfile_stats->ops_stats.dsUnAuthBinds;
+ pDsOpsTbl->dsSimpleAuthBinds = pfile_stats->ops_stats.dsSimpleAuthBinds;
+ pDsOpsTbl->dsStrongAuthBinds = pfile_stats->ops_stats.dsStrongAuthBinds;
+ pDsOpsTbl->dsBindSecurityErrors = pfile_stats->ops_stats.dsBindSecurityErrors;
+ pDsOpsTbl->dsInOps = pfile_stats->ops_stats.dsInOps;
+ pDsOpsTbl->dsReadOps = pfile_stats->ops_stats.dsReadOps;
+ pDsOpsTbl->dsCompareOps = pfile_stats->ops_stats.dsCompareOps;
+ pDsOpsTbl->dsAddEntryOps = pfile_stats->ops_stats.dsAddEntryOps;
+ pDsOpsTbl->dsRemoveEntryOps = pfile_stats->ops_stats.dsRemoveEntryOps;
+ pDsOpsTbl->dsModifyEntryOps = pfile_stats->ops_stats.dsModifyEntryOps;
+ pDsOpsTbl->dsModifyRDNOps = pfile_stats->ops_stats.dsModifyRDNOps;
+ pDsOpsTbl->dsListOps = pfile_stats->ops_stats.dsListOps;
+ pDsOpsTbl->dsSearchOps = pfile_stats->ops_stats.dsSearchOps;
+ pDsOpsTbl->dsOneLevelSearchOps = pfile_stats->ops_stats.dsOneLevelSearchOps;
+ pDsOpsTbl->dsWholeSubtreeSearchOps = pfile_stats->ops_stats.dsWholeSubtreeSearchOps;
+ pDsOpsTbl->dsReferrals = pfile_stats->ops_stats.dsReferrals;
+ pDsOpsTbl->dsChainings = pfile_stats->ops_stats.dsChainings;
+ pDsOpsTbl->dsSecurityErrors = pfile_stats->ops_stats.dsSecurityErrors;
+ pDsOpsTbl->dsErrors = pfile_stats->ops_stats.dsErrors;
+ }
+
+ if (pDsEntTbl != NULL) {
+ /* Entries Table */
+ pDsEntTbl->dsMasterEntries = pfile_stats->entries_stats.dsMasterEntries;
+ pDsEntTbl->dsCopyEntries = pfile_stats->entries_stats.dsCopyEntries;
+ pDsEntTbl->dsCacheEntries = pfile_stats->entries_stats.dsCacheEntries;
+ pDsEntTbl->dsCacheHits = pfile_stats->entries_stats.dsCacheHits;
+ pDsEntTbl->dsSlaveHits = pfile_stats->entries_stats.dsSlaveHits;
+ }
+
+ return (0);
+}
+
diff --git a/ldap/servers/slapd/agtmmap.h b/ldap/servers/slapd/agtmmap.h
index 1a9cd6b72..b7c3a205d 100644
--- a/ldap/servers/slapd/agtmmap.h
+++ b/ldap/servers/slapd/agtmmap.h
@@ -21,6 +21,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
+#include "nspr.h"
#ifdef _WIN32
#include <windows.h>
#define caddr_t PCHAR
@@ -67,83 +68,78 @@ struct hdr_stats_t{
/*
* Header
*/
- int hdrVersionMjr;
- int hdrVersionMnr;
int restarted; /* 1/0 = Yes/No */
time_t startTime;
time_t updateTime;
-
+ char dsVersion[100];
};
struct ops_stats_t{
/*
* Ops Table attributes
*/
- int dsAnonymousBinds;
- int dsUnAuthBinds;
- int dsSimpleAuthBinds;
- int dsStrongAuthBinds;
- int dsBindSecurityErrors;
- int dsInOps;
- int dsReadOps;
- int dsCompareOps;
- int dsAddEntryOps;
- int dsRemoveEntryOps;
- int dsModifyEntryOps;
- int dsModifyRDNOps;
- int dsListOps;
- int dsSearchOps;
- int dsOneLevelSearchOps;
- int dsWholeSubtreeSearchOps;
- int dsReferrals;
- int dsChainings;
- int dsSecurityErrors;
- int dsErrors;
- int dsConnections; /* Number of currently connected clients */
- int dsConnectionSeq; /* Monotonically increasing number bumped on each new conn est */
- int dsBytesRecv; /* Count of bytes read from clients */
- int dsBytesSent; /* Count of bytes sent to clients */
- int dsEntriesReturned; /* Number of entries returned by the server */
- int dsReferralsReturned; /* Number of entries returned by the server */
+ PRUint32 dsAnonymousBinds;
+ PRUint32 dsUnAuthBinds;
+ PRUint32 dsSimpleAuthBinds;
+ PRUint32 dsStrongAuthBinds;
+ PRUint32 dsBindSecurityErrors;
+ PRUint32 dsInOps;
+ PRUint32 dsReadOps;
+ PRUint32 dsCompareOps;
+ PRUint32 dsAddEntryOps;
+ PRUint32 dsRemoveEntryOps;
+ PRUint32 dsModifyEntryOps;
+ PRUint32 dsModifyRDNOps;
+ PRUint32 dsListOps;
+ PRUint32 dsSearchOps;
+ PRUint32 dsOneLevelSearchOps;
+ PRUint32 dsWholeSubtreeSearchOps;
+ PRUint32 dsReferrals;
+ PRUint32 dsChainings;
+ PRUint32 dsSecurityErrors;
+ PRUint32 dsErrors;
+ PRUint32 dsConnections; /* Number of currently connected clients */
+ PRUint32 dsConnectionSeq; /* Monotonically increasing number bumped on each new conn est */
+ PRUint32 dsBytesRecv; /* Count of bytes read from clients */
+ PRUint32 dsBytesSent; /* Count of bytes sent to clients */
+ PRUint32 dsEntriesReturned; /* Number of entries returned by the server */
+ PRUint32 dsReferralsReturned; /* Number of entries returned by the server */
};
struct entries_stats_t
{
- /*
- * Entries Table Attributes
- */
-
- int dsMasterEntries;
- int dsCopyEntries;
- int dsCacheEntries;
- int dsCacheHits;
- int dsSlaveHits;
-
+ /*
+ * Entries Table Attributes
+ */
+ PRUint32 dsMasterEntries;
+ PRUint32 dsCopyEntries;
+ PRUint32 dsCacheEntries;
+ PRUint32 dsCacheHits;
+ PRUint32 dsSlaveHits;
};
+
struct int_stats_t
{
- /*
- * Interaction Table Attributes
- */
-
- int dsIntIndex;
- char dsName[100];
- time_t dsTimeOfCreation;
- time_t dsTimeOfLastAttempt;
- time_t dsTimeOfLastSuccess;
- int dsFailuresSinceLastSuccess;
- int dsFailures;
- int dsSuccesses;
- char dsURL[100];
-
+ /*
+ * Interaction Table Attributes
+ */
+ PRUint32 dsIntIndex;
+ char dsName[100];
+ time_t dsTimeOfCreation;
+ time_t dsTimeOfLastAttempt;
+ time_t dsTimeOfLastSuccess;
+ PRUint32 dsFailuresSinceLastSuccess;
+ PRUint32 dsFailures;
+ PRUint32 dsSuccesses;
+ char dsURL[100];
};
+
struct agt_stats_t
{
struct hdr_stats_t hdr_stats;
struct ops_stats_t ops_stats;
struct entries_stats_t entries_stats;
struct int_stats_t int_stats[NUM_SNMP_INT_TBL_ROWS];
-
} ;
extern agt_mmap_context_t mmap_tbl[];
@@ -186,6 +182,9 @@ int agt_mopen_stats (char * statsfile, int mode, int *hdl);
****************************************************************************/
int agt_mclose_stats (int hdl);
+int agt_mread_stats(int hdl, struct hdr_stats_t *, struct ops_stats_t *,
+ struct entries_stats_t *);
+
#ifdef __cplusplus
}
#endif
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 0172384b2..3569a198a 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1437,54 +1437,54 @@ LDAPMod** entry2mods(Slapi_Entry *, LDAPMod **, int *, int);
/* SNMP Variables */
struct snmp_ops_tbl_t{
- PRInt32 *dsAnonymousBinds;
- PRInt32 *dsUnAuthBinds;
- PRInt32 *dsSimpleAuthBinds;
- PRInt32 *dsStrongAuthBinds;
- PRInt32 *dsBindSecurityErrors;
- PRInt32 *dsInOps;
- PRInt32 *dsReadOps;
- PRInt32 *dsCompareOps;
- PRInt32 *dsAddEntryOps;
- PRInt32 *dsRemoveEntryOps;
- PRInt32 *dsModifyEntryOps;
- PRInt32 *dsModifyRDNOps;
- PRInt32 *dsListOps;
- PRInt32 *dsSearchOps;
- PRInt32 *dsOneLevelSearchOps;
- PRInt32 *dsWholeSubtreeSearchOps;
- PRInt32 *dsReferrals;
- PRInt32 *dsChainings;
- PRInt32 *dsSecurityErrors;
- PRInt32 *dsErrors;
- PRInt32 *dsConnections; /* Number of currently connected clients */
- PRInt32 *dsConnectionSeq; /* Monotonically increasing number bumped on each new conn est */
- PRInt32 *dsBytesRecv; /* Count of bytes read from clients */
- PRInt32 *dsBytesSent; /* Count of bytes sent to clients */
- PRInt32 *dsEntriesReturned;
- PRInt32 *dsReferralsReturned;
+ PRUint32 *dsAnonymousBinds;
+ PRUint32 *dsUnAuthBinds;
+ PRUint32 *dsSimpleAuthBinds;
+ PRUint32 *dsStrongAuthBinds;
+ PRUint32 *dsBindSecurityErrors;
+ PRUint32 *dsInOps;
+ PRUint32 *dsReadOps;
+ PRUint32 *dsCompareOps;
+ PRUint32 *dsAddEntryOps;
+ PRUint32 *dsRemoveEntryOps;
+ PRUint32 *dsModifyEntryOps;
+ PRUint32 *dsModifyRDNOps;
+ PRUint32 *dsListOps;
+ PRUint32 *dsSearchOps;
+ PRUint32 *dsOneLevelSearchOps;
+ PRUint32 *dsWholeSubtreeSearchOps;
+ PRUint32 *dsReferrals;
+ PRUint32 *dsChainings;
+ PRUint32 *dsSecurityErrors;
+ PRUint32 *dsErrors;
+ PRUint32 *dsConnections; /* Number of currently connected clients */
+ PRUint32 *dsConnectionSeq; /* Monotonically increasing number bumped on each new conn est */
+ PRUint32 *dsBytesRecv; /* Count of bytes read from clients */
+ PRUint32 *dsBytesSent; /* Count of bytes sent to clients */
+ PRUint32 *dsEntriesReturned;
+ PRUint32 *dsReferralsReturned;
};
struct snmp_entries_tbl_t{
/* entries table */
- PRInt32 *dsMasterEntries;
- PRInt32 *dsCopyEntries;
- PRInt32 *dsCacheEntries;
- PRInt32 *dsCacheHits;
- PRInt32 *dsSlaveHits;
+ PRUint32 *dsMasterEntries;
+ PRUint32 *dsCopyEntries;
+ PRUint32 *dsCacheEntries;
+ PRUint32 *dsCacheHits;
+ PRUint32 *dsSlaveHits;
};
struct snmp_int_tbl_t{
/* interaction table */
- PRInt32 *dsIntIndex;
+ PRUint32 *dsIntIndex;
char *dsName;
time_t *dsTimeOfCreation;
time_t *dsTimeOfLastAttempt;
time_t *dsTimeOfLastSuccess;
- PRInt32 *dsFailuresSinceLastSuccess;
- PRInt32 *dsFailures;
- PRInt32 *dsSuccesses;
+ PRUint32 *dsFailuresSinceLastSuccess;
+ PRUint32 *dsFailures;
+ PRUint32 *dsSuccesses;
char *dsURL;
};
diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c
index 320c7fe9c..75e033ed8 100644
--- a/ldap/servers/slapd/snmp_collator.c
+++ b/ldap/servers/slapd/snmp_collator.c
@@ -76,14 +76,14 @@ int snmp_collator_init(){
* Initialize the mmap structure
*/
memset((void *) stats, 0, sizeof(*stats));
- stats->hdr_stats.hdrVersionMjr = AGT_MJR_VERSION;
- stats->hdr_stats.hdrVersionMnr = AGT_MNR_VERSION;
+ strncpy(stats->hdr_stats.dsVersion, SLAPD_VERSION_STR,
+ (sizeof(stats->hdr_stats.dsVersion)/sizeof(char)) - 1);
stats->hdr_stats.restarted = 0;
stats->hdr_stats.startTime = time(0); /* This is a bit off, hope it's ok */
/* point these at the mmaped data */
g_get_global_snmp_vars()->ops_tbl.dsAnonymousBinds = &(stats->ops_stats.dsAnonymousBinds);
- g_get_global_snmp_vars()->ops_tbl.dsUnAuthBinds = &(stats->ops_stats.dsUnAuthBinds);
+ g_get_global_snmp_vars()->ops_tbl.dsUnAuthBinds = &(stats->ops_stats.dsUnAuthBinds);
g_get_global_snmp_vars()->ops_tbl.dsSimpleAuthBinds = &(stats->ops_stats.dsSimpleAuthBinds);
g_get_global_snmp_vars()->ops_tbl.dsStrongAuthBinds = &(stats->ops_stats.dsStrongAuthBinds);
g_get_global_snmp_vars()->ops_tbl.dsBindSecurityErrors = &(stats->ops_stats.dsBindSecurityErrors);
diff --git a/ldap/servers/snmp/ldap-agent.c b/ldap/servers/snmp/ldap-agent.c
new file mode 100644
index 000000000..a8060e8c5
--- /dev/null
+++ b/ldap/servers/snmp/ldap-agent.c
@@ -0,0 +1,678 @@
+#include <stdio.h>
+#include <time.h>
+#include <net-snmp/net-snmp-config.h>
+#include <net-snmp/net-snmp-includes.h>
+#include <net-snmp/agent/net-snmp-agent-includes.h>
+#include <net-snmp/library/snmp_assert.h>
+
+#include "ldap-agent.h"
+
+static netsnmp_handler_registration *ops_handler = NULL;
+static netsnmp_handler_registration *entries_handler = NULL;
+static netsnmp_handler_registration *entity_handler = NULL;
+static netsnmp_table_array_callbacks ops_cb;
+static netsnmp_table_array_callbacks entries_cb;
+static netsnmp_table_array_callbacks entity_cb;
+extern server_instance *server_head;
+
+/* Set table oids */
+oid dsOpsTable_oid[] = { dsOpsTable_TABLE_OID };
+size_t dsOpsTable_oid_len = OID_LENGTH(dsOpsTable_oid);
+oid dsEntriesTable_oid[] = { dsEntriesTable_TABLE_OID };
+size_t dsEntriesTable_oid_len = OID_LENGTH(dsEntriesTable_oid);
+oid dsEntityTable_oid[] = {dsEntityTable_TABLE_OID };
+size_t dsEntityTable_oid_len = OID_LENGTH(dsEntityTable_oid);
+
+/* Set trap oids */
+oid snmptrap_oid[] = { snmptrap_OID };
+size_t snmptrap_oid_len = OID_LENGTH(snmptrap_oid);
+oid enterprise_oid[] = { enterprise_OID };
+size_t enterprise_oid_len = OID_LENGTH(enterprise_oid);
+
+/************************************************************
+ * init_ldap_agent
+ *
+ * Initializes the agent and populates the stats table
+ * with initial data.
+ */
+void
+init_ldap_agent(void)
+{
+ server_instance *serv_p = NULL;
+ stats_table_context *new_row = NULL;
+ int err;
+ int stats_hdl = -1;
+
+ /* Define and create the table */
+ initialize_stats_table();
+
+ /* Initialize data for each server in conf file */
+ for (serv_p = server_head; serv_p != NULL; serv_p = serv_p->next) {
+ /* Check if this row already exists. */
+ if ((new_row = stats_table_find_row(serv_p->port)) == NULL) {
+ /* Create a new row */
+ if ((new_row = stats_table_create_row(serv_p->port)) != NULL) {
+ /* Set pointer for entity table */
+ new_row->entity_tbl = serv_p;
+
+ /* Set previous state of server to unknown */
+ serv_p->server_state = STATE_UNKNOWN;
+
+ /* Insert new row into the table */
+ snmp_log(LOG_DEBUG, "Inserting row for server: %d\n", serv_p->port);
+ CONTAINER_INSERT(ops_cb.container, new_row);
+ } else {
+ /* error during malloc of row */
+ snmp_log(LOG_ERR, "Error creating row for server: %d\n",
+ serv_p->port);
+ }
+ }
+ }
+
+ /* Force load data into stats table */
+ load_stats_table(NULL, NULL);
+}
+
+/************************************************************
+ * initialize_stats_table
+ *
+ * Initializes the stats table by defining its contents,
+ * how it's structured, and registering callbacks.
+ */
+void
+initialize_stats_table(void)
+{
+ netsnmp_table_registration_info *ops_table_info = NULL;
+ netsnmp_table_registration_info *entries_table_info = NULL;
+ netsnmp_table_registration_info *entity_table_info = NULL;
+ netsnmp_cache *stats_table_cache = NULL;
+
+ if (ops_handler || entries_handler || entity_handler) {
+ snmp_log(LOG_ERR, "initialize_stats_table called more than once.\n");
+ return;
+ }
+
+ memset(&ops_cb, 0x00, sizeof(ops_cb));
+ memset(&entries_cb, 0x00, sizeof(entries_cb));
+ memset(&entity_cb, 0x00, sizeof(entity_cb));
+
+ /* create table structures */
+ ops_table_info = SNMP_MALLOC_TYPEDEF(netsnmp_table_registration_info);
+ entries_table_info = SNMP_MALLOC_TYPEDEF(netsnmp_table_registration_info);
+ entity_table_info = SNMP_MALLOC_TYPEDEF(netsnmp_table_registration_info);
+
+ /* create handlers */
+ ops_handler = netsnmp_create_handler_registration("dsOpsTable",
+ netsnmp_table_array_helper_handler,
+ dsOpsTable_oid,
+ dsOpsTable_oid_len,
+ HANDLER_CAN_RONLY);
+ entries_handler = netsnmp_create_handler_registration("dsEntriesTable",
+ netsnmp_table_array_helper_handler,
+ dsEntriesTable_oid,
+ dsEntriesTable_oid_len,
+ HANDLER_CAN_RONLY);
+ entity_handler = netsnmp_create_handler_registration("dsEntityTable",
+ netsnmp_table_array_helper_handler,
+ dsEntityTable_oid,
+ dsEntityTable_oid_len,
+ HANDLER_CAN_RONLY);
+
+ if (!ops_handler || !entries_handler || !entity_handler ||
+ !ops_table_info || !entries_table_info || !entity_table_info) {
+ /* malloc failed */
+ snmp_log(LOG_ERR, "malloc failed in initialize_stats_table\n");
+ return;
+ }
+
+ /* define table structures */
+ netsnmp_table_helper_add_index(ops_table_info, ASN_INTEGER);
+ netsnmp_table_helper_add_index(entries_table_info, ASN_INTEGER);
+ netsnmp_table_helper_add_index(entity_table_info, ASN_INTEGER);
+
+ ops_table_info->min_column = dsOpsTable_COL_MIN;
+ ops_table_info->max_column = dsOpsTable_COL_MAX;
+ entries_table_info->min_column = dsEntriesTable_COL_MIN;
+ entries_table_info->max_column = dsEntriesTable_COL_MAX;
+ entity_table_info->min_column = dsEntityTable_COL_MIN;
+ entity_table_info->max_column = dsEntityTable_COL_MAX;
+
+ /*
+ * Define callbacks and the container. We only use one container that
+ * all of the tables use.
+ */
+ ops_cb.get_value = dsOpsTable_get_value;
+ ops_cb.container = netsnmp_container_find("dsOpsTable_primary:"
+ "dsOpsTable:" "table_container");
+ entries_cb.get_value = dsEntriesTable_get_value;
+ entries_cb.container = ops_cb.container;
+ entity_cb.get_value = dsEntityTable_get_value;
+ entity_cb.container = ops_cb.container;
+
+ /* registering the tables with the master agent */
+ netsnmp_table_container_register(ops_handler, ops_table_info, &ops_cb,
+ ops_cb.container, 1);
+ netsnmp_table_container_register(entries_handler, entries_table_info, &entries_cb,
+ entries_cb.container, 1);
+ netsnmp_table_container_register(entity_handler, entity_table_info, &entity_cb,
+ entity_cb.container, 1);
+
+ /* Setup cache for auto reloading of stats */
+ stats_table_cache = netsnmp_cache_create(CACHE_REFRESH_INTERVAL, load_stats_table,
+ NULL, dsOpsTable_oid, dsOpsTable_oid_len);
+ stats_table_cache->flags |= NETSNMP_CACHE_DONT_FREE_EXPIRED;
+ stats_table_cache->flags |= NETSNMP_CACHE_DONT_AUTO_RELEASE;
+ stats_table_cache->flags |= NETSNMP_CACHE_AUTO_RELOAD;
+ netsnmp_inject_handler(ops_handler, netsnmp_cache_handler_get(stats_table_cache));
+}
+
+/************************************************************
+ * stats_table_create_row
+ *
+ * Creates a new table row using the supplied port number as
+ * the index, then returns a pointer to the new row.
+ */
+stats_table_context *
+stats_table_create_row(unsigned long portnum)
+{
+ netsnmp_index index;
+ stats_table_context *ctx = SNMP_MALLOC_TYPEDEF(stats_table_context);
+ oid *index_oid = (oid *)malloc(sizeof(oid) * MAX_OID_LEN);
+
+ /* Create index using port number */
+ index_oid[0] = portnum;
+ index.oids = index_oid;
+ index.len = 1;
+
+ /* Copy index into row structure */
+ if (ctx && index_oid) {
+ memcpy(&ctx->index, &index, sizeof(index));
+ return ctx;
+ } else {
+ /* Error during malloc */
+ snmp_log(LOG_ERR, "malloc failed in stats_table_create_row\n");
+ return NULL;
+ }
+}
+
+/************************************************************
+ * stats_table_find_row
+ *
+ * Searches for a row by the port number. Returns NULL if
+ * the row doesn't exist.
+ */
+stats_table_context *
+stats_table_find_row(unsigned long portnum)
+{
+ netsnmp_index index;
+ oid index_oid[MAX_OID_LEN];
+
+ index_oid[0] = portnum;
+ index.oids = index_oid;
+ index.len = 1;
+
+ return (stats_table_context *)
+ CONTAINER_FIND(ops_cb.container, &index);
+}
+
+/************************************************************
+ * load_stats_table
+ *
+ * Reloads the stats into the table. This is called
+ * automatically from the cache handler. This function
+ * does not reload the entity table since it's static
+ * information. We also check if any traps need to
+ * be sent here.
+ */
+int
+load_stats_table(netsnmp_cache *cache, void *foo)
+{
+ server_instance *serv_p = NULL;
+ stats_table_context *ctx = NULL;
+ netsnmp_variable_list *vars = NULL;
+ time_t previous_start;
+ int previous_state;
+ int stats_hdl = -1;
+ int err;
+
+ snmp_log(LOG_DEBUG, "Reloading stats.\n");
+
+ /* Initialize data for each server in conf file */
+ for (serv_p = server_head; serv_p != NULL; serv_p = serv_p->next) {
+ if ((ctx = stats_table_find_row(serv_p->port)) != NULL) {
+ /* Save previous state of the server to
+ * see if a trap needs to be sent */
+ previous_state = serv_p->server_state;
+ previous_start = ctx->hdr_tbl.startTime;
+
+ snmp_log(LOG_DEBUG, "Opening stats file (%s) for server: %d\n",
+ serv_p->stats_file, serv_p->port);
+
+ /* Open the stats file */
+ if ((err = agt_mopen_stats(serv_p->stats_file, O_RDONLY, &stats_hdl)) != 0) {
+ /* Server must be down */
+ serv_p->server_state = SERVER_DOWN;
+ /* Zero out the ops and entries tables */
+ memset(&ctx->ops_tbl, 0x00, sizeof(ctx->ops_tbl));
+ memset(&ctx->entries_tbl, 0x00, sizeof(ctx->entries_tbl));
+ if (previous_state != SERVER_DOWN)
+ snmp_log(LOG_INFO, "Unable to open stats file (%s) for server: %d\n",
+ serv_p->stats_file, serv_p->port);
+ } else {
+ /* Initialize ops table */
+ if ((err = agt_mread_stats(stats_hdl, &ctx->hdr_tbl, &ctx->ops_tbl,
+ &ctx->entries_tbl)) != 0)
+ snmp_log(LOG_ERR, "Unable to read stats file: %s\n",
+ serv_p->stats_file);
+
+ /* Close stats file */
+ if ((err = agt_mclose_stats(stats_hdl)) != 0)
+ snmp_log(LOG_ERR, "Error closing stats file: %s\n",
+ serv_p->stats_file);
+
+ /* Server must be down if the stats file hasn't been
+ * updated in a while */
+ if (difftime(time(NULL), ctx->hdr_tbl.updateTime) >= UPDATE_THRESHOLD) {
+ serv_p->server_state = SERVER_DOWN;
+ if (previous_state != SERVER_DOWN)
+ snmp_log(LOG_INFO, "Stats file for server %d hasn't been updated"
+ " in %d seconds.\n", serv_p->port, UPDATE_THRESHOLD);
+ } else {
+ serv_p->server_state = SERVER_UP;
+ }
+ }
+
+ /* If the state of the server changed since the last
+ * load of the stats, send a trap. */
+ if (previous_state != STATE_UNKNOWN) {
+ if (serv_p->server_state != previous_state) {
+ if (serv_p->server_state == SERVER_UP) {
+ snmp_log(LOG_INFO, "Detected start of server: %d\n",
+ serv_p->port);
+ send_nsDirectoryServerStart_trap(serv_p);
+ } else {
+ send_nsDirectoryServerDown_trap(serv_p);
+ /* Zero out the ops and entries tables */
+ memset(&ctx->ops_tbl, 0x00, sizeof(ctx->ops_tbl));
+ memset(&ctx->entries_tbl, 0x00, sizeof(ctx->entries_tbl));
+ }
+ } else if (ctx->hdr_tbl.startTime != previous_start) {
+ /* Send traps if the server has restarted since the last load */
+ snmp_log(LOG_INFO, "Detected restart of server: %d\n", serv_p->port);
+ send_nsDirectoryServerDown_trap(serv_p);
+ send_nsDirectoryServerStart_trap(serv_p);
+ }
+ }
+ } else {
+ /* Can't find our row. This shouldn't ever happen. */
+ snmp_log(LOG_ERR, "Row not found for server: %d\n",
+ serv_p->port);
+ }
+ }
+ return 0;
+}
+
+/************************************************************
+ * dsOpsTable_get_value
+ *
+ * This routine is called for get requests to copy the data
+ * from the context to the varbind for the request. If the
+ * context has been properly maintained, you don't need to
+ * change in code in this fuction.
+ */
+int
+dsOpsTable_get_value(netsnmp_request_info *request,
+ netsnmp_index * item,
+ netsnmp_table_request_info *table_info)
+{
+ netsnmp_variable_list *var = request->requestvb;
+ stats_table_context *context = (stats_table_context *) item;
+
+ switch (table_info->colnum) {
+
+ case COLUMN_DSANONYMOUSBINDS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsAnonymousBinds,
+ sizeof(context->ops_tbl.dsAnonymousBinds));
+ break;
+
+ case COLUMN_DSUNAUTHBINDS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsUnAuthBinds,
+ sizeof(context->ops_tbl.dsUnAuthBinds));
+ break;
+
+ case COLUMN_DSSIMPLEAUTHBINDS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsSimpleAuthBinds,
+ sizeof(context->ops_tbl.dsSimpleAuthBinds));
+ break;
+
+ case COLUMN_DSSTRONGAUTHBINDS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsStrongAuthBinds,
+ sizeof(context->ops_tbl.dsStrongAuthBinds));
+ break;
+
+ case COLUMN_DSBINDSECURITYERRORS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsBindSecurityErrors,
+ sizeof(context->ops_tbl.dsBindSecurityErrors));
+ break;
+
+ case COLUMN_DSINOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsInOps,
+ sizeof(context->ops_tbl.dsInOps));
+ break;
+
+ case COLUMN_DSREADOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsReadOps,
+ sizeof(context->ops_tbl.dsReadOps));
+ break;
+
+ case COLUMN_DSCOMPAREOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsCompareOps,
+ sizeof(context->ops_tbl.dsCompareOps));
+ break;
+
+ case COLUMN_DSADDENTRYOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsAddEntryOps,
+ sizeof(context->ops_tbl.dsAddEntryOps));
+ break;
+
+ case COLUMN_DSREMOVEENTRYOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsRemoveEntryOps,
+ sizeof(context->ops_tbl.dsRemoveEntryOps));
+ break;
+
+ case COLUMN_DSMODIFYENTRYOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsModifyEntryOps,
+ sizeof(context->ops_tbl.dsModifyEntryOps));
+ break;
+
+ case COLUMN_DSMODIFYRDNOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsModifyRDNOps,
+ sizeof(context->ops_tbl.dsModifyRDNOps));
+ break;
+
+ case COLUMN_DSLISTOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsListOps,
+ sizeof(context->ops_tbl.dsListOps));
+ break;
+
+ case COLUMN_DSSEARCHOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsSearchOps,
+ sizeof(context->ops_tbl.dsSearchOps));
+ break;
+
+ case COLUMN_DSONELEVELSEARCHOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsOneLevelSearchOps,
+ sizeof(context->ops_tbl.dsOneLevelSearchOps));
+ break;
+
+ case COLUMN_DSWHOLESUBTREESEARCHOPS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsWholeSubtreeSearchOps,
+ sizeof(context->ops_tbl.dsWholeSubtreeSearchOps));
+ break;
+
+ case COLUMN_DSREFERRALS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsReferrals,
+ sizeof(context->ops_tbl.dsReferrals));
+ break;
+
+ case COLUMN_DSCHAININGS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsChainings,
+ sizeof(context->ops_tbl.dsChainings));
+ break;
+
+ case COLUMN_DSSECURITYERRORS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsSecurityErrors,
+ sizeof(context->ops_tbl.dsSecurityErrors));
+ break;
+
+ case COLUMN_DSERRORS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->ops_tbl.dsErrors,
+ sizeof(context->ops_tbl.dsErrors));
+ break;
+
+ default:/* We shouldn't get here */
+ snmp_log(LOG_ERR, "Unknown column in dsOpsTable_get_value\n");
+ return SNMP_ERR_GENERR;
+ }
+ return SNMP_ERR_NOERROR;
+}
+
+/************************************************************
+ * dsEntriesTable_get_value
+ *
+ * This routine is called for get requests to copy the data
+ * from the context to the varbind for the request. If the
+ * context has been properly maintained, you don't need to
+ * change in code in this fuction.
+ */
+int
+dsEntriesTable_get_value(netsnmp_request_info *request,
+ netsnmp_index * item,
+ netsnmp_table_request_info *table_info)
+{
+ netsnmp_variable_list *var = request->requestvb;
+ stats_table_context *context = (stats_table_context *) item;
+
+ switch (table_info->colnum) {
+
+ case COLUMN_DSMASTERENTRIES:
+ snmp_set_var_typed_value(var, ASN_GAUGE,
+ (char *) &context->entries_tbl.dsMasterEntries,
+ sizeof(context->entries_tbl.dsMasterEntries));
+ break;
+
+ case COLUMN_DSCOPYENTRIES:
+ snmp_set_var_typed_value(var, ASN_GAUGE,
+ (char *) &context->entries_tbl.dsCopyEntries,
+ sizeof(context->entries_tbl.dsCopyEntries));
+ break;
+
+ case COLUMN_DSCACHEENTRIES:
+ snmp_set_var_typed_value(var, ASN_GAUGE,
+ (char *) &context->entries_tbl.dsCacheEntries,
+ sizeof(context->entries_tbl.dsCacheEntries));
+ break;
+
+ case COLUMN_DSCACHEHITS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->entries_tbl.dsCacheHits,
+ sizeof(context->entries_tbl.dsCacheHits));
+ break;
+
+ case COLUMN_DSSLAVEHITS:
+ snmp_set_var_typed_value(var, ASN_COUNTER,
+ (char *) &context->entries_tbl.dsSlaveHits,
+ sizeof(context->entries_tbl.dsSlaveHits));
+ break;
+
+ default:/* We shouldn't get here */
+ snmp_log(LOG_ERR, "Unknown column in dsEntriesTable_get_value\n");
+ return SNMP_ERR_GENERR;
+ }
+ return SNMP_ERR_NOERROR;
+}
+
+/************************************************************
+ * dsEntityTable_get_value
+ *
+ * This routine is called for get requests to copy the data
+ * from the context to the varbind for the request. If the
+ * context has been properly maintained, you don't need to
+ * change in code in this fuction.
+ */
+int
+dsEntityTable_get_value(netsnmp_request_info *request,
+ netsnmp_index * item,
+ netsnmp_table_request_info *table_info)
+{
+ netsnmp_variable_list *var = request->requestvb;
+ stats_table_context *context = (stats_table_context *) item;
+ server_instance *server = (server_instance *) context->entity_tbl;
+
+ switch (table_info->colnum) {
+
+ case COLUMN_DSENTITYDESCR:
+ snmp_set_var_typed_value(var, ASN_OCTET_STR,
+ (char *) server->description,
+ strlen(server->description));
+ break;
+
+ case COLUMN_DSENTITYVERS:
+ snmp_set_var_typed_value(var, ASN_OCTET_STR,
+ (char *) context->hdr_tbl.dsVersion,
+ strlen(context->hdr_tbl.dsVersion));
+ break;
+
+ case COLUMN_DSENTITYORG:
+ snmp_set_var_typed_value(var, ASN_OCTET_STR,
+ (char *) server->org,
+ strlen(server->org));
+ break;
+
+ case COLUMN_DSENTITYLOCATION:
+ snmp_set_var_typed_value(var, ASN_OCTET_STR,
+ (char *) server->location,
+ strlen(server->location));
+ break;
+
+ case COLUMN_DSENTITYCONTACT:
+ snmp_set_var_typed_value(var, ASN_OCTET_STR,
+ (char *) server->contact,
+ strlen(server->contact));
+ break;
+
+ case COLUMN_DSENTITYNAME:
+ snmp_set_var_typed_value(var, ASN_OCTET_STR,
+ (char *) server->name,
+ strlen(server->name));
+ break;
+
+ default:/* We shouldn't get here */
+ snmp_log(LOG_ERR, "Unknown column in dsEntityTable_get_value\n");
+ return SNMP_ERR_GENERR;
+ }
+ return SNMP_ERR_NOERROR;
+}
+
+/************************************************************
+ * send_nsDirectoryServerDown_trap
+ *
+ * Sends off the server down trap.
+ */
+int
+send_nsDirectoryServerDown_trap(server_instance *serv_p)
+{
+ netsnmp_variable_list *var_list = NULL;
+
+ snmp_log(LOG_INFO, "Sending down trap for server: %d\n", serv_p->port);
+
+ /* Define the oids for the trap */
+ oid nsDirectoryServerDown_oid[] = { nsDirectoryServerDown_OID };
+ oid dsEntityDescr_oid[] = { dsEntityTable_TABLE_OID, 1, COLUMN_DSENTITYDESCR, serv_p->port };
+ oid dsEntityVers_oid[] = { dsEntityTable_TABLE_OID, 1, COLUMN_DSENTITYVERS, serv_p->port };
+ oid dsEntityLocation_oid[] = { dsEntityTable_TABLE_OID, 1, COLUMN_DSENTITYLOCATION, serv_p->port };
+ oid dsEntityContact_oid[] = { dsEntityTable_TABLE_OID, 1, COLUMN_DSENTITYCONTACT, serv_p->port };
+
+ /* Setup the variable list to send with the trap */
+ snmp_varlist_add_variable(&var_list,
+ snmptrap_oid, OID_LENGTH(snmptrap_oid),
+ ASN_OBJECT_ID,
+ (u_char *) &nsDirectoryServerDown_oid,
+ sizeof(nsDirectoryServerDown_oid));
+ snmp_varlist_add_variable(&var_list,
+ dsEntityDescr_oid,
+ OID_LENGTH(dsEntityDescr_oid), ASN_OCTET_STR,
+ (char *) serv_p->description,
+ strlen(serv_p->description));
+ snmp_varlist_add_variable(&var_list,
+ dsEntityVers_oid,
+ OID_LENGTH(dsEntityVers_oid), ASN_OCTET_STR,
+ (char *) serv_p->version,
+ strlen(serv_p->version));
+ snmp_varlist_add_variable(&var_list,
+ dsEntityLocation_oid,
+ OID_LENGTH(dsEntityLocation_oid),
+ ASN_OCTET_STR,
+ (char *) serv_p->location,
+ strlen(serv_p->location));
+ snmp_varlist_add_variable(&var_list,
+ dsEntityContact_oid,
+ OID_LENGTH(dsEntityContact_oid),
+ ASN_OCTET_STR,
+ (char *) serv_p->contact,
+ strlen(serv_p->contact));
+
+ /* Send the trap */
+ send_v2trap(var_list);
+ snmp_free_varbind(var_list);
+
+ return SNMP_ERR_NOERROR;
+}
+
+/************************************************************
+ * send_nsDirectoryServerStart_trap
+ *
+ * Sends off the server start trap.
+ */
+int
+send_nsDirectoryServerStart_trap(server_instance *serv_p)
+{
+ netsnmp_variable_list *var_list = NULL;
+
+ snmp_log(LOG_INFO, "Sending start trap for server: %d\n", serv_p->port);
+
+ /* Define the oids for the trap */
+ oid nsDirectoryServerStart_oid[] = { nsDirectoryServerStart_OID };
+ oid dsEntityDescr_oid[] = { dsEntityTable_TABLE_OID, 1, COLUMN_DSENTITYDESCR, serv_p->port };
+ oid dsEntityVers_oid[] = { dsEntityTable_TABLE_OID, 1, COLUMN_DSENTITYVERS, serv_p->port };
+ oid dsEntityLocation_oid[] = { dsEntityTable_TABLE_OID, 1, COLUMN_DSENTITYLOCATION, serv_p->port };
+
+ /* Setup the variable list to send with the trap */
+ snmp_varlist_add_variable(&var_list,
+ snmptrap_oid, OID_LENGTH(snmptrap_oid),
+ ASN_OBJECT_ID,
+ (u_char *) &nsDirectoryServerStart_oid,
+ sizeof(nsDirectoryServerStart_oid));
+ snmp_varlist_add_variable(&var_list,
+ dsEntityDescr_oid,
+ OID_LENGTH(dsEntityDescr_oid), ASN_OCTET_STR,
+ (char *) serv_p->description,
+ strlen(serv_p->description));
+ snmp_varlist_add_variable(&var_list,
+ dsEntityVers_oid,
+ OID_LENGTH(dsEntityVers_oid), ASN_OCTET_STR,
+ (char *) serv_p->version,
+ strlen(serv_p->version));
+ snmp_varlist_add_variable(&var_list,
+ dsEntityLocation_oid,
+ OID_LENGTH(dsEntityLocation_oid),
+ ASN_OCTET_STR,
+ (char *) serv_p->location,
+ strlen(serv_p->location));
+
+ /* Send the trap */
+ send_v2trap(var_list);
+ snmp_free_varbind(var_list);
+
+ return SNMP_ERR_NOERROR;
+}
diff --git a/ldap/servers/snmp/ldap-agent.h b/ldap/servers/snmp/ldap-agent.h
new file mode 100644
index 000000000..6acf79c69
--- /dev/null
+++ b/ldap/servers/snmp/ldap-agent.h
@@ -0,0 +1,146 @@
+#ifndef DSOPSTABLE_H
+#define DSOPSTABLE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include <net-snmp/net-snmp-config.h>
+#include <net-snmp/library/container.h>
+#include <net-snmp/agent/table_array.h>
+#include "agtmmap.h"
+
+#define MAXLINE 4096
+#define CACHE_REFRESH_INTERVAL 15
+#define UPDATE_THRESHOLD 20
+#define LDAP_AGENT_PIDFILE ".ldap-agent.pid"
+#define LDAP_AGENT_LOGFILE "ldap-agent.log"
+
+/*************************************************************
+ * Trap value defines
+ */
+#define SERVER_UP 7002
+#define SERVER_DOWN 7001
+#define STATE_UNKNOWN 0
+
+/*************************************************************
+ * Structures
+ */
+typedef struct server_instance_s {
+ PRUint32 port;
+ int server_state;
+ char *stats_file;
+ char *dse_ldif;
+ char *description;
+ char *version;
+ char *org;
+ char *location;
+ char *contact;
+ char *name;
+ struct server_instance_s *next;
+} server_instance;
+
+typedef struct stats_table_context_s {
+ netsnmp_index index;
+ struct hdr_stats_t hdr_tbl;
+ struct ops_stats_t ops_tbl;
+ struct entries_stats_t entries_tbl;
+ server_instance *entity_tbl;
+} stats_table_context;
+
+/*************************************************************
+ * Function Declarations
+ */
+ void exit_usage();
+ void load_config(char *);
+ void init_ldap_agent(void);
+ void initialize_stats_table(void);
+ int load_stats_table(netsnmp_cache *, void *);
+ stats_table_context *stats_table_create_row(unsigned long);
+ stats_table_context *stats_table_find_row(unsigned long);
+ int dsOpsTable_get_value(netsnmp_request_info *,
+ netsnmp_index *,
+ netsnmp_table_request_info *);
+ int dsEntriesTable_get_value(netsnmp_request_info *,
+ netsnmp_index *,
+ netsnmp_table_request_info *);
+ int dsEntityTable_get_value(netsnmp_request_info *,
+ netsnmp_index *,
+ netsnmp_table_request_info *);
+ int send_nsDirectoryServerDown_trap(server_instance *);
+ int send_nsDirectoryServerStart_trap(server_instance *);
+
+/*************************************************************
+ * Oid Declarations
+ */
+ extern oid dsOpsTable_oid[];
+ extern size_t dsOpsTable_oid_len;
+ extern oid dsEntriesTable_oid[];
+ extern size_t dsEntriesTable_oid_len;
+ extern oid dsEntityTable_oid[];
+ extern size_t dsEntityTable_oid_len;
+ extern oid snmptrap_oid[];
+ extern size_t snmptrap_oid_len;
+
+#define enterprise_OID 1,3,6,1,4,1,1450
+#define dsOpsTable_TABLE_OID enterprise_OID,7,1
+#define dsEntriesTable_TABLE_OID enterprise_OID,7,2
+#define dsEntityTable_TABLE_OID enterprise_OID,7,5
+#define snmptrap_OID 1,3,6,1,6,3,1,1,4,1,0
+#define nsDirectoryServerDown_OID enterprise_OID,0,7001
+#define nsDirectoryServerStart_OID enterprise_OID,0,7002
+
+/*************************************************************
+ * dsOpsTable column defines
+ */
+#define COLUMN_DSANONYMOUSBINDS 1
+#define COLUMN_DSUNAUTHBINDS 2
+#define COLUMN_DSSIMPLEAUTHBINDS 3
+#define COLUMN_DSSTRONGAUTHBINDS 4
+#define COLUMN_DSBINDSECURITYERRORS 5
+#define COLUMN_DSINOPS 6
+#define COLUMN_DSREADOPS 7
+#define COLUMN_DSCOMPAREOPS 8
+#define COLUMN_DSADDENTRYOPS 9
+#define COLUMN_DSREMOVEENTRYOPS 10
+#define COLUMN_DSMODIFYENTRYOPS 11
+#define COLUMN_DSMODIFYRDNOPS 12
+#define COLUMN_DSLISTOPS 13
+#define COLUMN_DSSEARCHOPS 14
+#define COLUMN_DSONELEVELSEARCHOPS 15
+#define COLUMN_DSWHOLESUBTREESEARCHOPS 16
+#define COLUMN_DSREFERRALS 17
+#define COLUMN_DSCHAININGS 18
+#define COLUMN_DSSECURITYERRORS 19
+#define COLUMN_DSERRORS 20
+#define dsOpsTable_COL_MIN 1
+#define dsOpsTable_COL_MAX 20
+
+/*************************************************************
+ * dsEntriesTable column defines
+ */
+#define COLUMN_DSMASTERENTRIES 1
+#define COLUMN_DSCOPYENTRIES 2
+#define COLUMN_DSCACHEENTRIES 3
+#define COLUMN_DSCACHEHITS 4
+#define COLUMN_DSSLAVEHITS 5
+#define dsEntriesTable_COL_MIN 1
+#define dsEntriesTable_COL_MAX 5
+
+/*************************************************************
+ * dsEntityTable column defines
+ */
+#define COLUMN_DSENTITYDESCR 1
+#define COLUMN_DSENTITYVERS 2
+#define COLUMN_DSENTITYORG 3
+#define COLUMN_DSENTITYLOCATION 4
+#define COLUMN_DSENTITYCONTACT 5
+#define COLUMN_DSENTITYNAME 6
+#define dsEntityTable_COL_MIN 1
+#define dsEntityTable_COL_MAX 6
+
+#ifdef __cplusplus
+}
+#endif
+#endif /** DSOPSTABLE_H */
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
new file mode 100644
index 000000000..a0900de3e
--- /dev/null
+++ b/ldap/servers/snmp/main.c
@@ -0,0 +1,367 @@
+#include <net-snmp/net-snmp-config.h>
+#include <net-snmp/net-snmp-includes.h>
+#include <net-snmp/agent/net-snmp-agent-includes.h>
+#include <signal.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <ldap-agent.h>
+
+static char *agentx_master = NULL;
+static char *agent_logdir = NULL;
+static char *pidfile = NULL;
+server_instance *server_head = NULL;
+
+static int keep_running;
+
+RETSIGTYPE
+stop_server(int signum) {
+ if (signum == SIGUSR1) {
+ snmp_log(LOG_INFO, "Detected attempt to start ldap-agent again.\n");
+ } else {
+ snmp_log(LOG_INFO, "Received stop signal. Stopping ldap-agent...\n");
+ keep_running = 0;
+ }
+}
+
+int
+main (int argc, char *argv[]) {
+ char *config_file = NULL;
+ netsnmp_log_handler *log_hdl = NULL;
+ int c, log_level = LOG_INFO;
+ struct stat logdir_s;
+ pid_t child_pid;
+ FILE *pid_fp;
+
+ /* Load options */
+ while ((--argc > 0) && ((*++argv)[0] == '-')) {
+ while (c = *++argv[0]) {
+ switch (c) {
+ case 'D':
+ log_level = LOG_DEBUG;
+ break;
+ default:
+ printf("ldap-agent: illegal option %c\n", c);
+ exit_usage();
+ }
+ }
+ }
+
+ if (argc != 1)
+ exit_usage();
+
+ /* load config file */
+ if ((config_file = strdup(*argv)) == NULL) {
+ printf("ldap-agent: Memory error loading config file\n");
+ exit(1);
+ }
+
+ load_config(config_file);
+
+ /* check if we're already running as another process */
+ if ((pid_fp = fopen(pidfile, "r")) != NULL) {
+ fscanf(pid_fp, "%d", &child_pid);
+ fclose(pid_fp);
+ if (kill(child_pid, SIGUSR1) == 0) {
+ printf("ldap-agent: Already running as pid %d!\n", child_pid);
+ exit(1);
+ } else {
+ /* old pidfile exists, but the process doesn't. Cleanup pidfile */
+ remove(pidfile);
+ }
+ }
+
+ /* start logging */
+ netsnmp_ds_set_boolean(NETSNMP_DS_LIBRARY_ID,
+ NETSNMP_DS_LIB_LOG_TIMESTAMP, 1);
+
+ if ((log_hdl = netsnmp_register_loghandler(NETSNMP_LOGHANDLER_FILE,
+ log_level)) != NULL) {
+ if (agent_logdir != NULL) {
+ /* Verify agent-logdir setting */
+ if (stat(agent_logdir, &logdir_s) < 0) {
+ printf("ldap-agent: Error reading logdir: %s\n", agent_logdir);
+ exit(1);
+ } else {
+ /* Is it a directory? */
+ if (S_ISDIR(logdir_s.st_mode)) {
+ /* Can we write to it? */
+ if (access(agent_logdir, W_OK) < 0) {
+ printf("ldap-agent: Unable to write to logdir: %s\n",
+ agent_logdir);
+ exit(1);
+ }
+ } else {
+ printf("ldap-agent: agent-logdir setting must point to a directory.\n");
+ exit(1);
+ }
+ }
+
+ /* agent-logdir setting looks ok */
+ if ((log_hdl->token = malloc(strlen(agent_logdir) +
+ strlen(LDAP_AGENT_LOGFILE) + 2)) != NULL) {
+ strncpy((char *) log_hdl->token, agent_logdir, strlen(agent_logdir) + 1);
+ /* add a trailing slash if needed */
+ if (*(agent_logdir + strlen(agent_logdir)) != '/')
+ strcat((char *) log_hdl->token, "/");
+ strcat((char *) log_hdl->token, LDAP_AGENT_LOGFILE);
+ }
+ } else {
+ /* agent-logdir not set, so write locally */
+ log_hdl->token = strdup(LDAP_AGENT_LOGFILE);
+ }
+
+ netsnmp_enable_filelog(log_hdl, 1);
+ } else {
+ printf("Error starting logging.");
+ exit(1);
+ }
+
+ snmp_log(LOG_INFO, "Starting ldap-agent...\n");
+
+ /* setup agentx master */
+ netsnmp_ds_set_boolean(NETSNMP_DS_APPLICATION_ID,
+ NETSNMP_DS_AGENT_ROLE, 1);
+ if (agentx_master)
+ netsnmp_ds_set_string(NETSNMP_DS_APPLICATION_ID,
+ NETSNMP_DS_AGENT_X_SOCKET, agentx_master);
+
+ /* run as a daemon */
+ if (netsnmp_daemonize(0, 0)) {
+ /* sleep to allow pidfile to be created by child */
+ sleep(3);
+ if((pid_fp = fopen(pidfile,"r")) == NULL) {
+ printf("ldap-agent: Not started! Check log file for details.\n");
+ exit(1);
+ } else {
+ fscanf(pid_fp, "%d", &child_pid);
+ fclose(pid_fp);
+ }
+ printf("ldap-agent: Started as pid %d\n", child_pid);
+ exit(1);
+ }
+
+ /* initialize the agent */
+ init_agent("ldap-agent");
+ init_ldap_agent();
+ init_snmp("ldap-agent");
+
+ /* listen for signals */
+ keep_running = 1;
+ signal(SIGUSR1, stop_server);
+ signal(SIGTERM, stop_server);
+ signal(SIGINT, stop_server);
+
+ /* create pidfile in config file dir */
+ child_pid = getpid();
+ if ((pid_fp = fopen(pidfile, "w")) == NULL) {
+ snmp_log(LOG_ERR, "Error creating pid file: %s\n", pidfile);
+ exit(1);
+ } else {
+ if (fprintf(pid_fp, "%d", child_pid) < 0) {
+ snmp_log(LOG_ERR, "Error writing pid file: %s\n", pidfile);
+ exit(1);
+ }
+ fclose(pid_fp);
+ }
+
+ /* we're up and running! */
+ snmp_log(LOG_INFO, "Started ldap-agent as pid %d\n", child_pid);
+
+ /* loop here until asked to stop */
+ while(keep_running) {
+ agent_check_and_process(1);
+ }
+
+ /* say goodbye */
+ snmp_shutdown("ldap-agent");
+ snmp_log(LOG_INFO, "ldap-agent stopped.\n");
+
+ /* remove pidfile */
+ remove(pidfile);
+
+ return 0;
+}
+
+/************************************************************************
+ * load_config
+ *
+ * Loads subagent config file and reads directory server config files.
+ */
+void
+load_config(char *conf_path)
+{
+ server_instance *serv_p = NULL;
+ FILE *conf_file = NULL;
+ FILE *dse_fp = NULL;
+ char line[MAXLINE];
+ char *p = NULL;
+ char *p2 = NULL;
+
+ /* Open config file */
+ if ((conf_file = fopen(conf_path, "r")) == NULL) {
+ printf("ldap-agent: Error opening config file: %s\n", conf_path);
+ exit(1);
+ }
+
+ /* set pidfile path */
+ for (p = (conf_path + strlen(conf_path) - 1); p >= conf_path; p--) {
+ if (*p == '/') {
+ if ((pidfile = malloc((p - conf_path) +
+ strlen(LDAP_AGENT_PIDFILE) + 2)) != NULL) {
+ strncpy(pidfile, conf_path, (p - conf_path + 1));
+ strcat(pidfile, LDAP_AGENT_PIDFILE);
+ break;
+ } else {
+ printf("ldap-agent: malloc error processing config file\n");
+ exit(1);
+ }
+ }
+ }
+
+ while (fgets(line, MAXLINE, conf_file) != NULL) {
+ /* Ignore comment lines in config file */
+ if (line[0] == '#')
+ continue;
+
+ if ((p = strstr(line, "agentx-master")) != NULL) {
+ /* load agentx-master setting */
+ p = p + 13;
+ if ((p = strtok(p, " \t\n")) != NULL) {
+ if ((agentx_master = (char *) malloc(strlen(p) + 1)) != NULL)
+ strcpy(agentx_master, p);
+ }
+ } else if ((p = strstr(line, "agent-logdir")) != NULL) {
+ /* load agent-logdir setting */
+ p = p + 12;
+ if ((p = strtok(p, " \t\n")) != NULL) {
+ if ((agent_logdir = (char *) malloc(strlen(p) + 1)) != NULL)
+ strcpy(agent_logdir, p);
+ }
+ } else if ((p = strstr(line, "server")) != NULL) {
+ /* Allocate a server_instance */
+ if ((serv_p = malloc(sizeof(server_instance))) == NULL) {
+ printf("ldap-agent: malloc error processing config file\n");
+ exit(1);
+ }
+
+ /* load server setting */
+ p = p + 6;
+ if ((p = strtok_r(p, " :\t\n", &p2)) != NULL) {
+ /* first token is the instance root */
+ if ((serv_p->stats_file = malloc(strlen(p) + 18)) != NULL)
+ snprintf(serv_p->stats_file, strlen(p) + 18,
+ "%s/logs/slapd.stats", p);
+ if ((serv_p->dse_ldif = malloc(strlen(p) + 17)) != NULL) {
+ snprintf(serv_p->dse_ldif, strlen(p) + 17, "%s/config/dse.ldif", p);
+ }
+
+ /* second token is the name */
+ p = p2;
+ if((p2 = strchr(p, ':')) != NULL) {
+ *p2 = '\0';
+ ++p2;
+ if ((serv_p->name = malloc(strlen(p) + 1)) != NULL)
+ snprintf(serv_p->name, strlen(p) + 1, "%s", p);
+ } else {
+ printf("ldap-agent: Invalid config file\n");
+ exit(1);
+ }
+
+ /* third token is the description */
+ p = p2;
+ if((p2 = strchr(p, ':')) != NULL) {
+ *p2 = '\0';
+ ++p2;
+ if ((serv_p->description = malloc(strlen(p) + 1)) != NULL)
+ snprintf(serv_p->description, strlen(p) + 1, "%s", p);
+ } else {
+ printf("ldap-agent: Invalid config file\n");
+ exit(1);
+ }
+
+ /* fourth token is the org */
+ p = p2;
+ if((p2 = strchr(p, ':')) != NULL) {
+ *p2 = '\0';
+ ++p2;
+ if ((serv_p->org = malloc(strlen(p) + 1)) != NULL)
+ snprintf(serv_p->org, strlen(p) + 1, "%s", p);
+ } else {
+ printf("ldap-agent: Invalid config file\n");
+ exit(1);
+ }
+
+ /* fifth token is the location */
+ p = p2;
+ if((p2 = strchr(p, ':')) != NULL) {
+ *p2 = '\0';
+ ++p2;
+ if ((serv_p->location = malloc(strlen(p) + 1)) != NULL)
+ snprintf(serv_p->location, strlen(p) + 1, "%s", p);
+ } else {
+ printf("ldap-agent: Invalid config file\n");
+ exit(1);
+ }
+
+ /* sixth token is the contact */
+ p = p2;
+ if((p2 = strchr(p, '\n')) != NULL) {
+ *p2 = '\0';
+ if ((serv_p->contact = malloc(strlen(p) + 1)) != NULL)
+ snprintf(serv_p->contact, strlen(p) + 1, "%s", p);
+ } else {
+ printf("ldap-agent: Invalid config file\n");
+ exit(1);
+ }
+ }
+
+ /* Open dse.ldif */
+ if ((dse_fp = fopen(serv_p->dse_ldif, "r")) == NULL) {
+ printf("ldap-agent: Error opening server config file: %s\n",
+ serv_p->dse_ldif);
+ exit(1);
+ }
+
+ /* Get port value */
+ while (fgets(line, MAXLINE, dse_fp) != NULL) {
+ if ((p = strstr(line, "nsslapd-port: ")) != NULL) {
+ p = p + 14;
+ if ((p = strtok(p, ": \t\n")) != NULL)
+ serv_p->port = atol(p);
+ }
+ }
+
+ /* Close dse.ldif */
+ fclose(dse_fp);
+
+ /* Insert server instance into linked list */
+ serv_p->next = server_head;
+ server_head = serv_p;
+ }
+ }
+
+ /* Close config file */
+ fclose(conf_file);
+
+ /* check for at least one directory server instance */
+ if (server_head == NULL) {
+ printf("ldap-agent: No server instances defined in config file\n");
+ exit(1);
+ }
+}
+
+/************************************************************************
+ * exit_usage
+ *
+ * Prints usage message and exits program.
+ */
+void
+exit_usage()
+{
+ printf("Usage: ldap-agent [-D] configfile\n");
+ printf(" -D Enable debug logging\n");
+ exit(1);
+}
| 0 |
b29560435fefd67360ba2eaff0d9f6bb8ddde0b3
|
389ds/389-ds-base
|
Issue 5105 - lmdb - Cannot create entries with long rdn - fix covscan (#6131)
A minor code cleanup issue fixing: CID 1540880 CID 1540879 CID 1540878 CID 1540876 CID 1540875
All these report have the same pattern but on different function.
The issue is that ctx == NULL is tested as part of the parameter validity tests (even if it is never NULL)
then goto bail but the bail code dereference ctx to potentially free some resources.
So I changed the code from:
Log Entering in Function
If (One of parameter is NULL) {
Log Error message
goto bail
}
To:
If (One of parameter is NULL) {
Log Error message
return -1;
}
Log Entering in Function
CID 1540877 is a real issue about a potential memory leak in case of error (shoud goto bail0 instead of bail to make sure childelem is free)
Issue: #6105
Reviewed by: @droideck Thanks!
|
commit b29560435fefd67360ba2eaff0d9f6bb8ddde0b3
Author: progier389 <[email protected]>
Date: Tue Mar 26 11:22:43 2024 +0100
Issue 5105 - lmdb - Cannot create entries with long rdn - fix covscan (#6131)
A minor code cleanup issue fixing: CID 1540880 CID 1540879 CID 1540878 CID 1540876 CID 1540875
All these report have the same pattern but on different function.
The issue is that ctx == NULL is tested as part of the parameter validity tests (even if it is never NULL)
then goto bail but the bail code dereference ctx to potentially free some resources.
So I changed the code from:
Log Entering in Function
If (One of parameter is NULL) {
Log Error message
goto bail
}
To:
If (One of parameter is NULL) {
Log Error message
return -1;
}
Log Entering in Function
CID 1540877 is a real issue about a potential memory leak in case of error (shoud goto bail0 instead of bail to make sure childelem is free)
Issue: #6105
Reviewed by: @droideck Thanks!
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
index 49e1adf09..c873a7902 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c
@@ -1733,16 +1733,15 @@ _entryrdn_put_data(entryrdn_db_ctx_t *ctx, dbi_val_t *key, dbi_val_t *data, char
int db_retry = 0;
int rc = -1;
- slapi_log_err(SLAPI_LOG_TRACE, "_entryrdn_put_data",
- "--> _entryrdn_put_data\n");
if (NULL == ctx || NULL == key || NULL == data) {
slapi_log_err(SLAPI_LOG_ERR, "_entryrdn_put_data",
"Param error: Empty %s\n",
NULL == ctx ? "database context" : NULL == key
? "key" : NULL == data ? "data" : "unknown");
- _ENTRYRDN_DEBUG_GOTO_BAIL();
- goto bail;
+ return -1;
}
+ slapi_log_err(SLAPI_LOG_TRACE, "_entryrdn_put_data",
+ "--> _entryrdn_put_data\n");
dblayer_entryrdn_init_records(ctx->be, key, data, &rec);
if (rec.suffix_too_long) {
@@ -1819,15 +1818,14 @@ _entryrdn_del_data(entryrdn_db_ctx_t *ctx, dbi_val_t *key, dbi_val_t *data)
int db_retry = 0;
int rc = -1;
- slapi_log_err(SLAPI_LOG_TRACE, "_entryrdn_del_data",
- "--> _entryrdn_del_data\n");
if (NULL == ctx || NULL == key || NULL == data) {
slapi_log_err(SLAPI_LOG_ERR, "_entryrdn_del_data",
"Param error: Empty %s\n",
NULL == ctx ? "database context" : NULL == key ? "key" : NULL == data ? "data" : "unknown");
- _ENTRYRDN_DEBUG_GOTO_BAIL();
- goto bail;
+ return -1;
}
+ slapi_log_err(SLAPI_LOG_TRACE, "_entryrdn_del_data",
+ "--> _entryrdn_del_data\n");
dblayer_entryrdn_init_records(ctx->be, key, data, &rec);
if (rec.suffix_too_long) {
errmsg = "Backend suffix is too long";
@@ -1928,17 +1926,16 @@ _entryrdn_insert_key_elems(entryrdn_db_ctx_t *ctx,
int rc = 0;
ID myid = 0;
- slapi_log_err(SLAPI_LOG_TRACE, "_entryrdn_insert_key_elems",
- "--> _entryrdn_insert_key_elems\n");
-
if (NULL == ctx || NULL == srdn ||
NULL == key || NULL == parentelem || NULL == elem) {
slapi_log_err(SLAPI_LOG_ERR, "_entryrdn_insert_key_elems",
"Param error: Empty %s\n",
NULL == ctx ? "database context" : NULL == srdn ? "RDN" : NULL == key ? "key" : NULL == parentelem ? "parent element" : NULL == elem ? "target element" : "unknown");
- _ENTRYRDN_DEBUG_GOTO_BAIL();
- goto bail;
+ return -1;
}
+ slapi_log_err(SLAPI_LOG_TRACE, "_entryrdn_insert_key_elems",
+ "--> _entryrdn_insert_key_elems\n");
+
_ENTRYRDN_DUMP_RDN_ELEM(elem);
dblayer_value_set_buffer(ctx->be, &adddata, elem, elemlen);
@@ -2182,7 +2179,7 @@ _entryrdn_replace_suffix_id(entryrdn_db_ctx_t *ctx, dbi_val_t *key, dbi_val_t *a
rc = _entryrdn_resolve_redirect(ctx, &pelem, 1);
if (rc) {
_ENTRYRDN_DEBUG_GOTO_BAIL();
- goto bail;
+ goto bail0;
}
moddata.data = pelem;
}
@@ -2250,16 +2247,14 @@ entryrdn_insert_key(entryrdn_db_ctx_t *ctx, Slapi_RDN *srdn, ID id)
Slapi_RDN *tmpsrdn = NULL;
int db_retry = 0;
- slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_insert_key",
- "--> _entryrdn_insert_key\n");
-
if (NULL == ctx || NULL == srdn || 0 == id) {
slapi_log_err(SLAPI_LOG_ERR, "entryrdn_insert_key",
"Param error: Empty %s\n",
NULL == ctx ? "database context" : NULL == srdn ? "RDN" : 0 == id ? "id" : "unknown");
- _ENTRYRDN_DEBUG_GOTO_BAIL();
- goto bail;
+ return -1;
}
+ slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_insert_key",
+ "--> _entryrdn_insert_key\n");
if (ctx->txn && ctx->txn->back_special_handling_fn) {
/* back_special_handling_fn means that the calling thread is doing an import or reindex
@@ -2644,16 +2639,14 @@ entryrdn_delete_key(entryrdn_db_ctx_t *ctx, Slapi_RDN *srdn, ID id)
int done = 0;
char buffer[RDN_BULK_FETCH_BUFFER_SIZE];
- slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_delete_key",
- "--> entryrdn_delete_key\n");
-
if (NULL == ctx || NULL == srdn || 0 == id) {
slapi_log_err(SLAPI_LOG_ERR, "entryrdn_delete_key",
"Param error: Empty %s\n",
NULL == ctx ? "database context" : NULL == srdn ? "RDN" : 0 == id ? "ID" : "unknown");
- _ENTRYRDN_DEBUG_GOTO_BAIL();
- goto bail;
+ return -1;
}
+ slapi_log_err(SLAPI_LOG_TRACE, "entryrdn_delete_key",
+ "--> entryrdn_delete_key\n");
if (ctx->txn && ctx->txn->back_special_handling_fn) {
/* back_special_handling_fn means that the calling thread is doing an import or reindex
| 0 |
46dbd62db4413c1a1ea3adfb1f07863a4f2982ba
|
389ds/389-ds-base
|
Ticket #48779 - Remove startpidfile check in start-dirsrv
Description: Since this fix was made, startpid file is not longer
generated in the systemd start.
Ticket 47951 - Add PIDFile option to .service
commit d4deb29a2ed2c738fab41500e6c806f40c4dff17
start-dirsrv script should be updated to adjust to the fix.
https://fedorahosted.org/389/ticket/48779
Reviewed by [email protected] (Thank you, William!!)
|
commit 46dbd62db4413c1a1ea3adfb1f07863a4f2982ba
Author: Noriko Hosoi <[email protected]>
Date: Tue Mar 29 16:45:59 2016 -0700
Ticket #48779 - Remove startpidfile check in start-dirsrv
Description: Since this fix was made, startpid file is not longer
generated in the systemd start.
Ticket 47951 - Add PIDFile option to .service
commit d4deb29a2ed2c738fab41500e6c806f40c4dff17
start-dirsrv script should be updated to adjust to the fix.
https://fedorahosted.org/389/ticket/48779
Reviewed by [email protected] (Thank you, William!!)
diff --git a/ldap/admin/src/scripts/start-dirsrv.in b/ldap/admin/src/scripts/start-dirsrv.in
index 513addb56..4bc0ba2af 100755
--- a/ldap/admin/src/scripts/start-dirsrv.in
+++ b/ldap/admin/src/scripts/start-dirsrv.in
@@ -41,16 +41,6 @@ start_instance() {
export DS_CONFIG_DIR
PIDFILE=$RUN_DIR/$PRODUCT_NAME-$SERV_ID.pid
- STARTPIDFILE=$RUN_DIR/$PRODUCT_NAME-$SERV_ID.startpid
- if test -f $STARTPIDFILE ; then
- PID=`cat $STARTPIDFILE`
- if kill -s 0 $PID > /dev/null 2>&1 ; then
- echo There is an ns-slapd process already running: $PID
- return 2;
- else
- rm -f $STARTPIDFILE
- fi
- fi
if test -f $PIDFILE ; then
PID=`cat $PIDFILE`
if kill -s 0 $PID > /dev/null 2>&1 ; then
@@ -70,27 +60,12 @@ start_instance() {
return 1
fi
else
- $SERVERBIN_DIR/ns-slapd -D $CONFIG_DIR -i $PIDFILE -w $STARTPIDFILE "$@"
+ $SERVERBIN_DIR/ns-slapd -D $CONFIG_DIR -i $PIDFILE "$@"
if [ $? -ne 0 ]; then
return 1
fi
fi
loop_counter=1
- # wait for 10 seconds for the start pid file to appear
- max_count=${STARTPID_TIME:-10}
- while test $loop_counter -le $max_count; do
- loop_counter=`expr $loop_counter + 1`
- if test ! -f $STARTPIDFILE ; then
- sleep 1;
- else
- PID=`cat $STARTPIDFILE`
- fi
- done
- if test ! -f $STARTPIDFILE ; then
- echo Server failed to start !!! Please check errors log for problems
- return 1
- fi
- loop_counter=1
# wait for 10 minutes (600 times 1 seconds)
max_count=${PID_TIME:-600}
while test $loop_counter -le $max_count; do
@@ -104,7 +79,6 @@ start_instance() {
fi
else
PID=`cat $PIDFILE`
- rm -f $STARTPIDFILE
return 0;
fi
done
| 0 |
f5b9053e3d408c9b81d5ac537bd772360515a641
|
389ds/389-ds-base
|
Ticket #48244 - No validation check for the value for nsslapd-db-locks
Description: Added a validation check for the value for nsslapd-db-locks.
The default value is 10000 and now the lower value is set, it is set back
to the default value with this error message:
[..] - New max db lock count is too small. Resetting it to the default value 10000.
https://fedorahosted.org/389/ticket/48244
Reviewed by [email protected] (Thank you, William!!)
|
commit f5b9053e3d408c9b81d5ac537bd772360515a641
Author: Noriko Hosoi <[email protected]>
Date: Wed Dec 16 17:49:34 2015 -0800
Ticket #48244 - No validation check for the value for nsslapd-db-locks
Description: Added a validation check for the value for nsslapd-db-locks.
The default value is 10000 and now the lower value is set, it is set back
to the default value with this error message:
[..] - New max db lock count is too small. Resetting it to the default value 10000.
https://fedorahosted.org/389/ticket/48244
Reviewed by [email protected] (Thank you, William!!)
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 9168c8cdb..e65f3cfd8 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -1661,8 +1661,19 @@ dblayer_start(struct ldbminfo *li, int dbmode)
priv->dblayer_previous_ncache, priv->dblayer_ncache, 0);
}
if (priv->dblayer_lock_config != priv->dblayer_previous_lock_config) {
- LDAPDebug(LDAP_DEBUG_ANY, "resizing max db lock count: %d -> %d\n",
+ /*
+ * The default value of nsslapd-db-locks is 10000.
+ * We don't allow lower value than that.
+ */
+ if (priv->dblayer_lock_config <= 10000) {
+ LDAPDebug0Args(LDAP_DEBUG_ANY, "New max db lock count is too small. "
+ "Resetting it to the default value 10000.\n");
+ priv->dblayer_lock_config = 10000;
+ }
+ if (priv->dblayer_lock_config != priv->dblayer_previous_lock_config) {
+ LDAPDebug(LDAP_DEBUG_ANY, "resizing max db lock count: %d -> %d\n",
priv->dblayer_previous_lock_config, priv->dblayer_lock_config, 0);
+ }
}
dblayer_reset_env(li);
/*
| 0 |
79267d0b0b24aec2174d9910e1787da633f73882
|
389ds/389-ds-base
|
Resolves: #214533
Summary: configure needs to support --with-fhs (Comment #10)
Changes: if --with-fhs is set, bindir, libdir, and datadir starts w/ $prefix/usr
|
commit 79267d0b0b24aec2174d9910e1787da633f73882
Author: Noriko Hosoi <[email protected]>
Date: Mon Nov 13 18:46:58 2006 +0000
Resolves: #214533
Summary: configure needs to support --with-fhs (Comment #10)
Changes: if --with-fhs is set, bindir, libdir, and datadir starts w/ $prefix/usr
diff --git a/configure b/configure
index 80a89ab99..0a093ca40 100755
--- a/configure
+++ b/configure
@@ -25029,6 +25029,11 @@ fi
# installation paths
+if test "$with_fhs" = "yes"; then
+ bindir=$prefix/usr/bin
+ libdir=$prefix/usr/lib
+ datadir=$prefix/usr/share
+fi
# relative to sysconfdir
configdir=/fedora-ds/config
# relative to datadir
diff --git a/configure.ac b/configure.ac
index f7e1f52f0..2cbe9ef7a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -128,7 +128,11 @@ AC_SUBST(netsnmp_libdir)
AC_SUBST(netsnmp_link)
# installation paths
-dnl bindir=/usr/bin
+if test "$with_fhs" = "yes"; then
+ bindir=$prefix/usr/bin
+ libdir=$prefix/usr/lib
+ datadir=$prefix/usr/share
+fi
# relative to sysconfdir
configdir=/fedora-ds/config
# relative to datadir
| 0 |
e580506d52eed2c07a093026095ad6107b2ee8d5
|
389ds/389-ds-base
|
Ticket 49873 - Contention on virtual attribute lookup
Bug Description:
During lookup of the virtual attribute table (filter evaluation and returned attribute)
the lock is acquired many times in read. For example it is acquired for each targetfilter aci and for
each evaluated entry.
Unfortunately RW lock is expensive and appears frequently on pstacks.
The lock exists because the table can be updated but update is very rare (addition of a new service provider).
So it slows down general proceeding for exceptional events.
Fix Description:
The fix is to acquire/release the read lock at the operation level and set a per-cpu flag, so that later lookup
would just check the flag.
https://pagure.io/389-ds-base/issue/49873
Reviewed by: Ludwig Krispenz, William Brown (thanks !!)
Platforms tested: F27
Flag Day: no
Doc impact: no
|
commit e580506d52eed2c07a093026095ad6107b2ee8d5
Author: Thierry Bordaz <[email protected]>
Date: Tue Jan 15 11:13:42 2019 +0100
Ticket 49873 - Contention on virtual attribute lookup
Bug Description:
During lookup of the virtual attribute table (filter evaluation and returned attribute)
the lock is acquired many times in read. For example it is acquired for each targetfilter aci and for
each evaluated entry.
Unfortunately RW lock is expensive and appears frequently on pstacks.
The lock exists because the table can be updated but update is very rare (addition of a new service provider).
So it slows down general proceeding for exceptional events.
Fix Description:
The fix is to acquire/release the read lock at the operation level and set a per-cpu flag, so that later lookup
would just check the flag.
https://pagure.io/389-ds-base/issue/49873
Reviewed by: Ludwig Krispenz, William Brown (thanks !!)
Platforms tested: F27
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 8b885686c..fcc46cd8d 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1509,6 +1509,7 @@ connection_threadmain()
long bypasspollcnt = 0;
enable_nunc_stans = config_get_enable_nunc_stans();
+ vattr_global_lock_init();
#if defined(hpux)
/* Arrange to ignore SIGPIPE signals. */
SIGNAL(SIGPIPE, SIG_IGN);
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index e791a9054..8b895a1b7 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -243,6 +243,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
int pr_idx = -1;
Slapi_DN *orig_sdn = NULL;
int free_sdn = 0;
+ PRBool vattr_lock_acquired = PR_FALSE;
be_list[0] = NULL;
referral_list[0] = NULL;
@@ -528,6 +529,8 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
}
slapi_pblock_set(pb, SLAPI_BACKEND_COUNT, &index);
+ vattr_rdlock();
+ vattr_lock_acquired = PR_TRUE;
if (be) {
slapi_pblock_set(pb, SLAPI_BACKEND, be);
@@ -983,6 +986,9 @@ free_and_return:
} else if (be_single) {
slapi_be_Unlock(be_single);
}
+ if (vattr_lock_acquired) {
+ vattr_unlock();
+ }
free_and_return_nolock:
slapi_pblock_set(pb, SLAPI_PLUGIN_OPRETURN, &rc);
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index ca946fbde..2029f41cf 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1419,6 +1419,9 @@ void subentry_create_filter(Slapi_Filter **filter);
* vattr.c
*/
void vattr_init(void);
+void vattr_global_lock_init(void);
+void vattr_rdlock();
+void vattr_unlock();
void vattr_cleanup(void);
/*
diff --git a/ldap/servers/slapd/psearch.c b/ldap/servers/slapd/psearch.c
index 8ad268a85..e7b97a7c3 100644
--- a/ldap/servers/slapd/psearch.c
+++ b/ldap/servers/slapd/psearch.c
@@ -267,6 +267,7 @@ ps_send_results(void *arg)
Operation *pb_op = NULL;
g_incr_active_threadcnt();
+ vattr_global_lock_init();
slapi_pblock_get(ps->ps_pblock, SLAPI_CONNECTION, &pb_conn);
slapi_pblock_get(ps->ps_pblock, SLAPI_OPERATION, &pb_op);
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index f7c473ab1..155afcafb 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -102,6 +102,16 @@ int vattr_basic_sp_init();
void **statechange_api;
+struct _vattr_map
+{
+ Slapi_RWLock *lock;
+ PLHashTable *hashtable; /* Hash table */
+};
+typedef struct _vattr_map vattr_map;
+
+static vattr_map *the_map = NULL;
+static PRUintn thread_private_global_vattr_lock;
+
/* Housekeeping Functions, called by server startup/shutdown code */
/* Called on server startup, init all structures etc */
@@ -109,6 +119,7 @@ void
vattr_init()
{
statechange_api = 0;
+ PR_NewThreadPrivateIndex(&thread_private_global_vattr_lock, NULL);
vattr_map_create();
#ifdef VATTR_TEST_CODE
@@ -116,6 +127,60 @@ vattr_init()
#endif
}
+void
+vattr_global_lock_init()
+{
+ if (thread_private_global_vattr_lock) {
+ PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) 0);
+ }
+}
+/* The map lock can be acquired recursively. So only the first rdlock
+ * will acquire the lock.
+ * A optimization acquires it at high level (op_shared_search), so that
+ * later calls during the operation processing will just increase/decrease a counter.
+ */
+void
+vattr_rdlock()
+{
+ if (thread_private_global_vattr_lock) {
+ int nb_acquire = (int) PR_GetThreadPrivate(thread_private_global_vattr_lock);
+
+ if (nb_acquire == 0) {
+ /* The lock was not held just acquire it */
+ slapi_rwlock_rdlock(the_map->lock);
+ }
+ nb_acquire++;
+ PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) nb_acquire);
+ } else {
+ slapi_rwlock_rdlock(the_map->lock);
+ }
+}
+/* The map lock can be acquired recursively. So only the last unlock
+ * will release the lock.
+ * A optimization acquires it at high level (op_shared_search), so that
+ * later calls during the operation processing will just increase/decrease a counter.
+ */
+void
+vattr_unlock()
+{
+ if (thread_private_global_vattr_lock) {
+ int nb_acquire = (int) PR_GetThreadPrivate(thread_private_global_vattr_lock);
+
+ if (nb_acquire >= 1) {
+ nb_acquire--;
+ if (nb_acquire == 0) {
+ slapi_rwlock_unlock(the_map->lock);
+ }
+ PR_SetThreadPrivate(thread_private_global_vattr_lock, (void *) nb_acquire);
+ } else {
+ slapi_log_err(SLAPI_LOG_CRIT,
+ "vattr_unlock", "The lock was not acquire. We should not be here\n");
+ PR_ASSERT(nb_acquire >= 1);
+ }
+ } else {
+ slapi_rwlock_unlock(the_map->lock);
+ }
+}
/* Called on server shutdown, free all structures, inform service providers that we're going down etc */
void
vattr_cleanup()
@@ -1811,15 +1876,6 @@ typedef struct _vattr_map_entry vattr_map_entry;
vattr_map_entry test_entry = {NULL};
-struct _vattr_map
-{
- Slapi_RWLock *lock;
- PLHashTable *hashtable; /* Hash table */
-};
-typedef struct _vattr_map vattr_map;
-
-static vattr_map *the_map = NULL;
-
static PRIntn
vattr_hash_compare_keys(const void *v1, const void *v2)
{
@@ -1939,11 +1995,11 @@ vattr_map_lookup(const char *type_to_find, vattr_map_entry **result)
}
/* Get the reader lock */
- slapi_rwlock_rdlock(the_map->lock);
+ vattr_rdlock();
*result = (vattr_map_entry *)PL_HashTableLookupConst(the_map->hashtable,
(void *)basetype);
/* Release ze lock */
- slapi_rwlock_unlock(the_map->lock);
+ vattr_unlock();
if (tmp) {
slapi_ch_free_string(&tmp);
@@ -2131,7 +2187,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
objAttrValue *obj;
if (0 == vattr_map_lookup(type, &map_entry)) {
- slapi_rwlock_rdlock(the_map->lock);
+ vattr_rdlock();
obj = map_entry->objectclasses;
@@ -2148,7 +2204,7 @@ slapi_vattr_schema_check_type(Slapi_Entry *e, char *type)
obj = obj->pNext;
}
- slapi_rwlock_unlock(the_map->lock);
+ vattr_unlock();
}
slapi_valueset_free(vs);
| 0 |
9f433e82e153f8f6ccd6a6d32cd6440fa9beec0c
|
389ds/389-ds-base
|
Update .gitignore to hide extra files
Description: Add a small number of files to gitignore
|
commit 9f433e82e153f8f6ccd6a6d32cd6440fa9beec0c
Author: William Brown <[email protected]>
Date: Thu Jan 10 12:30:25 2019 +1000
Update .gitignore to hide extra files
Description: Add a small number of files to gitignore
diff --git a/.gitignore b/.gitignore
index b8bff61fb..c1f704c56 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,14 +31,18 @@ stamp-h1
.settings
.cache
*.a
+*.rsa
*.dirstamp
*.la
*.lo
*.o
+*.rso
*.pyc
+*.rej
__pycache__
.libs
.deps
+Cargo.lock
rpmbuild
rpm/389-ds-base.spec
Makefile
@@ -216,3 +220,7 @@ wrappers/ds_systemd_ask_password_acl
docs/slapi.doxy
man/man3/
html/
+.pytest_cache/
+src/lib389/dist/
+src/lib389/man/
+src/libsds/target/
| 0 |
f69ce333052f7f33350fd4038b8f598f650a743f
|
389ds/389-ds-base
|
Ticket #48194 - CI test: fixing test cases for ticket 48194
Description: nsSSL3Ciphers preference not enforced server side
. Test Case 6 - wrong expectation for RC4-SHA
. Test Case 7 - removing a extra space in nsSSL3Ciphers
|
commit f69ce333052f7f33350fd4038b8f598f650a743f
Author: Noriko Hosoi <[email protected]>
Date: Tue Jul 14 11:12:56 2015 -0700
Ticket #48194 - CI test: fixing test cases for ticket 48194
Description: nsSSL3Ciphers preference not enforced server side
. Test Case 6 - wrong expectation for RC4-SHA
. Test Case 7 - removing a extra space in nsSSL3Ciphers
diff --git a/dirsrvtests/tickets/ticket48194_test.py b/dirsrvtests/tickets/ticket48194_test.py
index 18739ca99..17e179aa7 100644
--- a/dirsrvtests/tickets/ticket48194_test.py
+++ b/dirsrvtests/tickets/ticket48194_test.py
@@ -295,7 +295,7 @@ def test_ticket48194_run_4(topology):
Default ciphers are enabled.
default allowWeakCipher
"""
- _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (default setting) with default allowWeakCipher')
+ _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher')
topology.standalone.simple_bind_s(DN_DM, PASSWORD)
topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])
@@ -326,7 +326,7 @@ def test_ticket48194_run_5(topology):
os.system('touch %s' % (topology.standalone.errlog))
topology.standalone.start(timeout=120)
- connectWithOpenssl(topology, 'RC4-SHA', True)
+ connectWithOpenssl(topology, 'RC4-SHA', False)
connectWithOpenssl(topology, 'AES256-SHA256', True)
def test_ticket48194_run_6(topology):
@@ -338,7 +338,7 @@ def test_ticket48194_run_6(topology):
_header(topology, 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher')
topology.standalone.simple_bind_s(DN_DM, PASSWORD)
- topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-TLS_RSA_WITH_AES_256_CBC_SHA256 ')])
+ topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')])
log.info("\n######################### Restarting the server ######################\n")
topology.standalone.stop(timeout=10)
| 0 |
5559c97b1788c7cb36a4dde62b0379d1bd6e94cf
|
389ds/389-ds-base
|
Resolves: bug 450973
Bug Description: rhds80 account accountunlocktime attribute breaks replication
Reviewed by: nhosoi (Thanks!)
Fix Description: We were not handling errors returned from the consumer correctly in the async replication code. The problem was that we were exiting the async read results thread immediately. However, we needed to wait for and read all of the outstanding responses, then exit the thread when all of them had been read. The new code handles this case correctly, allowing us to read all of the pending responses before exiting.
The flip side of this is that passwordIsGlobalPolicy only works on the _consumer_. It has no effect whatsoever on the _supplier_ side of replication. The fix for this is to configure fractional replication _always_ and to add the password policy op attrs to the list of attrs not to replicate. This should work fine with RHDS 8.0.0-14 and later.
Platforms tested: RHEL5
Flag Day: no
Doc impact: Yes. We will need to document exactly how passwordIsGlobalPolicy works and how to configure fractional replication.
QA impact: Will need to do more testing of MMR with account lockout to make sure this error does not blow up MMR anymore.
New Tests integrated into TET: Working on it.
|
commit 5559c97b1788c7cb36a4dde62b0379d1bd6e94cf
Author: Rich Megginson <[email protected]>
Date: Mon Jun 23 18:38:40 2008 +0000
Resolves: bug 450973
Bug Description: rhds80 account accountunlocktime attribute breaks replication
Reviewed by: nhosoi (Thanks!)
Fix Description: We were not handling errors returned from the consumer correctly in the async replication code. The problem was that we were exiting the async read results thread immediately. However, we needed to wait for and read all of the outstanding responses, then exit the thread when all of them had been read. The new code handles this case correctly, allowing us to read all of the pending responses before exiting.
The flip side of this is that passwordIsGlobalPolicy only works on the _consumer_. It has no effect whatsoever on the _supplier_ side of replication. The fix for this is to configure fractional replication _always_ and to add the password policy op attrs to the list of attrs not to replicate. This should work fine with RHDS 8.0.0-14 and later.
Platforms tested: RHEL5
Flag Day: no
Doc impact: Yes. We will need to document exactly how passwordIsGlobalPolicy works and how to configure fractional replication.
QA impact: Will need to do more testing of MMR with account lockout to make sure this error does not blow up MMR anymore.
New Tests integrated into TET: Working on it.
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 3249319d4..e04734383 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -294,6 +294,7 @@ static void repl5_inc_result_threadmain(void *param)
while (!finished)
{
conres = conn_read_result_ex(conn, NULL, NULL, NULL, &message_id, 0);
+ slapi_log_error(SLAPI_LOG_REPL, NULL, "repl5_inc_result_threadmain: read result for message_id %d\n", message_id);
/* Timeout here means that we didn't block, not a real timeout */
if (CONN_TIMEOUT == conres)
{
@@ -328,6 +329,7 @@ static void repl5_inc_result_threadmain(void *param)
}
if (conres != CONN_TIMEOUT)
{
+ int should_finish = 0;
if (message_id)
{
rd->last_message_id_received = message_id;
@@ -344,16 +346,18 @@ static void repl5_inc_result_threadmain(void *param)
}
conn_get_error_ex(conn, &operation_code, &connection_error, &ldap_error_string);
- slapi_log_error(SLAPI_LOG_REPL, NULL, "repl5_inc_result_threadmain: result %d, %d, %d, %s\n", operation_code,connection_error,conres,ldap_error_string);
- rd->result = repl5_inc_update_from_op_result(rd->prp, conres, connection_error, csn_str, uniqueid, replica_id, &finished, &(rd->num_changes_sent));
- if (rd->result)
+ slapi_log_error(SLAPI_LOG_REPL, NULL, "repl5_inc_result_threadmain: result %d, %d, %d, %d, %s\n", operation_code,connection_error,conres,message_id,ldap_error_string);
+ rd->result = repl5_inc_update_from_op_result(rd->prp, conres, connection_error, csn_str, uniqueid, replica_id, &should_finish, &(rd->num_changes_sent));
+ if (rd->result || should_finish)
{
- slapi_log_error(SLAPI_LOG_REPL, NULL, "repl5_inc_result_threadmain: got op result %d\n", rd->result);
+ slapi_log_error(SLAPI_LOG_REPL, NULL, "repl5_inc_result_threadmain: got op result %d should finish %d\n", rd->result, should_finish);
/* If so then we need to take steps to abort the update process */
PR_Lock(rd->lock);
rd->abort = 1;
PR_Unlock(rd->lock);
/* We also need to log the error, including details stored from when the operation was sent */
+ /* we cannot finish yet - we still need to waitfor the pending results, then
+ the main repl code will shut down this thread */
}
}
/* Should we stop ? */
| 0 |
0f21a5bb60c8825fa4fec37eecd1b81c0e03c702
|
389ds/389-ds-base
|
Ticket #271 - replication code cleanup
The function repl5_inc_run() was a complete disaster when it came to spacing and
indentation.
Reviewed by: richm (Thanks Rich)
|
commit 0f21a5bb60c8825fa4fec37eecd1b81c0e03c702
Author: Mark Reynolds <[email protected]>
Date: Thu Mar 8 16:41:30 2012 -0500
Ticket #271 - replication code cleanup
The function repl5_inc_run() was a complete disaster when it came to spacing and
indentation.
Reviewed by: richm (Thanks Rich)
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index a615a2943..e1fe4fe94 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -633,6 +633,11 @@ repl5_inc_run(Private_Repl_Protocol *prp)
PRBool use_busy_backoff_timer = PR_FALSE;
long pausetime = 0;
long busywaittime = 0;
+ long loops = 0;
+ int optype, ldaprc;
+ time_t next_fire_time;
+ time_t now;
+
prp->stopped = 0;
prp->terminate = 0;
@@ -640,665 +645,529 @@ repl5_inc_run(Private_Repl_Protocol *prp)
/* establish_protocol_callbacks(prp); */
done = 0;
do {
- int rc;
+ int rc;
- /* Take action, based on current state, and compute new state. */
- switch (current_state)
+ /* Take action, based on current state, and compute new state. */
+ switch (current_state)
{
- case STATE_START:
-
- dev_debug("repl5_inc_run(STATE_START)");
- if (PROTOCOL_IS_SHUTDOWN(prp))
- {
- done = 1;
- break;
- }
+ case STATE_START:
+ dev_debug("repl5_inc_run(STATE_START)");
+ if (PROTOCOL_IS_SHUTDOWN(prp)){
+ done = 1;
+ break;
+ }
+ /*
+ * Our initial state. See if we're in a schedule window. If
+ * so, then we're ready to acquire the replica and see if it
+ * needs any updates from us. If not, then wait for the window
+ * to open.
+ */
+ if (agmt_schedule_in_window_now(prp->agmt)){
+ next_state = STATE_READY_TO_ACQUIRE;
+ } else {
+ next_state = STATE_WAIT_WINDOW_OPEN;
+ }
+ /* we can get here from other states because some events happened and were
+ * not cleared. For instance when we wake up in STATE_WAIT_CHANGES state.
+ * Since this is a fresh start state, we should clear all events */
+ /* ONREPL - this does not feel right - we should take another look
+ * at this state machine */
+ reset_events (prp);
+ /* Cancel any linger timer that might be in effect... */
+ conn_cancel_linger(prp->conn);
+ /* ... and disconnect, if currently connected */
+ conn_disconnect(prp->conn);
+ /* get the new pause time, if any */
+ pausetime = agmt_get_pausetime(prp->agmt);
+ /* get the new busy wait time, if any */
+ busywaittime = agmt_get_busywaittime(prp->agmt);
+ if (pausetime || busywaittime){
+ /* helper function to make sure they are set correctly */
+ set_pause_and_busy_time(&pausetime, &busywaittime);
+ }
+ break;
+
+ case STATE_WAIT_WINDOW_OPEN:
+ /*
+ * We're waiting for a schedule window to open. If one did,
+ * or we receive a "replicate now" event, then start a protocol
+ * session immediately. If the replication schedule changed, go
+ * back to start. Otherwise, go back to sleep.
+ */
+ dev_debug("repl5_inc_run(STATE_WAIT_WINDOW_OPEN)");
+ if (PROTOCOL_IS_SHUTDOWN(prp)){
+ done = 1;
+ break;
+ } else if (event_occurred(prp, EVENT_WINDOW_OPENED)){
+ next_state = STATE_READY_TO_ACQUIRE;
+ } else if (event_occurred(prp, EVENT_REPLICATE_NOW)){
+ next_state = STATE_READY_TO_ACQUIRE;
+ } else if (event_occurred(prp, EVENT_AGMT_CHANGED)){
+ next_state = STATE_START;
+ conn_set_agmt_changed(prp->conn);
+ } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)){ /* change available */
+ /* just ignore it and go to sleep */
+ protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
+ } else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) ||
+ event_occurred(prp, EVENT_BACKOFF_EXPIRED)){
+ /* this events - should not occur - log a warning and go to sleep */
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Incremental protocol: "
+ "event %s should not occur in state %s; going to sleep\n",
+ agmt_get_long_name(prp->agmt), e1 ? event2name(EVENT_WINDOW_CLOSED) :
+ event2name(EVENT_BACKOFF_EXPIRED), state2name(current_state));
+ protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
+ } else {
+ /* wait until window opens or an event occurs */
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: Incremental protocol: "
+ "waiting for update window to open\n", agmt_get_long_name(prp->agmt));
+ protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
+ }
+ break;
- /*
- * Our initial state. See if we're in a schedule window. If
- * so, then we're ready to acquire the replica and see if it
- * needs any updates from us. If not, then wait for the window
- * to open.
- */
- if (agmt_schedule_in_window_now(prp->agmt))
- {
- next_state = STATE_READY_TO_ACQUIRE;
- }
- else
- {
- next_state = STATE_WAIT_WINDOW_OPEN;
- }
-
- /* we can get here from other states because some events happened and were
- not cleared. For instance when we wake up in STATE_WAIT_CHANGES state.
- Since this is a fresh start state, we should clear all events */
- /* ONREPL - this does not feel right - we should take another look
- at this state machine */
- reset_events (prp);
-
- /* Cancel any linger timer that might be in effect... */
- conn_cancel_linger(prp->conn);
- /* ... and disconnect, if currently connected */
- conn_disconnect(prp->conn);
- /* get the new pause time, if any */
- pausetime = agmt_get_pausetime(prp->agmt);
- /* get the new busy wait time, if any */
- busywaittime = agmt_get_busywaittime(prp->agmt);
- if (pausetime || busywaittime)
- {
- /* helper function to make sure they are set correctly */
- set_pause_and_busy_time(&pausetime, &busywaittime);
- }
- break;
- case STATE_WAIT_WINDOW_OPEN:
- /*
- * We're waiting for a schedule window to open. If one did,
- * or we receive a "replicate now" event, then start a protocol
- * session immediately. If the replication schedule changed, go
- * back to start. Otherwise, go back to sleep.
- */
- dev_debug("repl5_inc_run(STATE_WAIT_WINDOW_OPEN)");
- if (PROTOCOL_IS_SHUTDOWN(prp))
- {
- done = 1;
- break;
- }
- else if (event_occurred(prp, EVENT_WINDOW_OPENED))
- {
- next_state = STATE_READY_TO_ACQUIRE;
- }
- else if (event_occurred(prp, EVENT_REPLICATE_NOW))
- {
- next_state = STATE_READY_TO_ACQUIRE;
- }
- else if (event_occurred(prp, EVENT_AGMT_CHANGED))
- {
- next_state = STATE_START;
- conn_set_agmt_changed(prp->conn);
- }
- else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)) /* change available */
- {
- /* just ignore it and go to sleep */
- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
- }
- else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) ||
- event_occurred(prp, EVENT_BACKOFF_EXPIRED))
- {
- /* this events - should not occur - log a warning and go to sleep */
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Incremental protocol: "
- "event %s should not occur in state %s; going to sleep\n",
- agmt_get_long_name(prp->agmt),
- e1 ? event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED),
- state2name(current_state));
- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
- }
- else
- {
- /* wait until window opens or an event occurs */
- slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
- "%s: Incremental protocol: "
- "waiting for update window to open\n", agmt_get_long_name(prp->agmt));
- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
- }
- break;
case STATE_WAIT_CHANGES:
- /*
- * We're in a replication window, but we're waiting for more
- * changes to accumulate before we actually hook up and send
- * them.
- */
- dev_debug("repl5_inc_run(STATE_WAIT_CHANGES)");
- if (PROTOCOL_IS_SHUTDOWN(prp))
- {
- dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): PROTOCOL_IS_SHUTING_DOWN -> end repl5_inc_run\n");
- done = 1;
- break;
- }
- else if (event_occurred(prp, EVENT_REPLICATE_NOW))
- {
- dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): EVENT_REPLICATE_NOW received -> STATE_READY_TO_ACQUIRE\n");
- next_state = STATE_READY_TO_ACQUIRE;
- wait_change_timer_set = 0;
- }
- else if (event_occurred(prp, EVENT_AGMT_CHANGED))
- {
- dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): EVENT_AGMT_CHANGED received -> STATE_START\n");
- next_state = STATE_START;
- conn_set_agmt_changed(prp->conn);
- wait_change_timer_set = 0;
- }
- else if (event_occurred(prp, EVENT_WINDOW_CLOSED))
- {
- dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): EVENT_WINDOW_CLOSED received -> STATE_WAIT_WINDOW_OPEN\n");
- next_state = STATE_WAIT_WINDOW_OPEN;
- wait_change_timer_set = 0;
- }
- else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET))
- {
- dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): EVENT_TRIGGERING_CRITERIA_MET received -> STATE_READY_TO_ACQUIRE\n");
- next_state = STATE_READY_TO_ACQUIRE;
- wait_change_timer_set = 0;
- }
- else if ((e1 = event_occurred(prp, EVENT_WINDOW_OPENED)) ||
- event_occurred(prp, EVENT_BACKOFF_EXPIRED))
- {
- /* this events - should not occur - log a warning and clear the event */
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "%s: Incremental protocol: "
- "event %s should not occur in state %s\n",
- agmt_get_long_name(prp->agmt),
- e1 ? event2name(EVENT_WINDOW_OPENED) : event2name(EVENT_BACKOFF_EXPIRED),
- state2name(current_state));
- wait_change_timer_set = 0;
- }
- else
- {
- if (wait_change_timer_set)
- {
- /* We are here because our timer expired */
- dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): wait_change_timer_set expired -> STATE_START\n");
- next_state = STATE_START;
- wait_change_timer_set = 0;
- }
- else
- {
- /* We are here because the last replication session
- * finished or aborted.
- */
- wait_change_timer_set = 1;
- protocol_sleep(prp, MAX_WAIT_BETWEEN_SESSIONS);
- }
- }
- break;
+ /*
+ * We're in a replication window, but we're waiting for more
+ * changes to accumulate before we actually hook up and send
+ * them.
+ */
+ dev_debug("repl5_inc_run(STATE_WAIT_CHANGES)");
+ if (PROTOCOL_IS_SHUTDOWN(prp)){
+ dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): PROTOCOL_IS_SHUTING_DOWN -> end repl5_inc_run\n");
+ done = 1;
+ break;
+ } else if (event_occurred(prp, EVENT_REPLICATE_NOW)){
+ dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): EVENT_REPLICATE_NOW received -> STATE_READY_TO_ACQUIRE\n");
+ next_state = STATE_READY_TO_ACQUIRE;
+ wait_change_timer_set = 0;
+ } else if (event_occurred(prp, EVENT_AGMT_CHANGED)){
+ dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): EVENT_AGMT_CHANGED received -> STATE_START\n");
+ next_state = STATE_START;
+ conn_set_agmt_changed(prp->conn);
+ wait_change_timer_set = 0;
+ } else if (event_occurred(prp, EVENT_WINDOW_CLOSED)){
+ dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): EVENT_WINDOW_CLOSED received -> STATE_WAIT_WINDOW_OPEN\n");
+ next_state = STATE_WAIT_WINDOW_OPEN;
+ wait_change_timer_set = 0;
+ } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)){
+ dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): EVENT_TRIGGERING_CRITERIA_MET received -> STATE_READY_TO_ACQUIRE\n");
+ next_state = STATE_READY_TO_ACQUIRE;
+ wait_change_timer_set = 0;
+ } else if ((e1 = event_occurred(prp, EVENT_WINDOW_OPENED)) || event_occurred(prp, EVENT_BACKOFF_EXPIRED)){
+ /* this events - should not occur - log a warning and clear the event */
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "%s: Incremental protocol: "
+ "event %s should not occur in state %s\n",agmt_get_long_name(prp->agmt),
+ e1 ? event2name(EVENT_WINDOW_OPENED) : event2name(EVENT_BACKOFF_EXPIRED),
+ state2name(current_state));
+ wait_change_timer_set = 0;
+ } else {
+ if (wait_change_timer_set){
+ /* We are here because our timer expired */
+ dev_debug("repl5_inc_run(STATE_WAIT_CHANGES): wait_change_timer_set expired -> STATE_START\n");
+ next_state = STATE_START;
+ wait_change_timer_set = 0;
+ } else {
+ /*
+ * We are here because the last replication session
+ * finished or aborted.
+ */
+ wait_change_timer_set = 1;
+ protocol_sleep(prp, MAX_WAIT_BETWEEN_SESSIONS);
+ }
+ }
+ break;
+
case STATE_READY_TO_ACQUIRE:
-
- dev_debug("repl5_inc_run(STATE_READY_TO_ACQUIRE)");
- if (PROTOCOL_IS_SHUTDOWN(prp))
- {
- done = 1;
- break;
- }
-
- /* ONREPL - at this state we unconditionally acquire the replica
- ignoring all events. Not sure if this is good */
- object_acquire(prp->replica_object);
-
- rc = acquire_replica(prp, REPL_NSDS50_INCREMENTAL_PROTOCOL_OID, &ruv);
- use_busy_backoff_timer = PR_FALSE; /* default */
- if (rc == ACQUIRE_SUCCESS)
- {
- next_state = STATE_SENDING_UPDATES;
- }
- else if (rc == ACQUIRE_REPLICA_BUSY)
- {
- next_state = STATE_BACKOFF_START;
- use_busy_backoff_timer = PR_TRUE;
- }
- else if (rc == ACQUIRE_CONSUMER_WAS_UPTODATE)
- {
- next_state = STATE_WAIT_CHANGES;
- }
- else if (rc == ACQUIRE_TRANSIENT_ERROR)
- {
- next_state = STATE_BACKOFF_START;
- }
- else if (rc == ACQUIRE_FATAL_ERROR)
- {
- next_state = STATE_STOP_FATAL_ERROR;
- }
- if (rc != ACQUIRE_SUCCESS)
- {
- int optype, ldaprc;
- conn_get_error(prp->conn, &optype, &ldaprc);
- agmt_set_last_update_status(prp->agmt, ldaprc,
- prp->last_acquire_response_code, NULL);
- }
-
- object_release(prp->replica_object);
- break;
+ dev_debug("repl5_inc_run(STATE_READY_TO_ACQUIRE)");
+ if (PROTOCOL_IS_SHUTDOWN(prp)){
+ done = 1;
+ break;
+ }
+
+ /* ONREPL - at this state we unconditionally acquire the replica
+ ignoring all events. Not sure if this is good */
+ object_acquire(prp->replica_object);
+ rc = acquire_replica(prp, REPL_NSDS50_INCREMENTAL_PROTOCOL_OID, &ruv);
+ use_busy_backoff_timer = PR_FALSE; /* default */
+ if (rc == ACQUIRE_SUCCESS){
+ next_state = STATE_SENDING_UPDATES;
+ } else if (rc == ACQUIRE_REPLICA_BUSY){
+ next_state = STATE_BACKOFF_START;
+ use_busy_backoff_timer = PR_TRUE;
+ } else if (rc == ACQUIRE_CONSUMER_WAS_UPTODATE){
+ next_state = STATE_WAIT_CHANGES;
+ } else if (rc == ACQUIRE_TRANSIENT_ERROR){
+ next_state = STATE_BACKOFF_START;
+ } else if (rc == ACQUIRE_FATAL_ERROR){
+ next_state = STATE_STOP_FATAL_ERROR;
+ }
+
+ if (rc != ACQUIRE_SUCCESS){
+ int optype, ldaprc;
+ conn_get_error(prp->conn, &optype, &ldaprc);
+ agmt_set_last_update_status(prp->agmt, ldaprc,
+ prp->last_acquire_response_code, NULL);
+ }
+
+ object_release(prp->replica_object);
+ break;
+
case STATE_BACKOFF_START:
- dev_debug("repl5_inc_run(STATE_BACKOFF_START)");
- if (PROTOCOL_IS_SHUTDOWN(prp))
- {
- done = 1;
- break;
- }
- if (event_occurred(prp, EVENT_REPLICATE_NOW))
- {
- next_state = STATE_READY_TO_ACQUIRE;
- }
- else if (event_occurred(prp, EVENT_AGMT_CHANGED))
- {
- next_state = STATE_START;
- conn_set_agmt_changed(prp->conn);
- }
- else if (event_occurred (prp, EVENT_WINDOW_CLOSED))
- {
- next_state = STATE_WAIT_WINDOW_OPEN;
- }
- else if (event_occurred (prp, EVENT_TRIGGERING_CRITERIA_MET))
- {
- /* consume and ignore */
- }
- else if ((e1 = event_occurred (prp, EVENT_WINDOW_OPENED)) ||
- event_occurred (prp, EVENT_BACKOFF_EXPIRED))
- {
- /* This should never happen */
- /* this events - should not occur - log a warning and go to sleep */
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Incremental protocol: event %s should not occur in state %s\n",
- agmt_get_long_name(prp->agmt),
- e1 ? event2name(EVENT_WINDOW_OPENED) : event2name(EVENT_BACKOFF_EXPIRED),
- state2name(current_state));
- }
- else
- {
- /* Set up the backoff timer to wake us up at the appropriate time */
- if (use_busy_backoff_timer)
- {
- /* we received a busy signal from the consumer, wait for a while */
- if (!busywaittime)
- {
- busywaittime = PROTOCOL_BUSY_BACKOFF_MINIMUM;
- }
- prp_priv->backoff = backoff_new(BACKOFF_FIXED, busywaittime,
- busywaittime);
- }
- else
- {
- prp_priv->backoff = backoff_new(BACKOFF_EXPONENTIAL, PROTOCOL_BACKOFF_MINIMUM,
- PROTOCOL_BACKOFF_MAXIMUM);
- }
- next_state = STATE_BACKOFF;
- backoff_reset(prp_priv->backoff, repl5_inc_backoff_expired, (void *)prp);
- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
- use_busy_backoff_timer = PR_FALSE;
- }
- break;
+ dev_debug("repl5_inc_run(STATE_BACKOFF_START)");
+ if (PROTOCOL_IS_SHUTDOWN(prp)){
+ done = 1;
+ break;
+ }
+ if (event_occurred(prp, EVENT_REPLICATE_NOW)){
+ next_state = STATE_READY_TO_ACQUIRE;
+ } else if (event_occurred(prp, EVENT_AGMT_CHANGED)){
+ next_state = STATE_START;
+ conn_set_agmt_changed(prp->conn);
+ } else if (event_occurred (prp, EVENT_WINDOW_CLOSED)){
+ next_state = STATE_WAIT_WINDOW_OPEN;
+ } else if (event_occurred (prp, EVENT_TRIGGERING_CRITERIA_MET)){
+ /* consume and ignore */
+ } else if ((e1 = event_occurred (prp, EVENT_WINDOW_OPENED)) ||
+ event_occurred (prp, EVENT_BACKOFF_EXPIRED)){
+ /* This should never happen */
+ /* this events - should not occur - log a warning and go to sleep */
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Incremental protocol: event %s should not occur in state %s\n",
+ agmt_get_long_name(prp->agmt),
+ e1 ? event2name(EVENT_WINDOW_OPENED) : event2name(EVENT_BACKOFF_EXPIRED),
+ state2name(current_state));
+ } else {
+ /* Set up the backoff timer to wake us up at the appropriate time */
+ if (use_busy_backoff_timer){
+ /* we received a busy signal from the consumer, wait for a while */
+ if (!busywaittime){
+ busywaittime = PROTOCOL_BUSY_BACKOFF_MINIMUM;
+ }
+ prp_priv->backoff = backoff_new(BACKOFF_FIXED, busywaittime, busywaittime);
+ } else {
+ prp_priv->backoff = backoff_new(BACKOFF_EXPONENTIAL, PROTOCOL_BACKOFF_MINIMUM,
+ PROTOCOL_BACKOFF_MAXIMUM);
+ }
+ next_state = STATE_BACKOFF;
+ backoff_reset(prp_priv->backoff, repl5_inc_backoff_expired, (void *)prp);
+ protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
+ use_busy_backoff_timer = PR_FALSE;
+ }
+ break;
+
case STATE_BACKOFF:
- /*
- * We're in a backoff state.
- */
- dev_debug("repl5_inc_run(STATE_BACKOFF)");
- if (PROTOCOL_IS_SHUTDOWN(prp))
- {
- if (prp_priv->backoff)
- backoff_delete(&prp_priv->backoff);
- done = 1;
- break;
- }
- else if (event_occurred(prp, EVENT_REPLICATE_NOW))
- {
- next_state = STATE_READY_TO_ACQUIRE;
- }
- else if (event_occurred(prp, EVENT_AGMT_CHANGED))
- {
- next_state = STATE_START;
-
- conn_set_agmt_changed(prp->conn);
- /* Destroy the backoff timer, since we won't need it anymore */
- if (prp_priv->backoff)
- backoff_delete(&prp_priv->backoff);
- }
- else if (event_occurred(prp, EVENT_WINDOW_CLOSED))
- {
- next_state = STATE_WAIT_WINDOW_OPEN;
- /* Destroy the backoff timer, since we won't need it anymore */
- if (prp_priv->backoff)
- backoff_delete(&prp_priv->backoff);
- }
- else if (event_occurred(prp, EVENT_BACKOFF_EXPIRED))
- {
- rc = acquire_replica(prp, REPL_NSDS50_INCREMENTAL_PROTOCOL_OID, &ruv);
- use_busy_backoff_timer = PR_FALSE;
- if (rc == ACQUIRE_SUCCESS)
- {
- next_state = STATE_SENDING_UPDATES;
- }
- else if (rc == ACQUIRE_REPLICA_BUSY)
- {
- next_state = STATE_BACKOFF;
- use_busy_backoff_timer = PR_TRUE;
- }
- else if (rc == ACQUIRE_CONSUMER_WAS_UPTODATE)
- {
- next_state = STATE_WAIT_CHANGES;
- }
- else if (rc == ACQUIRE_TRANSIENT_ERROR)
- {
- next_state = STATE_BACKOFF;
- }
- else if (rc == ACQUIRE_FATAL_ERROR)
- {
- next_state = STATE_STOP_FATAL_ERROR;
- }
- if (rc != ACQUIRE_SUCCESS)
- {
- int optype, ldaprc;
- conn_get_error(prp->conn, &optype, &ldaprc);
- agmt_set_last_update_status(prp->agmt, ldaprc,
- prp->last_acquire_response_code, NULL);
- }
- /*
- * We either need to step the backoff timer, or
- * destroy it if we don't need it anymore.
- */
- if (STATE_BACKOFF == next_state)
- {
- time_t next_fire_time;
- time_t now;
- /* Step the backoff timer */
- time(&now);
- next_fire_time = backoff_step(prp_priv->backoff);
- /* And go back to sleep */
- slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
- "%s: Replication session backing off for %ld seconds\n",
- agmt_get_long_name(prp->agmt),
- next_fire_time - now);
-
- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
- }
- else
- {
- /* Destroy the backoff timer, since we won't need it anymore */
- backoff_delete(&prp_priv->backoff);
- }
- use_busy_backoff_timer = PR_FALSE;
- }
- else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET))
- {
- /* changes are available */
- if ( prp_priv->backoff == NULL || backoff_expired (prp_priv->backoff, 60) )
- {
- /*
- * Have seen cases that the agmt stuck here forever since
- * somehow the backoff timer was not in event queue anymore.
- * If the backoff timer has expired more than 60 seconds,
- * destroy it.
- */
- if ( prp_priv->backoff )
- backoff_delete(&prp_priv->backoff);
- next_state = STATE_READY_TO_ACQUIRE;
- }
- else
- {
- /* ignore changes and go to sleep */
- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
- }
- }
- else if (event_occurred(prp, EVENT_WINDOW_OPENED))
- {
- /* this should never happen - log an error and go to sleep */
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "%s: Incremental protocol: "
- "event %s should not occur in state %s; going to sleep\n",
- agmt_get_long_name(prp->agmt),
- event2name(EVENT_WINDOW_OPENED), state2name(current_state));
- protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
- }
- break;
+ /*
+ * We're in a backoff state.
+ */
+ dev_debug("repl5_inc_run(STATE_BACKOFF)");
+ if (PROTOCOL_IS_SHUTDOWN(prp)){
+ if (prp_priv->backoff)
+ backoff_delete(&prp_priv->backoff);
+ done = 1;
+ break;
+ } else if (event_occurred(prp, EVENT_REPLICATE_NOW)){
+ next_state = STATE_READY_TO_ACQUIRE;
+ } else if (event_occurred(prp, EVENT_AGMT_CHANGED)){
+ next_state = STATE_START;
+ conn_set_agmt_changed(prp->conn);
+ /* Destroy the backoff timer, since we won't need it anymore */
+ if (prp_priv->backoff)
+ backoff_delete(&prp_priv->backoff);
+ } else if (event_occurred(prp, EVENT_WINDOW_CLOSED)){
+ next_state = STATE_WAIT_WINDOW_OPEN;
+ /* Destroy the backoff timer, since we won't need it anymore */
+ if (prp_priv->backoff)
+ backoff_delete(&prp_priv->backoff);
+ } else if (event_occurred(prp, EVENT_BACKOFF_EXPIRED)){
+ rc = acquire_replica(prp, REPL_NSDS50_INCREMENTAL_PROTOCOL_OID, &ruv);
+ use_busy_backoff_timer = PR_FALSE;
+ if (rc == ACQUIRE_SUCCESS){
+ next_state = STATE_SENDING_UPDATES;
+ } else if (rc == ACQUIRE_REPLICA_BUSY){
+ next_state = STATE_BACKOFF;
+ use_busy_backoff_timer = PR_TRUE;
+ } else if (rc == ACQUIRE_CONSUMER_WAS_UPTODATE){
+ next_state = STATE_WAIT_CHANGES;
+ } else if (rc == ACQUIRE_TRANSIENT_ERROR){
+ next_state = STATE_BACKOFF;
+ } else if (rc == ACQUIRE_FATAL_ERROR){
+ next_state = STATE_STOP_FATAL_ERROR;
+ }
+ if (rc != ACQUIRE_SUCCESS){
+ conn_get_error(prp->conn, &optype, &ldaprc);
+ agmt_set_last_update_status(prp->agmt, ldaprc, prp->last_acquire_response_code, NULL);
+ }
+ /*
+ * We either need to step the backoff timer, or
+ * destroy it if we don't need it anymore
+ */
+ if (STATE_BACKOFF == next_state){
+ /* Step the backoff timer */
+ time(&now);
+ next_fire_time = backoff_step(prp_priv->backoff);
+ /* And go back to sleep */
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: Replication session backing off for %ld seconds\n",
+ agmt_get_long_name(prp->agmt),next_fire_time - now);
+ protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
+ } else {
+ /* Destroy the backoff timer, since we won't need it anymore */
+ backoff_delete(&prp_priv->backoff);
+ }
+ use_busy_backoff_timer = PR_FALSE;
+ } else if (event_occurred(prp, EVENT_TRIGGERING_CRITERIA_MET)){
+ /* changes are available */
+ if ( prp_priv->backoff == NULL || backoff_expired (prp_priv->backoff, 60)){
+ /*
+ * Have seen cases that the agmt stuck here forever since
+ * somehow the backoff timer was not in event queue anymore.
+ * If the backoff timer has expired more than 60 seconds, destroy it.
+ */
+ if ( prp_priv->backoff )
+ backoff_delete(&prp_priv->backoff);
+ next_state = STATE_READY_TO_ACQUIRE;
+ } else {
+ /* ignore changes and go to sleep */
+ protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
+ }
+ } else if (event_occurred(prp, EVENT_WINDOW_OPENED)){
+ /* this should never happen - log an error and go to sleep */
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "%s: Incremental protocol: "
+ "event %s should not occur in state %s; going to sleep\n",
+ agmt_get_long_name(prp->agmt), event2name(EVENT_WINDOW_OPENED),
+ state2name(current_state));
+ protocol_sleep(prp, PR_INTERVAL_NO_TIMEOUT);
+ }
+ break;
+
case STATE_SENDING_UPDATES:
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES)");
- agmt_set_update_in_progress(prp->agmt, PR_TRUE);
- num_changes_sent = 0;
- last_start_time = current_time();
- agmt_set_last_update_start(prp->agmt, last_start_time);
- /*
- * We've acquired the replica, and are ready to send any
- * needed updates.
- */
- if (PROTOCOL_IS_SHUTDOWN(prp))
- {
- release_replica (prp);
- done = 1;
- agmt_set_update_in_progress(prp->agmt, PR_FALSE);
- agmt_set_last_update_end(prp->agmt, current_time());
- /* MAB: I don't find the following status correct. How do we know it has
- been stopped by an admin and not by a total update request, for instance?
- In any case, how is this protocol shutdown situation different from all the
- other ones that are present in this state machine? */
- /* richm: We at least need to let monitors know that the protocol has been
- shutdown - maybe they can figure out why */
- agmt_set_last_update_status(prp->agmt, 0, 0, "Protocol stopped");
- break;
- }
-
- agmt_set_last_update_status(prp->agmt, 0, 0, "Incremental update started");
-
- /* ONREPL - in this state we send changes no matter what other events occur.
- This is because we can get because of the REPLICATE_NOW event which
- has high priority. Is this ok? */
- /* First, push new schema to the consumer if needed */
- /* ONREPL - should we push schema after we examine the RUV? */
- /*
- * GGOOREPL - I don't see why we should wait until we've
- * examined the RUV. The schema entry has its own CSN that is
- * used to decide if the remote schema needs to be updated.
- */
- cons_schema_csn = agmt_get_consumer_schema_csn ( prp->agmt );
- rc = conn_push_schema(prp->conn, &cons_schema_csn);
- if ( cons_schema_csn != agmt_get_consumer_schema_csn ( prp->agmt ))
- {
- agmt_set_consumer_schema_csn ( prp->agmt, cons_schema_csn );
- }
- if (CONN_SCHEMA_UPDATED != rc && CONN_SCHEMA_NO_UPDATE_NEEDED != rc)
- {
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Warning: unable to replicate schema: rc=%d\n",
- agmt_get_long_name(prp->agmt), rc);
- /* But keep going */
- }
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> examine_update_vector");
- rc = examine_update_vector(prp, ruv);
- /*
- * Decide what to do next - proceed with incremental,
- * backoff, or total update
- */
- switch (rc)
- {
- case EXAMINE_RUV_PARAM_ERROR:
- /* this is really bad - we have NULL prp! */
- next_state = STATE_STOP_FATAL_ERROR;
- break;
- case EXAMINE_RUV_PRISTINE_REPLICA:
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Replica has no update vector. It has never been initialized.\n",
- agmt_get_long_name(prp->agmt));
- next_state = STATE_BACKOFF_START;
- break;
- case EXAMINE_RUV_GENERATION_MISMATCH:
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Replica has a different generation ID than the local data.\n",
- agmt_get_long_name(prp->agmt));
- next_state = STATE_BACKOFF_START;
- break;
- case EXAMINE_RUV_REPLICA_TOO_OLD:
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Replica update vector is too out of date to bring "
- "into sync using the incremental protocol. The replica "
- "must be reinitialized.\n", agmt_get_long_name(prp->agmt));
- next_state = STATE_BACKOFF_START;
- break;
- case EXAMINE_RUV_OK:
- /* update our csn generator state with the consumer's ruv data */
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> examine_update_vector OK");
- object_acquire(prp->replica_object);
- replica = object_get_data(prp->replica_object);
- rc = replica_update_csngen_state (replica, ruv);
- object_release (prp->replica_object);
- replica = NULL;
- if (rc == CSN_LIMIT_EXCEEDED) /* too much skew */
- {
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Incremental protocol: fatal error - too much time skew between replicas!\n",
- agmt_get_long_name(prp->agmt));
- next_state = STATE_STOP_FATAL_ERROR;
- }
- else if (rc != 0) /* internal error */
- {
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Incremental protocol: fatal internal error updating the CSN generator!\n",
- agmt_get_long_name(prp->agmt));
- next_state = STATE_STOP_FATAL_ERROR;
- }
- else
- {
- rc = send_updates(prp, ruv, &num_changes_sent);
-
- if (rc == UPDATE_NO_MORE_UPDATES)
- {
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_NO_MORE_UPDATES -> STATE_WAIT_CHANGES");
- agmt_set_last_update_status(prp->agmt, 0, 0, "Incremental update succeeded");
- next_state = STATE_WAIT_CHANGES;
- }
- else if (rc == UPDATE_YIELD)
- {
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_YIELD -> STATE_BACKOFF_START");
- agmt_set_last_update_status(prp->agmt, 0, 0, "Incremental update succeeded and yielded");
- next_state = STATE_BACKOFF_START;
- }
- else if (rc == UPDATE_TRANSIENT_ERROR)
- {
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_TRANSIENT_ERROR -> STATE_BACKOFF_START");
- next_state = STATE_BACKOFF_START;
- }
- else if (rc == UPDATE_FATAL_ERROR)
- {
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_FATAL_ERROR -> STATE_STOP_FATAL_ERROR");
- next_state = STATE_STOP_FATAL_ERROR;
- }
- else if (rc == UPDATE_SCHEDULE_WINDOW_CLOSED)
- {
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_SCHEDULE_WINDOW_CLOSED -> STATE_WAIT_WINDOW_OPEN");
- /* ONREPL - I don't think we should check this. We might be
- here because of replicate_now event - so we don't care
- about the schedule */
- next_state = STATE_WAIT_WINDOW_OPEN;
- /* ONREPL - do we need to release the replica here ? */
- conn_disconnect (prp->conn);
- }
- else if (rc == UPDATE_CONNECTION_LOST)
- {
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_CONNECTION_LOST -> STATE_BACKOFF_START");
- next_state = STATE_BACKOFF_START;
- }
- else if (rc == UPDATE_TIMEOUT)
- {
- dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_TIMEOUT -> STATE_BACKOFF_START");
- next_state = STATE_BACKOFF_START;
- }
- }
- last_start_time = 0UL;
- break;
- }
- if (NULL != ruv)
- {
- ruv_destroy(&ruv); ruv = NULL;
- }
- agmt_set_last_update_end(prp->agmt, current_time());
- agmt_set_update_in_progress(prp->agmt, PR_FALSE);
- /* If timed out, close the connection after released the replica */
- release_replica(prp);
- if (rc == UPDATE_TIMEOUT) {
- conn_disconnect(prp->conn);
- }
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES)");
+ agmt_set_update_in_progress(prp->agmt, PR_TRUE);
+ num_changes_sent = 0;
+ last_start_time = current_time();
+ agmt_set_last_update_start(prp->agmt, last_start_time);
+ /*
+ * We've acquired the replica, and are ready to send any needed updates.
+ */
+ if (PROTOCOL_IS_SHUTDOWN(prp)){
+ release_replica (prp);
+ done = 1;
+ agmt_set_update_in_progress(prp->agmt, PR_FALSE);
+ agmt_set_last_update_end(prp->agmt, current_time());
+ /* MAB: I don't find the following status correct. How do we know it has
+ * been stopped by an admin and not by a total update request, for instance?
+ * In any case, how is this protocol shutdown situation different from all the
+ * other ones that are present in this state machine? */
+ /* richm: We at least need to let monitors know that the protocol has been
+ * shutdown - maybe they can figure out why */
+ agmt_set_last_update_status(prp->agmt, 0, 0, "Protocol stopped");
+ break;
+ }
+
+ agmt_set_last_update_status(prp->agmt, 0, 0, "Incremental update started");
+ /* ONREPL - in this state we send changes no matter what other events occur.
+ * This is because we can get because of the REPLICATE_NOW event which
+ * has high priority. Is this ok? */
+ /* First, push new schema to the consumer if needed */
+ /* ONREPL - should we push schema after we examine the RUV? */
+ /*
+ * GGOOREPL - I don't see why we should wait until we've
+ * examined the RUV. The schema entry has its own CSN that is
+ * used to decide if the remote schema needs to be updated.
+ */
+ cons_schema_csn = agmt_get_consumer_schema_csn ( prp->agmt );
+ rc = conn_push_schema(prp->conn, &cons_schema_csn);
+ if ( cons_schema_csn != agmt_get_consumer_schema_csn ( prp->agmt )){
+ agmt_set_consumer_schema_csn ( prp->agmt, cons_schema_csn );
+ }
+ if (CONN_SCHEMA_UPDATED != rc && CONN_SCHEMA_NO_UPDATE_NEEDED != rc){
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Warning: unable to replicate schema: rc=%d\n", agmt_get_long_name(prp->agmt), rc);
+ /* But keep going */
+ }
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> examine_update_vector");
+ rc = examine_update_vector(prp, ruv);
+ /*
+ * Decide what to do next - proceed with incremental, backoff, or total update
+ */
+ switch (rc){
+ case EXAMINE_RUV_PARAM_ERROR:
+ /* this is really bad - we have NULL prp! */
+ next_state = STATE_STOP_FATAL_ERROR;
+ break;
+ case EXAMINE_RUV_PRISTINE_REPLICA:
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Replica has no update vector. It has never been initialized.\n",
+ agmt_get_long_name(prp->agmt));
+ next_state = STATE_BACKOFF_START;
+ break;
+ case EXAMINE_RUV_GENERATION_MISMATCH:
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Replica has a different generation ID than the local data.\n",
+ agmt_get_long_name(prp->agmt));
+ next_state = STATE_BACKOFF_START;
+ break;
+ case EXAMINE_RUV_REPLICA_TOO_OLD:
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Replica update vector is too out of date to bring "
+ "into sync using the incremental protocol. The replica "
+ "must be reinitialized.\n", agmt_get_long_name(prp->agmt));
+ next_state = STATE_BACKOFF_START;
+ break;
+ case EXAMINE_RUV_OK:
+ /* update our csn generator state with the consumer's ruv data */
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> examine_update_vector OK");
+ object_acquire(prp->replica_object);
+ replica = object_get_data(prp->replica_object);
+ rc = replica_update_csngen_state (replica, ruv);
+ object_release (prp->replica_object);
+ replica = NULL;
+ if (rc == CSN_LIMIT_EXCEEDED) /* too much skew */ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Incremental protocol: fatal error - too much time skew between replicas!\n",
+ agmt_get_long_name(prp->agmt));
+ next_state = STATE_STOP_FATAL_ERROR;
+ } else if (rc != 0) /* internal error */ {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Incremental protocol: fatal internal error updating the CSN generator!\n",
+ agmt_get_long_name(prp->agmt));
+ next_state = STATE_STOP_FATAL_ERROR;
+ } else {
+ rc = send_updates(prp, ruv, &num_changes_sent);
+ if (rc == UPDATE_NO_MORE_UPDATES){
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_NO_MORE_UPDATES -> STATE_WAIT_CHANGES");
+ agmt_set_last_update_status(prp->agmt, 0, 0, "Incremental update succeeded");
+ next_state = STATE_WAIT_CHANGES;
+ } else if (rc == UPDATE_YIELD){
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_YIELD -> STATE_BACKOFF_START");
+ agmt_set_last_update_status(prp->agmt, 0, 0, "Incremental update succeeded and yielded");
+ next_state = STATE_BACKOFF_START;
+ } else if (rc == UPDATE_TRANSIENT_ERROR){
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_TRANSIENT_ERROR -> STATE_BACKOFF_START");
+ next_state = STATE_BACKOFF_START;
+ } else if (rc == UPDATE_FATAL_ERROR){
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_FATAL_ERROR -> STATE_STOP_FATAL_ERROR");
+ next_state = STATE_STOP_FATAL_ERROR;
+ } else if (rc == UPDATE_SCHEDULE_WINDOW_CLOSED){
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_SCHEDULE_WINDOW_CLOSED -> STATE_WAIT_WINDOW_OPEN");
+ /*
+ * ONREPL - I don't think we should check this. We might be
+ * here because of replicate_now event - so we don't care
+ * about the schedule
+ */
+ next_state = STATE_WAIT_WINDOW_OPEN;
+ /* ONREPL - do we need to release the replica here ? */
+ conn_disconnect (prp->conn);
+ } else if (rc == UPDATE_CONNECTION_LOST){
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_CONNECTION_LOST -> STATE_BACKOFF_START");
+ next_state = STATE_BACKOFF_START;
+ } else if (rc == UPDATE_TIMEOUT){
+ dev_debug("repl5_inc_run(STATE_SENDING_UPDATES) -> send_updates = UPDATE_TIMEOUT -> STATE_BACKOFF_START");
+ next_state = STATE_BACKOFF_START;
+ }
+ }
+ last_start_time = 0UL;
+ break;
+ }
+
+ if (NULL != ruv){
+ ruv_destroy(&ruv); ruv = NULL;
+ }
+ agmt_set_last_update_end(prp->agmt, current_time());
+ agmt_set_update_in_progress(prp->agmt, PR_FALSE);
+ /* If timed out, close the connection after released the replica */
+ release_replica(prp);
+ if (rc == UPDATE_TIMEOUT) {
+ conn_disconnect(prp->conn);
+ }
+ if (rc == UPDATE_NO_MORE_UPDATES && num_changes_sent > 0){
+ if (pausetime > 0){
+ /* richm - 20020219 - If we have acquired the consumer, and another master has gone
+ * into backoff waiting for us to release it, we may acquire the replica sooner
+ * than the other master has a chance to, and the other master may not be able
+ * to acquire the consumer for a long time (hours, days?) if this server is
+ * under a heavy load (see reliab06 et. al. system tests)
+ * So, this sleep gives the other master(s) a chance to acquire the consumer replica */
+ loops = pausetime;
+ /* the while loop is so that we don't just sleep and sleep if an
+ * event comes in that we should handle immediately (like shutdown) */
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
+ "%s: Pausing updates for %ld seconds to allow other suppliers to update consumer\n",
+ agmt_get_long_name(prp->agmt), pausetime);
+ while (loops-- && !(PROTOCOL_IS_SHUTDOWN(prp))){
+ DS_Sleep(PR_SecondsToInterval(1));
+ }
+ } else if (num_changes_sent > 10){
+ /* wait for consumer to write its ruv if the replication was busy */
+ /* When asked, consumer sends its ruv in cache to the supplier. */
+ /* DS_Sleep ( PR_SecondsToInterval(1) ); */
+ }
+ }
+ break;
- if (rc == UPDATE_NO_MORE_UPDATES && num_changes_sent > 0)
- {
- if (pausetime > 0)
- {
- /* richm - 20020219 - If we have acquired the consumer, and another master has gone
- into backoff waiting for us to release it, we may acquire the replica sooner
- than the other master has a chance to, and the other master may not be able
- to acquire the consumer for a long time (hours, days?) if this server is
- under a heavy load (see reliab06 et. al. system tests)
- So, this sleep gives the other master(s) a chance to acquire the consumer
- replica */
- long loops = pausetime;
- /* the while loop is so that we don't just sleep and sleep if an
- event comes in that we should handle immediately (like shutdown) */
- slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
- "%s: Pausing updates for %ld seconds to allow other suppliers to update consumer\n",
- agmt_get_long_name(prp->agmt), pausetime);
- while (loops-- && !(PROTOCOL_IS_SHUTDOWN(prp)))
- {
- DS_Sleep(PR_SecondsToInterval(1));
- }
- }
- else if (num_changes_sent > 10)
- {
- /* wait for consumer to write its ruv if the replication was busy */
- /* When asked, consumer sends its ruv in cache to the supplier. */
- /* DS_Sleep ( PR_SecondsToInterval(1) ); */
- }
- }
- break;
case STATE_STOP_FATAL_ERROR:
- /*
- * We encountered some sort of a fatal error. Suspend.
- */
- /* XXXggood update state in replica */
- agmt_set_last_update_status(prp->agmt, -1, 0, "Incremental update has failed and requires administrator action");
- dev_debug("repl5_inc_run(STATE_STOP_FATAL_ERROR)");
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Incremental update failed and requires administrator action\n",
- agmt_get_long_name(prp->agmt));
- next_state = STATE_STOP_FATAL_ERROR_PART2;
- break;
+ /*
+ * We encountered some sort of a fatal error. Suspend.
+ */
+ /* XXXggood update state in replica */
+ agmt_set_last_update_status(prp->agmt, -1, 0, "Incremental update has failed and requires administrator action");
+ dev_debug("repl5_inc_run(STATE_STOP_FATAL_ERROR)");
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
+ "%s: Incremental update failed and requires administrator action\n",
+ agmt_get_long_name(prp->agmt));
+ next_state = STATE_STOP_FATAL_ERROR_PART2;
+ break;
+
case STATE_STOP_FATAL_ERROR_PART2:
- if (PROTOCOL_IS_SHUTDOWN(prp))
- {
- done = 1;
- break;
- }
-
- /* MAB: This state is the FATAL state where we are supposed to get
- as a result of a FATAL error on send_updates. But, as bug
- states, send_updates was always returning TRANSIENT errors and never
- FATAL... In other words, this code has never been tested before...
-
- As of 01/16/01, this piece of code was in a very dangerous state. In particular,
- 1) it does not catch any events
- 2) it is a terminal state (once reached it never transitions to a different state)
-
- Both things combined make this state to become a consuming infinite loop
- that is useless after all (we are in a fatal place requiring manual admin jobs */
-
- /* MAB: The following lines fix problem number 1 above... When the code gets
- into this state, it should only get a chance to get out of it by an
- EVENT_AGMT_CHANGED event... All other events should be ignored */
- else if (event_occurred(prp, EVENT_AGMT_CHANGED))
- {
- dev_debug("repl5_inc_run(STATE_STOP_FATAL_ERROR): EVENT_AGMT_CHANGED received\n");
- /* Chance to recover for the EVENT_AGMT_CHANGED event.
- This is not mandatory, but fixes problem 2 above */
- next_state = STATE_STOP_NORMAL_TERMINATION;
- }
- else
- {
- dev_debug("repl5_inc_run(STATE_STOP_FATAL_ERROR): Event received. Clearing it\n");
- reset_events (prp);
- }
-
- protocol_sleep (prp, PR_INTERVAL_NO_TIMEOUT);
- break;
-
+ if (PROTOCOL_IS_SHUTDOWN(prp)){
+ done = 1;
+ break;
+ }
+ /* MAB: This state is the FATAL state where we are supposed to get
+ * as a result of a FATAL error on send_updates. But, as bug
+ * states, send_updates was always returning TRANSIENT errors and never
+ * FATAL... In other words, this code has never been tested before...
+ *
+ * As of 01/16/01, this piece of code was in a very dangerous state. In particular,
+ * 1) it does not catch any events
+ * 2) it is a terminal state (once reached it never transitions to a different state)
+ *
+ * Both things combined make this state to become a consuming infinite loop
+ * that is useless after all (we are in a fatal place requiring manual admin jobs */
+
+ /* MAB: The following lines fix problem number 1 above... When the code gets
+ * into this state, it should only get a chance to get out of it by an
+ * EVENT_AGMT_CHANGED event... All other events should be ignored */
+ else if (event_occurred(prp, EVENT_AGMT_CHANGED)){
+ dev_debug("repl5_inc_run(STATE_STOP_FATAL_ERROR): EVENT_AGMT_CHANGED received\n");
+ /* Chance to recover for the EVENT_AGMT_CHANGED event.
+ * This is not mandatory, but fixes problem 2 above */
+ next_state = STATE_STOP_NORMAL_TERMINATION;
+ } else {
+ dev_debug("repl5_inc_run(STATE_STOP_FATAL_ERROR): Event received. Clearing it\n");
+ reset_events (prp);
+ }
+
+ protocol_sleep (prp, PR_INTERVAL_NO_TIMEOUT);
+ break;
+
case STATE_STOP_NORMAL_TERMINATION:
- /*
- * We encountered some sort of a fatal error. Return.
- */
- /* XXXggood update state in replica */
- dev_debug("repl5_inc_run(STATE_STOP_NORMAL_TERMINATION)");
- done = 1;
- break;
+ /*
+ * We encountered some sort of a fatal error. Return.
+ */
+ /* XXXggood update state in replica */
+ dev_debug("repl5_inc_run(STATE_STOP_NORMAL_TERMINATION)");
+ done = 1;
+ break;
}
- slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,
- "%s: State: %s -> %s\n",
- agmt_get_long_name(prp->agmt),
- state2name(current_state), state2name(next_state));
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name,"%s: State: %s -> %s\n",
+ agmt_get_long_name(prp->agmt),state2name(current_state), state2name(next_state));
- current_state = next_state;
+ current_state = next_state;
} while (!done);
+
/* remove_protocol_callbacks(prp); */
prp->stopped = 1;
/* Cancel any linger timer that might be in effect... */
@@ -1307,8 +1176,6 @@ repl5_inc_run(Private_Repl_Protocol *prp)
conn_disconnect(prp->conn);
}
-
-
/*
* Go to sleep until awakened.
*/
| 0 |
db876c62309810e9c5ac14a553e5c1135af55bd7
|
389ds/389-ds-base
|
Issue 50546 - fix more UI issues(part 2)
Description: Fixed minor issues not fully addressed from the last commit
relates: https://pagure.io/389-ds-base/issue/50546
Reviewed by: mreynolds (one line commit rule)
|
commit db876c62309810e9c5ac14a553e5c1135af55bd7
Author: Mark Reynolds <[email protected]>
Date: Tue Sep 10 15:13:38 2019 -0400
Issue 50546 - fix more UI issues(part 2)
Description: Fixed minor issues not fully addressed from the last commit
relates: https://pagure.io/389-ds-base/issue/50546
Reviewed by: mreynolds (one line commit rule)
diff --git a/src/cockpit/389-console/src/ds.js b/src/cockpit/389-console/src/ds.js
index efe337c6b..702ff88d5 100644
--- a/src/cockpit/389-console/src/ds.js
+++ b/src/cockpit/389-console/src/ds.js
@@ -346,27 +346,7 @@ function popup_success(msg) {
// This is called when any Save button is clicked on the main page. We call
// all the save functions for all the pages here. This is not used for modal forms
function save_all () {
- if ("nsslapd-ldapilisten" in config_values || "nsslapd-ldapiautobind" in config_values) {
- if ( (!$("#nsslapd-ldapilisten").is(":checked") && config_values["nsslapd-ldapilisten"] == "on") ||
- (!$("#nsslapd-ldapiautobind").is(":checked") && config_values["nsslapd-ldapiautobind"] == "on") )
- {
- // Okay we are disabling some form of LDAPI that will break the UI, warn the user
- popup_confirm("Disabling LDAPI or LDAPI Autobind will make the UI unusable. Are you sure you want to proceed",
- "Confirmation", function (yes)
- {
- if (yes) {
- save_config();
- } else {
- // No, reset config
- get_and_set_config();
- }
- });
- } else {
- save_config();
- }
- } else {
- save_config();
- }
+ save_config(); // Server Config Page
}
function load_repl_suffix_dropdowns() {
diff --git a/src/cockpit/389-console/src/index.html b/src/cockpit/389-console/src/index.html
index 1a42c972d..91993cc0d 100644
--- a/src/cockpit/389-console/src/index.html
+++ b/src/cockpit/389-console/src/index.html
@@ -411,13 +411,14 @@
</div>
<hr>
<div>
- <label for="backend-name" class="ds-config-label" title="The name for the backend database, like 'userroot'">Backend Name (optional)</label><input
- class="ds-input ds-inst-input" placeholder="e.g. userRoot" size="40" type="text" id="backend-name">
+ <label for="backend-suffix" class="ds-config-label" title="Database suffix, like 'dc=example,dc=com'. The suffix must be a valid LDAP Distiguished Name (DN)">Database Suffix</label><input
+ class="ds-input ds-inst-input" size="40" placeholder="e.g. dc=example,dc=com" type="text" id="backend-suffix">
</div>
<div>
- <label for="backend-suffix" class="ds-config-label" title="Database suffix, like 'dc=example,dc=com'">Backend Suffix (optional)</label><input
- class="ds-input ds-inst-input" size="40" placeholder="e.g. dc=example,dc=com" type="text" id="backend-suffix">
+ <label for="backend-name" class="ds-config-label" title="The name for the backend database, like 'userroot'. The name can be a combination of alphanumeric characters, dashes (-), and underscores (_). No other characters are allowed.">Database Name</label><input
+ class="ds-input ds-inst-input" placeholder="e.g. userRoot" size="40" type="text" id="backend-name">
</div>
+
<div>
<label for="create-sample-entries" class="ds-config-label" title="Create sample entries in the suffix">Create Sample Entries </label><input
type="checkbox" class="ds-input ds-config-checkbox" id="create-sample-entries">
diff --git a/src/cockpit/389-console/src/schema.html b/src/cockpit/389-console/src/schema.html
index 872abab20..36f61be93 100644
--- a/src/cockpit/389-console/src/schema.html
+++ b/src/cockpit/389-console/src/schema.html
@@ -102,11 +102,11 @@
class="ds-input" type="text" id="attr-usage-view" size="40" readonly />
</div>
<div>
- <input type="checkbox" class="ds-config-checkbox" id="attr-multivalued-view" readonly /><label
+ <input type="checkbox" class="ds-config-checkbox" id="attr-multivalued-view" disabled="disabled" /><label
for="attr-multivalued-view" class="ds-label"> Attribute Multi-Valued </label>
</div>
<div>
- <input type="checkbox" class="ds-config-checkbox" id="attr-no-user-mod-view" readonly /><label
+ <input type="checkbox" class="ds-config-checkbox" id="attr-no-user-mod-view" disabled="disabled" /><label
for="attr-no-user-mod-view" class="ds-label"> Read-only (NO-USER-MODIFICATION flag) </label>
</div>
<div>
@@ -330,7 +330,7 @@
<hr>
<div class="ds-container">
<div name="available-attrs">
- <label class="ds-config-label" for="schema-list" title="The available attributes to choose from."><b>Available Attributes</b></label>
+ <label for="schema-list" title="The available attributes to choose from."><b>Available Attributes</b></label>
<select id="schema-list" class="ds-oc-form-list" name="availattrs" multiple>
</select>
</div>
diff --git a/src/cockpit/389-console/src/servers.html b/src/cockpit/389-console/src/servers.html
index 02e39abe7..04678e85f 100644
--- a/src/cockpit/389-console/src/servers.html
+++ b/src/cockpit/389-console/src/servers.html
@@ -948,24 +948,15 @@
-->
<div id="server-ldapi" class="all-pages ds-margin-left" hidden>
<h3 class="ds-config-header">LDAPI & Autobind Settings</h3>
- <div>
- <input type="checkbox" class="ds-config-checkbox" id="nsslapd-ldapilisten" checked><label
- for="nsslapd-ldapilisten" class="ds-label" title="Enable LDAPI (nsslapd-ldapilisten)."> Enable LDAPI</label>
- </div>
<div class="ldapi-attrs ds-inline" hidden>
<div>
- <label for="nsslapd-ldapifilepath" class="ds-config-indent-sm-label" title="The Unix socket file (nsslapd-ldapifilepath).">LDAPI Socket File Path</label><input
- class="ds-input" type="text" id="nsslapd-ldapifilepath" size="35"/>
- </div>
- <div>
- <p></p>
- <input type="checkbox" class="ds-config-checkbox" id="nsslapd-ldapiautobind"><label
- for="nsslapd-ldapiautobind" class="ds-label" title="Enable autobind (nsslapd-ldapiautobind)."> Enable Autobind</label>
+ <label for="nsslapd-ldapifilepath" class="ds-config-label" title="The Unix socket file (nsslapd-ldapifilepath).">LDAPI Socket File Path</label><input
+ class="ds-input" type="text" id="nsslapd-ldapifilepath" size="35" readonly/>
</div>
<div class="ds-inline">
- <div class="autobind-attrs" hidden>
+ <div class="autobind-attrs">
<div>
- <label for="nsslapd-ldapimaprootdn" class="ds-config-indent-sm-label" title="Map the Unix root entry to this Directory Manager DN (nsslapd-ldapimaprootdn).">DN to map "root" To</label><input
+ <label for="nsslapd-ldapimaprootdn" class="ds-config-label" title="Map the Unix root entry to this Directory Manager DN (nsslapd-ldapimaprootdn).">DN to map "root" To</label><input
class="ds-input" type="text" id="nsslapd-ldapimaprootdn" placeholder="e.g. cn=Directory Manager" size="35"/>
</div>
<div>
diff --git a/src/cockpit/389-console/src/servers.js b/src/cockpit/389-console/src/servers.js
index 3d1c4fa6d..b2a4b0fca 100644
--- a/src/cockpit/389-console/src/servers.js
+++ b/src/cockpit/389-console/src/servers.js
@@ -115,8 +115,8 @@ function clear_inst_form() {
$("#create-inst-rootdn").val("cn=Directory Manager");
$("#rootdn-pw").val("");
$("#rootdn-pw-confirm").val("");
- $("#backend-suffix").val("");
- $("#backend-name").val("");
+ $("#backend-suffix").val("dc=example,dc=com");
+ $("#backend-name").val("userRoot");
$("#create-sample-entries").prop('checked', false);
$("#create-inst-tls").prop('checked', true);
$(".ds-inst-input").css("border-color", "initial");
@@ -963,45 +963,6 @@ $(document).ready( function() {
});
// LDAPI form control
- $("#nsslapd-ldapilisten").change(function() {
- if(this.checked) {
- $('.ldapi-attrs').show();
- if ( $("#nsslapd-ldapiautobind").is(":checked") ){
- $(".autobind-attrs").show();
- if ( $("#nsslapd-ldapimaptoentries").is(":checked") ){
- $(".autobind-entry-attrs").show();
- } else {
- $(".autobind-entry-attrs").hide();
- }
- } else {
- $(".autobind-attrs").hide();
- $(".autobind-entry-attrs").hide();
- $("#nsslapd-ldapimaptoentries").prop("checked", false );
- }
- } else {
- $('.ldapi-attrs').hide();
- $(".autobind-attrs").hide();
- $(".autobind-entry-attrs").hide();
- $("#nsslapd-ldapiautobind").prop("checked", false );
- $("#nsslapd-ldapimaptoentries").prop("checked", false );
- }
- });
-
- $("#nsslapd-ldapiautobind").change(function() {
- if (this.checked){
- $(".autobind-attrs").show();
- if ( $("#nsslapd-ldapimaptoentries").is(":checked") ){
- $(".autobind-entry-attrs").show();
- } else {
- $(".autobind-entry-attrs").hide();
- }
- } else {
- $(".autobind-attrs").hide();
- $(".autobind-entry-attrs").hide();
- $("#nsslapd-ldapimaptoentries").prop("checked", false );
- }
- });
-
$("#nsslapd-ldapimaptoentries").change(function() {
if (this.checked){
$(".autobind-entry-attrs").show();
@@ -1524,7 +1485,7 @@ $(document).ready( function() {
$("#create-inst-serverid").css("border-color", "red");
return;
}
- if (new_server_id.match(/^[#%:-A-Za-z0-9_]+$/g)) {
+ if (new_server_id.match(/^[#%:A-Za-z0-9_\-]+$/g)) {
setup_inf = setup_inf.replace('INST_NAME', new_server_id);
} else {
report_err($("#create-inst-serverid"), 'Instance name can only contain letters, numbers, and: # % : - _');
| 0 |
c19bb9dd1e95ee98a53a06f3d7eefb4dce5bc0ef
|
389ds/389-ds-base
|
Bug 863576 - Dirsrv deadlock locking up IPA
https://bugzilla.redhat.com/show_bug.cgi?id=863576
Bug Description: Abandon of a Simple Paged Results request causes
the self deadlock. When abandoning a simple paged result request,
the mutex for the connection object c_mutex is locked in do_abandon.
But to free a pagedresult massage id, pagedresults_free_one_msgid
called from do_abandon tries to acquire lock on c_mutex again.
The NSPR lock function PR_Lock is not self re-entrant. Thus the
server hangs there due to the self-deadlock.
Fix Description: This patch is removing to call PR_Lock(c_mutex)
in pagedresults_free_one_msgid and renamed it to pagedresults_free_
one_msgid_nolock. To maintain the consistency, "_nolock" is added
to other pagedresults apis which do not call PR_Lock in it.
Also, stricter locking on c_mutex is being added to pagedresults_
parse_control_value to protect the pagedresults related field in
the connection object.
|
commit c19bb9dd1e95ee98a53a06f3d7eefb4dce5bc0ef
Author: Noriko Hosoi <[email protected]>
Date: Fri Oct 5 17:56:22 2012 -0700
Bug 863576 - Dirsrv deadlock locking up IPA
https://bugzilla.redhat.com/show_bug.cgi?id=863576
Bug Description: Abandon of a Simple Paged Results request causes
the self deadlock. When abandoning a simple paged result request,
the mutex for the connection object c_mutex is locked in do_abandon.
But to free a pagedresult massage id, pagedresults_free_one_msgid
called from do_abandon tries to acquire lock on c_mutex again.
The NSPR lock function PR_Lock is not self re-entrant. Thus the
server hangs there due to the self-deadlock.
Fix Description: This patch is removing to call PR_Lock(c_mutex)
in pagedresults_free_one_msgid and renamed it to pagedresults_free_
one_msgid_nolock. To maintain the consistency, "_nolock" is added
to other pagedresults apis which do not call PR_Lock in it.
Also, stricter locking on c_mutex is being added to pagedresults_
parse_control_value to protect the pagedresults related field in
the connection object.
diff --git a/ldap/servers/slapd/abandon.c b/ldap/servers/slapd/abandon.c
index 4f00da9cd..094ae9514 100644
--- a/ldap/servers/slapd/abandon.c
+++ b/ldap/servers/slapd/abandon.c
@@ -153,7 +153,7 @@ do_abandon( Slapi_PBlock *pb )
}
if ( op_is_pagedresults(o) ) {
- if ( 0 == pagedresults_free_one_msgid(pb->pb_conn, id) ) {
+ if ( 0 == pagedresults_free_one_msgid_nolock(pb->pb_conn, id) ) {
slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64
" op=%d ABANDON targetop=Simple Paged Results\n",
pb->pb_conn->c_connid, pb->pb_op->o_opid );
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 9e4310400..a3b1df52d 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -2094,7 +2094,7 @@ void connection_enter_leave_turbo(Connection *conn, int current_turbo_flag, int
PR_Lock(conn->c_mutex);
/* We can already be in turbo mode, or not */
current_mode = current_turbo_flag;
- if (pagedresults_in_use(conn)) {
+ if (pagedresults_in_use_nolock(conn)) {
/* PAGED_RESULTS does not need turbo mode */
new_mode = 0;
} else if (conn->c_private->operation_rate == 0) {
@@ -2780,7 +2780,7 @@ disconnect_server_nomutex( Connection *conn, PRUint64 opconnid, int opid, PRErro
connection_abandon_operations( conn );
/* needed here to ensure simple paged results timeout properly and
* don't impact subsequent ops */
- pagedresults_reset_timedout(conn);
+ pagedresults_reset_timedout_nolock(conn);
if (! config_check_referral_mode()) {
/*
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 0d3d81f6a..62a52fc93 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1695,7 +1695,7 @@ setup_pr_read_pds(Connection_Table *ct, PRFileDesc **n_tcps, PRFileDesc **s_tcps
{
int add_fd = 1;
/* check timeout for PAGED RESULTS */
- if (pagedresults_is_timedout(c))
+ if (pagedresults_is_timedout_nolock(c))
{
/* Exceeded the timelimit; disconnect the client */
disconnect_server_nomutex(c, c->c_connid, -1,
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index ea7de14a0..d445c0670 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -64,6 +64,7 @@ pagedresults_parse_control_value( Slapi_PBlock *pb,
struct berval cookie = {0};
Connection *conn = pb->pb_conn;
Operation *op = pb->pb_op;
+ BerElement *ber = NULL;
LDAPDebug0Args(LDAP_DEBUG_TRACE, "--> pagedresults_parse_control_value\n");
if ( NULL == conn || NULL == op || NULL == pagesize || NULL == index ) {
@@ -76,70 +77,71 @@ pagedresults_parse_control_value( Slapi_PBlock *pb,
if ( psbvp->bv_len == 0 || psbvp->bv_val == NULL )
{
- rc = LDAP_PROTOCOL_ERROR;
+ LDAPDebug0Args(LDAP_DEBUG_ANY,
+ "<-- pagedresults_parse_control_value: no control value\n");
+ return LDAP_PROTOCOL_ERROR;
}
- else
+ ber = ber_init( psbvp );
+ if ( ber == NULL )
{
- BerElement *ber = ber_init( psbvp );
- if ( ber == NULL )
- {
- rc = LDAP_OPERATIONS_ERROR;
- }
- else
- {
- if ( ber_scanf( ber, "{io}", pagesize, &cookie ) == LBER_ERROR )
- {
- rc = LDAP_PROTOCOL_ERROR;
+ LDAPDebug0Args(LDAP_DEBUG_ANY,
+ "<-- pagedresults_parse_control_value: no control value\n");
+ return LDAP_PROTOCOL_ERROR;
+ }
+ if ( ber_scanf( ber, "{io}", pagesize, &cookie ) == LBER_ERROR )
+ {
+ LDAPDebug0Args(LDAP_DEBUG_ANY,
+ "<-- pagedresults_parse_control_value: corrupted control value\n");
+ return LDAP_PROTOCOL_ERROR;
+ }
+
+ PR_Lock(conn->c_mutex);
+ /* the ber encoding is no longer needed */
+ ber_free(ber, 1);
+ if ( cookie.bv_len <= 0 ) {
+ int i;
+ int maxlen;
+ /* first time? */
+ maxlen = conn->c_pagedresults.prl_maxlen;
+ if (conn->c_pagedresults.prl_count == maxlen) {
+ if (0 == maxlen) { /* first time */
+ conn->c_pagedresults.prl_maxlen = 1;
+ conn->c_pagedresults.prl_list =
+ (PagedResults *)slapi_ch_calloc(1,
+ sizeof(PagedResults));
+ } else {
+ /* new max length */
+ conn->c_pagedresults.prl_maxlen *= 2;
+ conn->c_pagedresults.prl_list =
+ (PagedResults *)slapi_ch_realloc(
+ (char *)conn->c_pagedresults.prl_list,
+ sizeof(PagedResults) *
+ conn->c_pagedresults.prl_maxlen);
+ /* initialze newly allocated area */
+ memset(conn->c_pagedresults.prl_list + maxlen, '\0',
+ sizeof(PagedResults) * maxlen);
}
- /* the ber encoding is no longer needed */
- ber_free(ber, 1);
- if ( cookie.bv_len <= 0 ) {
- int i;
- int maxlen;
- /* first time? */
- PR_Lock(conn->c_mutex);
- maxlen = conn->c_pagedresults.prl_maxlen;
- if (conn->c_pagedresults.prl_count == maxlen) {
- if (0 == maxlen) { /* first time */
- conn->c_pagedresults.prl_maxlen = 1;
- conn->c_pagedresults.prl_list =
- (PagedResults *)slapi_ch_calloc(1,
- sizeof(PagedResults));
- } else {
- /* new max length */
- conn->c_pagedresults.prl_maxlen *= 2;
- conn->c_pagedresults.prl_list =
- (PagedResults *)slapi_ch_realloc(
- (char *)conn->c_pagedresults.prl_list,
- sizeof(PagedResults) *
- conn->c_pagedresults.prl_maxlen);
- /* initialze newly allocated area */
- memset(conn->c_pagedresults.prl_list + maxlen, '\0',
- sizeof(PagedResults) * maxlen);
- }
- *index = maxlen; /* the first position in the new area */
- } else {
- for (i = 0; i < conn->c_pagedresults.prl_maxlen; i++) {
- if (!conn->c_pagedresults.prl_list[i].pr_current_be) {
- *index = i;
- break;
- }
- }
+ *index = maxlen; /* the first position in the new area */
+ } else {
+ for (i = 0; i < conn->c_pagedresults.prl_maxlen; i++) {
+ if (!conn->c_pagedresults.prl_list[i].pr_current_be) {
+ *index = i;
+ break;
}
- conn->c_pagedresults.prl_count++;
- PR_Unlock(conn->c_mutex);
- } else {
- /* Repeated paged results request.
- * PagedResults is already allocated. */
- char *ptr = slapi_ch_malloc(cookie.bv_len + 1);
- memcpy(ptr, cookie.bv_val, cookie.bv_len);
- *(ptr+cookie.bv_len) = '\0';
- *index = strtol(ptr, NULL, 10);
- slapi_ch_free_string(&ptr);
}
- slapi_ch_free((void **)&cookie.bv_val);
}
+ conn->c_pagedresults.prl_count++;
+ } else {
+ /* Repeated paged results request.
+ * PagedResults is already allocated. */
+ char *ptr = slapi_ch_malloc(cookie.bv_len + 1);
+ memcpy(ptr, cookie.bv_val, cookie.bv_len);
+ *(ptr+cookie.bv_len) = '\0';
+ *index = strtol(ptr, NULL, 10);
+ slapi_ch_free_string(&ptr);
}
+ slapi_ch_free((void **)&cookie.bv_val);
+
if ((*index > -1) && (*index < conn->c_pagedresults.prl_maxlen)) {
/* Need to keep the latest msgid to prepare for the abandon. */
conn->c_pagedresults.prl_list[*index].pr_msgid = op->o_msgid;
@@ -149,6 +151,7 @@ pagedresults_parse_control_value( Slapi_PBlock *pb,
"pagedresults_parse_control_value: invalid cookie: %d\n",
*index);
}
+ PR_Unlock(conn->c_mutex);
LDAPDebug1Arg(LDAP_DEBUG_TRACE,
"<-- pagedresults_parse_control_value: idx %d\n", *index);
@@ -261,7 +264,7 @@ pagedresults_free_one( Connection *conn, int index )
}
int
-pagedresults_free_one_msgid( Connection *conn, ber_int_t msgid )
+pagedresults_free_one_msgid_nolock( Connection *conn, ber_int_t msgid )
{
int rc = -1;
int i;
@@ -269,9 +272,9 @@ pagedresults_free_one_msgid( Connection *conn, ber_int_t msgid )
LDAPDebug1Arg(LDAP_DEBUG_TRACE,
"--> pagedresults_free_one: msgid=%d\n", msgid);
if (conn && (msgid > -1)) {
- PR_Lock(conn->c_mutex);
if (conn->c_pagedresults.prl_count <= 0) {
- LDAPDebug2Args(LDAP_DEBUG_TRACE, "pagedresults_free_one_msgid: "
+ LDAPDebug2Args(LDAP_DEBUG_TRACE,
+ "pagedresults_free_one_msgid_nolock: "
"conn=%d paged requests list count is %d\n",
conn->c_connid, conn->c_pagedresults.prl_count);
} else {
@@ -285,7 +288,6 @@ pagedresults_free_one_msgid( Connection *conn, ber_int_t msgid )
}
}
}
- PR_Unlock(conn->c_mutex);
}
LDAPDebug1Arg(LDAP_DEBUG_TRACE, "<-- pagedresults_free_one: %d\n", rc);
@@ -720,7 +722,7 @@ pagedresults_reset_processing(Connection *conn, int index)
/* Are all the paged results requests timed out? */
int
-pagedresults_is_timedout(Connection *conn)
+pagedresults_is_timedout_nolock(Connection *conn)
{
int i;
PagedResults *prp = NULL;
@@ -753,7 +755,7 @@ pagedresults_is_timedout(Connection *conn)
/* reset all timeout */
int
-pagedresults_reset_timedout(Connection *conn)
+pagedresults_reset_timedout_nolock(Connection *conn)
{
int i;
PagedResults *prp = NULL;
@@ -773,7 +775,7 @@ pagedresults_reset_timedout(Connection *conn)
/* paged results requests are in progress. */
int
-pagedresults_in_use(Connection *conn)
+pagedresults_in_use_nolock(Connection *conn)
{
LDAPDebug0Args(LDAP_DEBUG_TRACE, "--> pagedresults_in_use\n");
if (NULL == conn) {
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 55c3a7867..905b0f45c 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1427,11 +1427,11 @@ int pagedresults_set_timelimit(Connection *conn, time_t timelimit, int index);
int pagedresults_cleanup(Connection *conn, int needlock);
int pagedresults_check_or_set_processing(Connection *conn, int index);
int pagedresults_reset_processing(Connection *conn, int index);
-int pagedresults_is_timedout(Connection *conn);
-int pagedresults_reset_timedout(Connection *conn);
-int pagedresults_in_use(Connection *conn);
+int pagedresults_is_timedout_nolock(Connection *conn);
+int pagedresults_reset_timedout_nolock(Connection *conn);
+int pagedresults_in_use_nolock(Connection *conn);
int pagedresults_free_one(Connection *conn, int index);
-int pagedresults_free_one_msgid( Connection *conn, ber_int_t msgid );
+int pagedresults_free_one_msgid_nolock( Connection *conn, ber_int_t msgid );
int op_is_pagedresults(Operation *op);
int pagedresults_cleanup_all(Connection *conn, int needlock);
void op_set_pagedresults(Operation *op);
| 0 |
e710054f63f313fec20cacde8b78b105a8a211d8
|
389ds/389-ds-base
|
Issue 83 - lib389 - Replace topology agmt objects
Bug description: In the lib389 commit that improves
topology object, we remove "masterX_agmts" key from TopologyMain.
We use it in our dirsrvtests.
Fix description: Replace the "masterX_agmts" objects with
inst.agreement functions. We really should stick to one method only.
https://pagure.io/lib389/issue/83
Reviewed by: mreynolds (Thanks!)
|
commit e710054f63f313fec20cacde8b78b105a8a211d8
Author: Simon Pichugin <[email protected]>
Date: Wed Aug 9 17:17:34 2017 +0200
Issue 83 - lib389 - Replace topology agmt objects
Bug description: In the lib389 commit that improves
topology object, we remove "masterX_agmts" key from TopologyMain.
We use it in our dirsrvtests.
Fix description: Replace the "masterX_agmts" objects with
inst.agreement functions. We really should stick to one method only.
https://pagure.io/lib389/issue/83
Reviewed by: mreynolds (Thanks!)
diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py
index f92e8744c..086cf9c73 100644
--- a/dirsrvtests/tests/suites/replication/acceptance_test.py
+++ b/dirsrvtests/tests/suites/replication/acceptance_test.py
@@ -305,14 +305,15 @@ def test_modify_stripattrs(topo_m4):
:expectedresults: It should be contain the value
"""
- agreement = topo_m4.ms["master1_agmts"]["m1_m2"]
+ m1 = topo_m4.ms["master1"]
+ agreement = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn
attr_value = 'modifiersname modifytimestamp'
log.info('Modify nsds5replicastripattrs with {}'.format(attr_value))
- topo_m4.ms["master1"].modify_s(agreement, [(ldap.MOD_REPLACE, 'nsds5replicastripattrs', attr_value)])
+ m1.modify_s(agreement, [(ldap.MOD_REPLACE, 'nsds5replicastripattrs', attr_value)])
log.info('Check nsds5replicastripattrs for {}'.format(attr_value))
- entries = topo_m4.ms['master1'].search_s(agreement, ldap.SCOPE_BASE, "objectclass=*", ['nsds5replicastripattrs'])
+ entries = m1.search_s(agreement, ldap.SCOPE_BASE, "objectclass=*", ['nsds5replicastripattrs'])
assert attr_value in entries[0].data['nsds5replicastripattrs']
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index a5a367305..f6cfb650e 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -194,7 +194,10 @@ def restore_master4(topology_m4):
topology_m4.ms["master{}".format(num)].start(timeout=30)
time.sleep(5)
topology_m4.ms["master1"].agreement.init(SUFFIX, host_to, port_to)
- topology_m4.ms["master1"].waitForReplInit(topology_m4.ms["master1_agmts"]["m1_m{}".format(num)])
+ agreement = topology_m4.ms["master1"].agreement.list(suffix=SUFFIX,
+ consumer_host=host_to,
+ consumer_port=port_to)[0].dn
+ topology_m4.ms["master1"].waitForReplInit(agreement)
time.sleep(5)
| 0 |
f847a62bcc808cd2c7cd77196bb4cdc23bad0a68
|
389ds/389-ds-base
|
Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053
https://bugzilla.redhat.com/show_bug.cgi?id=619122
Resolves: bug 619122
Bug description: fix coverify Defect Type: Resource leaks issues CID 12001.
description: The slapi_vattrspi_register_internal() has been modified to remove unnecessary NULL checking.
|
commit f847a62bcc808cd2c7cd77196bb4cdc23bad0a68
Author: Endi S. Dewata <[email protected]>
Date: Thu Jul 29 15:14:53 2010 -0500
Bug 619122 - fix coverify Defect Type: Resource leaks issues CID 11975 - 12053
https://bugzilla.redhat.com/show_bug.cgi?id=619122
Resolves: bug 619122
Bug description: fix coverify Defect Type: Resource leaks issues CID 12001.
description: The slapi_vattrspi_register_internal() has been modified to remove unnecessary NULL checking.
diff --git a/ldap/servers/slapd/vattr.c b/ldap/servers/slapd/vattr.c
index 1694846fc..025c71e06 100644
--- a/ldap/servers/slapd/vattr.c
+++ b/ldap/servers/slapd/vattr.c
@@ -1570,31 +1570,24 @@ int slapi_vattrspi_register_internal(vattr_sp_handle **h, vattr_get_fn_type get_
vattr_sp_handle *return_to_caller = NULL;
vattr_sp_handle *list_handle = NULL;
vattr_sp *new_sp = NULL;
+
/* Make a service provider handle */
new_sp = (vattr_sp*)slapi_ch_calloc(1,sizeof(vattr_sp));
- if (NULL == new_sp) {
- slapd_nasty(sourcefile,7,0);
- return ENOMEM;
- }
- return_to_caller = (vattr_sp_handle*)slapi_ch_calloc(1,sizeof(vattr_sp_handle));
- if (NULL == return_to_caller) {
- slapd_nasty(sourcefile,8,0);
- return ENOMEM;
- }
new_sp->sp_get_fn = get_fn;
new_sp->sp_get_ex_fn = get_ex_fn;
new_sp->sp_compare_fn = compare_fn;
new_sp->sp_types_fn = types_fn;
+
+ return_to_caller = (vattr_sp_handle*)slapi_ch_calloc(1,sizeof(vattr_sp_handle));
return_to_caller->sp = new_sp;
+
/* Add to the service provider list */
/* Make a handle for the list */
list_handle = (vattr_sp_handle*)slapi_ch_calloc(1, sizeof (vattr_sp_handle));
- if (NULL == list_handle) {
- return ENOMEM;
- }
*list_handle = *return_to_caller;
list_handle->next = vattr_sp_list;
vattr_sp_list = list_handle;
+
/* Return the handle to the caller */
*h = return_to_caller;
return 0;
| 0 |
2c484cc6e89e473bced0e9b25dd6e68d53024bb3
|
389ds/389-ds-base
|
Ticket #48223 - Winsync fails when AD users have multiple spaces (two)inside the value of the rdn attribute
Description: When the dirsync search returns a remote entry, winsync
search the entry with DN to retrieve the whole attribute value pairs.
The DN used for the search was normalized which replaced multiple white-
spaces with one in the DN. This patch does not used the normalized DN,
but the same DN given by AD.
The DN normalization behaviour was introduced to fix a ticket #529 -
dn normalization must handle multiple space characters in attributes.
Added additional debugging to get the info which entry failed to sync.
https://fedorahosted.org/389/ticket/48223
Reviewed by [email protected] (Thank you, Rich!!)
|
commit 2c484cc6e89e473bced0e9b25dd6e68d53024bb3
Author: Noriko Hosoi <[email protected]>
Date: Mon Jul 13 17:51:01 2015 -0700
Ticket #48223 - Winsync fails when AD users have multiple spaces (two)inside the value of the rdn attribute
Description: When the dirsync search returns a remote entry, winsync
search the entry with DN to retrieve the whole attribute value pairs.
The DN used for the search was normalized which replaced multiple white-
spaces with one in the DN. This patch does not used the normalized DN,
but the same DN given by AD.
The DN normalization behaviour was introduced to fix a ticket #529 -
dn normalization must handle multiple space characters in attributes.
Added additional debugging to get the info which entry failed to sync.
https://fedorahosted.org/389/ticket/48223
Reviewed by [email protected] (Thank you, Rich!!)
diff --git a/ldap/servers/plugins/posix-winsync/posix-group-func.c b/ldap/servers/plugins/posix-winsync/posix-group-func.c
index 5f841e50e..a497f3f4d 100644
--- a/ldap/servers/plugins/posix-winsync/posix-group-func.c
+++ b/ldap/servers/plugins/posix-winsync/posix-group-func.c
@@ -95,7 +95,7 @@ getEntry(const char *udn, char **attrs)
}
else {
slapi_log_error(SLAPI_LOG_FATAL, POSIX_WINSYNC_PLUGIN_NAME,
- "getEntry: error searching for uid: %d\n", rc);
+ "getEntry: error searching for uid %s: %d\n", udn, rc);
}
return NULL;
diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c
index 4607251d1..6bf20b72a 100644
--- a/ldap/servers/plugins/replication/windows_protocol_util.c
+++ b/ldap/servers/plugins/replication/windows_protocol_util.c
@@ -3226,7 +3226,7 @@ windows_get_remote_entry (Private_Repl_Protocol *prp, const Slapi_DN* remote_dn,
const char *searchbase = NULL;
Slapi_Entry *found_entry = NULL;
- searchbase = slapi_sdn_get_dn(remote_dn);
+ searchbase = slapi_sdn_get_udn(remote_dn);
cres = windows_search_entry_ext(prp->conn, (char*)searchbase, filter, &found_entry, NULL, LDAP_SCOPE_BASE);
if (cres)
{
@@ -5886,13 +5886,16 @@ retry:
remote_entry = NULL;
} else
{
- slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name,"%s: windows_process_dirsync_entry: failed to fetch inbound entry.\n",agmt_get_long_name(prp->agmt));
+ slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name,
+ "%s: windows_process_dirsync_entry: failed to fetch inbound entry %s.\n",
+ agmt_get_long_name(prp->agmt), slapi_sdn_get_dn(slapi_entry_get_sdn_const(e)));
}
slapi_entry_free(local_entry);
if (rc) {
/* Something bad happened */
- slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name,"%s: windows_process_dirsync_entry: failed to update inbound entry for %s.\n",agmt_get_long_name(prp->agmt),
- slapi_sdn_get_dn(slapi_entry_get_sdn_const(e)));
+ slapi_log_error(SLAPI_LOG_REPL, windows_repl_plugin_name,
+ "%s: windows_process_dirsync_entry: failed to update inbound entry for %s.\n",
+ agmt_get_long_name(prp->agmt), slapi_sdn_get_dn(slapi_entry_get_sdn_const(e)));
}
} else
{
| 0 |
ca13f42da4d49fd30200fed960e1ad2d0fd1c763
|
389ds/389-ds-base
|
Ticket 50153 - Increase default max logs
Bug Description: The errors & audit logs default to max number of logs to 1.
This prevents the max log size rotation policy from working.
This is documented in the docs, but the problem is that this
can allow the server to fill up the FS on /var under certain
conditions.
Fix Description: Change the default max number of logs to "2". This is still
a small value, and it allows the rotation policy to be effective.
https://pagure.io/389-ds-base/issue/50153
Reviewed by: spichugi & firstyear (Thanks!!)
|
commit ca13f42da4d49fd30200fed960e1ad2d0fd1c763
Author: Mark Reynolds <[email protected]>
Date: Mon Jan 14 12:19:07 2019 -0500
Ticket 50153 - Increase default max logs
Bug Description: The errors & audit logs default to max number of logs to 1.
This prevents the max log size rotation policy from working.
This is documented in the docs, but the problem is that this
can allow the server to fill up the FS on /var under certain
conditions.
Fix Description: Change the default max number of logs to "2". This is still
a small value, and it allows the rotation policy to be effective.
https://pagure.io/389-ds-base/issue/50153
Reviewed by: spichugi & firstyear (Thanks!!)
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 163cb19e4..6b80b3e71 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -320,8 +320,8 @@ typedef void (*VFPV)(); /* takes undefined arguments */
#define SLAPD_DEFAULT_LOG_ROTATIONTIME_STR "1"
#define SLAPD_DEFAULT_LOG_ACCESS_MAXNUMLOGS 10
#define SLAPD_DEFAULT_LOG_ACCESS_MAXNUMLOGS_STR "10"
-#define SLAPD_DEFAULT_LOG_MAXNUMLOGS 1
-#define SLAPD_DEFAULT_LOG_MAXNUMLOGS_STR "1"
+#define SLAPD_DEFAULT_LOG_MAXNUMLOGS 2
+#define SLAPD_DEFAULT_LOG_MAXNUMLOGS_STR "2"
#define SLAPD_DEFAULT_LOG_EXPTIME 1
#define SLAPD_DEFAULT_LOG_EXPTIME_STR "1"
/* This is in MB */
| 0 |
23c04f65d4f86d7e3d966d3bd1ae0f35f88b7a17
|
389ds/389-ds-base
|
Corrected ndd recommendations
|
commit 23c04f65d4f86d7e3d966d3bd1ae0f35f88b7a17
Author: Nathan Kinder <[email protected]>
Date: Mon Apr 4 19:57:03 2005 +0000
Corrected ndd recommendations
diff --git a/ldap/systools/idsktune.c b/ldap/systools/idsktune.c
index d9dfd1593..2709a7cbb 100644
--- a/ldap/systools/idsktune.c
+++ b/ldap/systools/idsktune.c
@@ -2048,9 +2048,11 @@ static void ndd_tests (void)
ndd_tcp_time_wait_interval,
ndd_tcp_time_wait_interval/1000);
#ifdef NAME_NDD_CFG_FILE
- printf("A line similar to the following\nshould be added to the %s file:\n", NAME_NDD_CFG_FILE);
+ printf("An entry similar to the following\nshould be added to the %s file:\n", NAME_NDD_CFG_FILE);
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp %s %d\n\n", name_tcp_time_wait_interval, 30000);
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=%s\n", name_tcp_time_wait_interval);
+ printf("NDD_VALUE[10]=%d\n\n", 30000);
if (flag_html) printf("</PRE><P>\n");
#endif
if (flag_carrier) flag_os_bad = 1;
@@ -2140,9 +2142,11 @@ static void ndd_tests (void)
printf("NOTICE : The %s value is currently %d, which will limit the\nvalue of listen backlog which can be configured. ",
NAME_TCP_CONN_REQ_MAX_Q, ndd_tcp_conn_req_max_q);
#ifdef NAME_NDD_CFG_FILE
- printf("It can be raised by adding\nto %s, after any adb command, a line similar to:\n", NAME_NDD_CFG_FILE);
+ printf("It can be raised by adding\nto %s, after any adb command, an entry similar to:\n", NAME_NDD_CFG_FILE);
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp %s %d\n", NAME_TCP_CONN_REQ_MAX_Q, ndd_tcp_conn_req_max_q0);
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=%s\n", NAME_TCP_CONN_REQ_MAX_Q);
+ printf("NDD_VALUE[10]=%d\n\n", ndd_tcp_conn_req_max_q0);
if (flag_html) printf("</PRE><P>\n");
#endif
if (tcp_max_listen == 1024) {
@@ -2204,9 +2208,11 @@ static void ndd_tests (void)
ndd_tcp_keepalive_interval / 60000);
if (flag_html) printf("</P><P>\n");
#ifdef NAME_NDD_CFG_FILE
- printf("A line similar to the following should be added to %s:\n", NAME_NDD_CFG_FILE);
+ printf("An entry similar to the following should be added to %s:\n", NAME_NDD_CFG_FILE);
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp %s %d\n\n", NAME_TCP_KEEPALIVE_INTERVAL, 600000);
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=%s\n", NAME_TCP_KEEPALIVE_INTERVAL);
+ printf("NDD_VALUE[10]=%d\n\n", 600000);
if (flag_html) printf("</PRE><P>\n");
#endif
} else if (flag_debug) {
@@ -2224,9 +2230,11 @@ static void ndd_tests (void)
} else {
if (flag_html) printf("</P><P>\n");
#ifdef NAME_NDD_CFG_FILE
- printf("NOTICE : The %s is currently not set. This could result in\neventual server congestion. The interval can be set by adding the following\ncommand to %s:\n",NAME_TCP_KEEPALIVE_INTERVAL, NAME_NDD_CFG_FILE);
+ printf("NOTICE : The %s is currently not set. This could result in\neventual server congestion. The interval can be set by adding an entry similar to the following to %s:\n",NAME_TCP_KEEPALIVE_INTERVAL, NAME_NDD_CFG_FILE);
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp %s 60000\n",NAME_TCP_KEEPALIVE_INTERVAL);
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=%s\n", NAME_TCP_KEEPALIVE_INTERVAL);
+ printf("NDD_VALUE[10]=%d\n\n", 60000);
if (flag_html) printf("</PRE><P>\n");
#endif
printf("\n");
@@ -2247,14 +2255,16 @@ static void ndd_tests (void)
#ifdef NAME_NDD_CFG_FILE
if (client) {
printf("NOTICE : For testing on a LAN or high speed WAN, this interval can be reduced\n"
- "by adding to %s file:\n", NAME_NDD_CFG_FILE);
+ "by adding an entry similar to the following to %s file:\n", NAME_NDD_CFG_FILE);
} else {
printf("NOTICE : If the directory service is intended only for LAN or private \n"
- "high-speed WAN environment, this interval can be reduced by adding to\n"
- "%s file:\n", NAME_NDD_CFG_FILE);
+ "high-speed WAN environment, this interval can be reduced by adding an\n"
+ "entry similar to the following to %s file:\n", NAME_NDD_CFG_FILE);
}
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp tcp_rexmit_interval_initial 500\n\n");
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=tcp_rexmit_interval_initial\n");
+ printf("NDD_VALUE[10]=%d\n\n", 500);
if (flag_html) printf("</PRE><P>\n");
#endif
} else {
@@ -2281,10 +2291,12 @@ static void ndd_tests (void)
if (flag_html) printf("</P><P>\n");
#ifdef NAME_NDD_CFG_FILE
printf("NOTICE : If the directory service is intended only for LAN or private \n"
- "high-speed WAN environment, this interval can be reduced by adding to\n"
- "%s file:\n", NAME_NDD_CFG_FILE);
+ "high-speed WAN environment, this interval can be reduced by adding an entry\n"
+ "similar to the following to %s file:\n", NAME_NDD_CFG_FILE);
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp tcp_ip_abort_cinterval 10000\n\n");
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=tcp_ip_abort_cinterval\n");
+ printf("NDD_VALUE[10]=%d\n\n", 10000);
if (flag_html) printf("</PRE><P>\n");
#endif
}
@@ -2297,9 +2309,11 @@ static void ndd_tests (void)
ndd_tcp_ip_abort_cinterval/1000);
if (flag_html) printf("</P><P>\n");
#ifdef NAME_NDD_CFG_FILE
- printf("NOTICE : If the directory service is intended only for LAN or private \nhigh-speed WAN environment, this interval can be reduced by adding to\n%s:\n", NAME_NDD_CFG_FILE);
+ printf("NOTICE : If the directory service is intended only for LAN or private \nhigh-speed WAN environment, this interval can be reduced by adding an entry\nsimilar to the following to %s:\n", NAME_NDD_CFG_FILE);
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp tcp_ip_abort_interval 60000\n\n");
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=tcp_ip_abort_interval\n");
+ printf("NDD_VALUE[10]=%d\n\n", 60000);
if (flag_html) printf("</PRE><P>\n");
#endif
}
@@ -2344,9 +2358,11 @@ static void ndd_tests (void)
65536 - ndd_tcp_smallest_anon_port);
if (flag_carrier) flag_os_bad = 1;
#ifdef NAME_NDD_CFG_FILE
- printf("More ports can be made available by\nadding a line to %s:\n", NAME_NDD_CFG_FILE);
+ printf("More ports can be made available by\nadding an entry similar to\nthe following to %s:\n", NAME_NDD_CFG_FILE);
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp tcp_smallest_anon_port 8192\n");
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=tcp_smallest_anon_port\n");
+ printf("NDD_VALUE[10]=%d\n\n", 8192);
if (flag_html) printf("</PRE><P>\n");
#endif
printf("\n");
@@ -2417,9 +2433,11 @@ static void ndd_tests (void)
ndd_tcp_deferred_ack_interval);
if (flag_carrier) flag_os_bad = 1;
#ifdef NAME_NDD_CFG_FILE
- printf("This line can be added to the %s file:\n", NAME_NDD_CFG_FILE);
+ printf("An entry similar to the following can be\nadded to the %s file:\n", NAME_NDD_CFG_FILE);
if (flag_html) printf("</P><PRE>\n");
- printf("ndd -set /dev/tcp tcp_deferred_ack_interval 5\n");
+ printf("TRANSPORT_NAME[10]=tcp\n");
+ printf("NDD_NAME[10]=tcp_deferred_ack_interval\n");
+ printf("NDD_VALUE[10]=%d\n\n", 5);
if (flag_html) printf("</PRE><P>\n");
#endif
printf("\n");
| 0 |
8583012c75c460ef2ba59a0909010a638cbb851c
|
389ds/389-ds-base
|
Ticket 47936: Create a global lock to serialize write operations over several backends
Bug Description:
Some txn-post plugin may trigger operation on other backend.
This can easily conduct to deadlock, with locks (backend locks, plugin locks
or db page lock) taken in opposite order.
Fix Description:
Creating a global lock that will be acquired when updating any backend
(any ldbm database, cn=config, cn=schema).
It introduces a new config attribute:
dn: cn=config
nsslapd-global-backend-lock: <on|off(default)>
https://fedorahosted.org/389/ticket/47936
Reviewed by: Rich, Ludwig, Noriko (thanks to all of you !)
Platforms tested: F20/F21
Flag Day: no
Doc impact: no
|
commit 8583012c75c460ef2ba59a0909010a638cbb851c
Author: Thierry bordaz (tbordaz) <[email protected]>
Date: Thu Feb 26 13:48:57 2015 +0100
Ticket 47936: Create a global lock to serialize write operations over several backends
Bug Description:
Some txn-post plugin may trigger operation on other backend.
This can easily conduct to deadlock, with locks (backend locks, plugin locks
or db page lock) taken in opposite order.
Fix Description:
Creating a global lock that will be acquired when updating any backend
(any ldbm database, cn=config, cn=schema).
It introduces a new config attribute:
dn: cn=config
nsslapd-global-backend-lock: <on|off(default)>
https://fedorahosted.org/389/ticket/47936
Reviewed by: Rich, Ludwig, Noriko (thanks to all of you !)
Platforms tested: F20/F21
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index ddf6bb017..21a560e48 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -3968,6 +3968,9 @@ void dblayer_lock_backend(backend *be)
ldbm_instance *inst;
PR_ASSERT(NULL != be);
+ if (global_backend_lock_requested()) {
+ global_backend_lock_lock();
+ }
inst = (ldbm_instance *) be->be_instance_info;
PR_ASSERT(NULL != inst);
@@ -3987,6 +3990,10 @@ void dblayer_unlock_backend(backend *be)
if (NULL != inst->inst_db_mutex) {
PR_ExitMonitor(inst->inst_db_mutex);
}
+
+ if (global_backend_lock_requested()) {
+ global_backend_lock_unlock();
+ }
}
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
index 22f41eef3..08c185c63 100644
--- a/ldap/servers/slapd/backend.c
+++ b/ldap/servers/slapd/backend.c
@@ -43,6 +43,9 @@
/* backend.c - Slapi_Backend methods */
#include "slap.h"
+#include "nspr.h"
+
+static PRMonitor *global_backend_mutex = NULL;
void
be_init( Slapi_Backend *be, const char *type, const char *name, int isprivate, int logchanges, int sizelimit, int timelimit )
@@ -147,6 +150,32 @@ be_done(Slapi_Backend *be)
}
}
+void
+global_backend_lock_init()
+{
+ global_backend_mutex = PR_NewMonitor();
+}
+
+int
+global_backend_lock_requested()
+{
+ return config_get_global_backend_lock();
+}
+void
+global_backend_lock_lock()
+{
+ if (global_backend_mutex) {
+ PR_EnterMonitor(global_backend_mutex);
+ }
+}
+
+void
+global_backend_lock_unlock() {
+ if (global_backend_mutex) {
+ PR_ExitMonitor(global_backend_mutex);
+ }
+}
+
void
slapi_be_delete_onexit (Slapi_Backend *be)
{
diff --git a/ldap/servers/slapd/dse.c b/ldap/servers/slapd/dse.c
index b9ffc5f37..55e5aa5a8 100644
--- a/ldap/servers/slapd/dse.c
+++ b/ldap/servers/slapd/dse.c
@@ -1828,6 +1828,7 @@ dse_modify(Slapi_PBlock *pb) /* JCM There should only be one exit point from thi
int need_be_postop = 0;
int plugin_started = 0;
int internal_op = 0;
+ PRBool global_lock_owned = PR_FALSE;
PR_ASSERT(pb);
if (slapi_pblock_get( pb, SLAPI_PLUGIN_PRIVATE, &pdse ) < 0 ||
@@ -1871,6 +1872,12 @@ dse_modify(Slapi_PBlock *pb) /* JCM There should only be one exit point from thi
/* Modify a copy of the entry*/
ecc = slapi_entry_dup( ec );
err = entry_apply_mods( ecc, mods );
+
+ /* Possibly acquire the global backend lock */
+ if (global_backend_lock_requested()) {
+ global_backend_lock_lock();
+ global_lock_owned = PR_TRUE;
+ }
/* XXXmcs: should we expand objectclass values here?? */
/* give the dse callbacks the first crack at the modify */
@@ -2069,7 +2076,9 @@ dse_modify(Slapi_PBlock *pb) /* JCM There should only be one exit point from thi
}
}
}
-
+ if (global_lock_owned) {
+ global_backend_lock_unlock();
+ }
slapi_send_ldap_result( pb, returncode, NULL, returntext[0] ? returntext : NULL, 0, NULL );
return dse_modify_return(retval, ec, ecc);
@@ -2224,6 +2233,7 @@ dse_add(Slapi_PBlock *pb) /* JCM There should only be one exit point from this f
Slapi_DN *sdn = NULL;
Slapi_DN parent;
int need_be_postop = 0;
+ PRBool global_lock_owned = PR_FALSE;
/*
* Get the database, the dn and the entry to add
@@ -2345,6 +2355,12 @@ dse_add(Slapi_PBlock *pb) /* JCM There should only be one exit point from this f
goto done;
}
+ /* Possibly acquire the global backend lock */
+ if (global_backend_lock_requested()) {
+ global_backend_lock_lock();
+ global_lock_owned = PR_TRUE;
+ }
+
if(dse_call_callback(pdse, pb, SLAPI_OPERATION_ADD, DSE_FLAG_PREOP, e,
NULL, &returncode, returntext)!=SLAPI_DSE_CALLBACK_OK) {
if (!returncode) {
@@ -2424,7 +2440,9 @@ dse_add(Slapi_PBlock *pb) /* JCM There should only be one exit point from this f
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode);
}
}
-
+ if (global_lock_owned) {
+ global_backend_lock_unlock();
+ }
slapi_send_ldap_result(pb, returncode, NULL, returntext[0] ? returntext : NULL, 0, NULL );
return dse_add_return(rc, e);
}
@@ -2456,6 +2474,7 @@ dse_delete(Slapi_PBlock *pb) /* JCM There should only be one exit point from thi
Slapi_Entry *ec = NULL; /* copy of entry to delete */
Slapi_Entry *orig_entry = NULL;
int need_be_postop = 0;
+ PRBool global_lock_owned = PR_FALSE;
/*
* Get the database and the dn
@@ -2500,6 +2519,12 @@ dse_delete(Slapi_PBlock *pb) /* JCM There should only be one exit point from thi
goto done;
}
+ /* Possibly acquire the global backend lock */
+ if (global_backend_lock_requested()) {
+ global_backend_lock_lock();
+ global_lock_owned = PR_TRUE;
+ }
+
if(dse_call_callback(pdse, pb, SLAPI_OPERATION_DELETE, DSE_FLAG_PREOP, ec, NULL, &returncode,returntext)==SLAPI_DSE_CALLBACK_OK) {
slapi_pblock_set(pb, SLAPI_DELETE_BEPREOP_ENTRY, ec);
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &returncode);
@@ -2552,6 +2577,9 @@ done:
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &returncode);
}
}
+ if (global_lock_owned) {
+ global_backend_lock_unlock();
+ }
if (returncode && !returntext[0]) {
char *ldap_result_message = NULL;
slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &ldap_result_message);
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 3fa7a9fe4..d03d39b3e 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -273,6 +273,7 @@ slapi_int_t init_listen_backlog_size;
slapi_onoff_t init_ignore_time_skew;
slapi_onoff_t init_dynamic_plugins;
slapi_onoff_t init_cn_uses_dn_syntax_in_dns;
+slapi_onoff_t init_global_backend_local;
#if defined (LINUX)
slapi_int_t init_malloc_mxfast;
slapi_int_t init_malloc_trim_threshold;
@@ -1122,7 +1123,11 @@ static struct config_get_and_set {
{CONFIG_IGNORE_TIME_SKEW, config_set_ignore_time_skew,
NULL, 0,
(void**)&global_slapdFrontendConfig.ignore_time_skew,
- CONFIG_ON_OFF, (ConfigGetFunc)config_get_ignore_time_skew, &init_ignore_time_skew}
+ CONFIG_ON_OFF, (ConfigGetFunc)config_get_ignore_time_skew, &init_ignore_time_skew},
+ {CONFIG_GLOBAL_BACKEND_LOCK, config_set_global_backend_lock,
+ NULL, 0,
+ (void**)&global_slapdFrontendConfig.global_backend_lock,
+ CONFIG_ON_OFF, (ConfigGetFunc)config_get_global_backend_lock, &init_global_backend_local}
#ifdef MEMPOOL_EXPERIMENTAL
,{CONFIG_MEMPOOL_SWITCH_ATTRIBUTE, config_set_mempool_switch,
NULL, 0,
@@ -1570,6 +1575,7 @@ FrontendConfig_init () {
init_ignore_time_skew = cfg->ignore_time_skew = LDAP_OFF;
init_dynamic_plugins = cfg->dynamic_plugins = LDAP_OFF;
init_cn_uses_dn_syntax_in_dns = cfg->cn_uses_dn_syntax_in_dns = LDAP_OFF;
+ init_global_backend_local = LDAP_OFF;
#if defined(LINUX)
init_malloc_mxfast = cfg->malloc_mxfast = DEFAULT_MALLOC_UNSET;
init_malloc_trim_threshold = cfg->malloc_trim_threshold = DEFAULT_MALLOC_UNSET;
@@ -7244,6 +7250,18 @@ config_get_ignore_time_skew(void)
return retVal;
}
+int
+config_get_global_backend_lock()
+{
+ int retVal;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ CFG_LOCK_READ(slapdFrontendConfig);
+ retVal = slapdFrontendConfig->global_backend_lock;
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+
+ return retVal;
+}
+
int
config_set_enable_turbo_mode( const char *attrname, char *value,
char *errorbuf, int apply )
@@ -7283,6 +7301,19 @@ config_set_ignore_time_skew( const char *attrname, char *value,
return retVal;
}
+int
+config_set_global_backend_lock( const char *attrname, char *value,
+ char *errorbuf, int apply )
+{
+ int retVal = LDAP_SUCCESS;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ retVal = config_set_onoff(attrname, value,
+ &(slapdFrontendConfig->global_backend_lock),
+ errorbuf, apply);
+ return retVal;
+}
+
int
config_set_plugin_logging( const char *attrname, char *value,
char *errorbuf, int apply )
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 6bad2a08d..a3ba99b9b 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1057,6 +1057,7 @@ main( int argc, char **argv)
/* initialize the normalized DN cache */
ndn_cache_init();
+ global_backend_lock_init();
/*
* Detach ourselves from the terminal (unless running in debug mode).
* We must detach before we start any threads since detach forks() on
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 35b45e317..bc88221e9 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -213,6 +213,10 @@ void be_addsuffix(Slapi_Backend *be,const Slapi_DN *suffix);
Slapi_DN *be_getconfigdn(Slapi_Backend *be,Slapi_DN *dn);
Slapi_DN *be_getmonitordn(Slapi_Backend *be,Slapi_DN *dn);
int be_writeconfig (Slapi_Backend *be);
+void global_backend_lock_init();
+int global_backend_lock_requested();
+void global_backend_lock_lock();
+void global_backend_lock_unlock();
/*
* backend_manager.c
@@ -404,6 +408,7 @@ int config_set_return_orig_type_switch(const char *attrname, char *value, char *
int config_set_sasl_maxbufsize(const char *attrname, char *value, char *errorbuf, int apply );
int config_set_listen_backlog_size(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_ignore_time_skew(const char *attrname, char *value, char *errorbuf, int apply);
+int config_set_global_backend_lock(const char *attrname, char *value, char *errorbuf, int apply);
#if defined(LINUX)
int config_set_malloc_mxfast(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_malloc_trim_threshold(const char *attrname, char *value, char *errorbuf, int apply);
@@ -596,6 +601,7 @@ int config_get_cn_uses_dn_syntax_in_dns();
PLHashNumber hashNocaseString(const void *key);
PRIntn hashNocaseCompare(const void *v1, const void *v2);
int config_get_ignore_time_skew();
+int config_get_global_backend_lock();
#if defined(LINUX)
int config_get_malloc_mxfast();
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 3db8fe7c5..361263297 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2117,6 +2117,7 @@ typedef struct _slapdEntryPoints {
#define CONFIG_REWRITE_RFC1274_ATTRIBUTE "nsslapd-rewrite-rfc1274"
#define CONFIG_PLUGIN_BINDDN_TRACKING_ATTRIBUTE "nsslapd-plugin-binddn-tracking"
#define CONFIG_MODDN_ACI_ATTRIBUTE "nsslapd-moddn-aci"
+#define CONFIG_GLOBAL_BACKEND_LOCK "nsslapd-global-backend-lock"
#define CONFIG_CONFIG_ATTRIBUTE "nsslapd-config"
#define CONFIG_INSTDIR_ATTRIBUTE "nsslapd-instancedir"
@@ -2423,6 +2424,7 @@ typedef struct _slapdFrontendConfig {
slapi_onoff_t ignore_time_skew;
slapi_onoff_t dynamic_plugins; /* allow plugins to be dynamically enabled/disabled */
slapi_onoff_t cn_uses_dn_syntax_in_dns; /* indicates the cn value in dns has dn syntax */
+ slapi_onoff_t global_backend_lock;
#if defined(LINUX)
int malloc_mxfast; /* mallopt M_MXFAST */
int malloc_trim_threshold; /* mallopt M_TRIM_THRESHOLD */
| 0 |
54e4fca35899550e0c25b25e7f7c756302d258ce
|
389ds/389-ds-base
|
Ticket 49246 - ns-slapd crashes in role cache creation
Bug Description: Using a nested filter for a filtered role can
cause a crash. This was due to the way the filter
was being checked by the roles plugin.
Fix Description: Properly resurse over a filter.
https://pagure.io/389-ds-base/issue/49246
Reviewed by: firstyear & tbordaz(Thanks!!)
|
commit 54e4fca35899550e0c25b25e7f7c756302d258ce
Author: Mark Reynolds <[email protected]>
Date: Tue May 9 16:31:52 2017 -0400
Ticket 49246 - ns-slapd crashes in role cache creation
Bug Description: Using a nested filter for a filtered role can
cause a crash. This was due to the way the filter
was being checked by the roles plugin.
Fix Description: Properly resurse over a filter.
https://pagure.io/389-ds-base/issue/49246
Reviewed by: firstyear & tbordaz(Thanks!!)
diff --git a/dirsrvtests/tests/tickets/ticket49122_test.py b/dirsrvtests/tests/tickets/ticket49122_test.py
index ff1e8d1d0..09451225a 100644
--- a/dirsrvtests/tests/tickets/ticket49122_test.py
+++ b/dirsrvtests/tests/tickets/ticket49122_test.py
@@ -2,8 +2,7 @@ import time
import ldap
import logging
import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
+from lib389 import Entry
from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
@@ -19,6 +18,15 @@ log = logging.getLogger(__name__)
USER_DN = 'uid=user,' + DEFAULT_SUFFIX
ROLE_DN = 'cn=Filtered_Role_That_Includes_Empty_Role,' + DEFAULT_SUFFIX
+filters = ['nsrole=cn=empty,dc=example,dc=com',
+ '(nsrole=cn=empty,dc=example,dc=com)',
+ '(&(nsrole=cn=empty,dc=example,dc=com))',
+ '(!(nsrole=cn=empty,dc=example,dc=com))',
+ '(&(|(objectclass=person)(sn=app*))(userpassword=*))',
+ '(&(|(objectclass=person)(nsrole=cn=empty,dc=example,dc=com))(userpassword=*))',
+ '(&(|(nsrole=cn=empty,dc=example,dc=com)(sn=app*))(userpassword=*))',
+ '(&(|(objectclass=person)(sn=app*))(nsrole=cn=empty,dc=example,dc=com))',
+ '(&(|(&(cn=*)(objectclass=person)(nsrole=cn=empty,dc=example,dc=com)))(uid=*))']
def test_ticket49122(topo):
@@ -29,18 +37,6 @@ def test_ticket49122(topo):
topo.standalone.plugins.enable(name=PLUGIN_ROLES)
topo.standalone.restart()
- # Add invalid role
- try:
- topo.standalone.add_s(Entry((
- ROLE_DN, {'objectclass': ['top', 'ldapsubentry', 'nsroledefinition',
- 'nscomplexroledefinition', 'nsfilteredroledefinition'],
- 'cn': 'Filtered_Role_That_Includes_Empty_Role',
- 'nsRoleFilter': '(!(nsrole=cn=This_Is_An_Empty_Managed_NsRoleDefinition,dc=example,dc=com))',
- 'description': 'A filtered role with filter that will crash the server'})))
- except ldap.LDAPError as e:
- topo.standalone.log.fatal('Failed to add filtered role: error ' + e.message['desc'])
- assert False
-
# Add test user
try:
topo.standalone.add_s(Entry((
@@ -51,16 +47,39 @@ def test_ticket49122(topo):
assert False
if DEBUGGING:
- # Add debugging steps(if any)...
print("Attach gdb")
time.sleep(20)
- # Search for the role
- try:
- topo.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nsrole'])
- except ldap.LDAPError as e:
- topo.standalone.log.fatal('Search failed: error ' + str(e))
- assert False
+ # Loop over filters
+ for role_filter in filters:
+ log.info('Testing filter: ' + role_filter)
+
+ # Add invalid role
+ try:
+ topo.standalone.add_s(Entry((
+ ROLE_DN, {'objectclass': ['top', 'ldapsubentry', 'nsroledefinition',
+ 'nscomplexroledefinition', 'nsfilteredroledefinition'],
+ 'cn': 'Filtered_Role_That_Includes_Empty_Role',
+ 'nsRoleFilter': role_filter,
+ 'description': 'A filtered role with filter that will crash the server'})))
+ except ldap.LDAPError as e:
+ topo.standalone.log.fatal('Failed to add filtered role: error ' + e.message['desc'])
+ assert False
+
+ # Search for the role
+ try:
+ topo.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nsrole'])
+ except ldap.LDAPError as e:
+ topo.standalone.log.fatal('Search failed: error ' + str(e))
+ assert False
+
+ # Cleanup
+ try:
+ topo.standalone.delete_s(ROLE_DN)
+ except ldap.LDAPError as e:
+ topo.standalone.log.fatal('delete failed: error ' + str(e))
+ assert False
+ time.sleep(1)
topo.standalone.log.info('Test Passed')
diff --git a/ldap/servers/plugins/roles/roles_cache.c b/ldap/servers/plugins/roles/roles_cache.c
index e48b5fc34..43b709100 100644
--- a/ldap/servers/plugins/roles/roles_cache.c
+++ b/ldap/servers/plugins/roles/roles_cache.c
@@ -1074,20 +1074,38 @@ static int roles_cache_create_role_under(roles_cache_def** roles_cache_suffix, S
}
/*
- * Check that we are not using nsrole in the filter
+ * Check that we are not using nsrole in the filter, recurse over all the
+ * nested filters.
*/
static int roles_check_filter(Slapi_Filter *filter_list)
{
Slapi_Filter *f;
char *type = NULL;
- for ( f = slapi_filter_list_first( filter_list );
- f != NULL;
- f = slapi_filter_list_next( filter_list, f ) )
- {
- slapi_filter_get_attribute_type(f, &type);
- if (strcasecmp(type, NSROLEATTR) == 0){
- return -1;
+ f = slapi_filter_list_first( filter_list );
+ if (f == NULL){
+ /* Single filter */
+ if (slapi_filter_get_attribute_type(filter_list, &type) == 0){
+ if (strcasecmp(type, NSROLEATTR) == 0){
+ return -1;
+ }
+ }
+ }
+ for ( ; f != NULL; f = slapi_filter_list_next(filter_list, f) ){
+ /* Complex filter */
+ if (slapi_filter_list_first(f)) {
+ /* Another filter list - recurse */
+ if (roles_check_filter(f) == -1){
+ /* Done, break out */
+ return -1;
+ }
+ } else {
+ /* Not a filter list, so check the type */
+ if (slapi_filter_get_attribute_type(f, &type) == 0){
+ if (strcasecmp(type, NSROLEATTR) == 0){
+ return -1;
+ }
+ }
}
}
| 0 |
824b3019beffa5bf2bc5ab2a2a3e579d50833577
|
389ds/389-ds-base
|
Ticket #47504 idlistscanlimit per index/type/value
https://fedorahosted.org/389/ticket/47504
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: Added a new attribute nsIndexIDListScanLimit to nsIndex
nsIndexIDListScanLimit: limit=NNN [type=eq[,sub,...]] [flags=ADD[,XXX..]] [values=val[,val...]]
* The limit is the idlistscanlimit to apply. The value can be -1 (unlimited), 0
(do not use the index at all), or a number to use for the idlistscanlimit.
* The type is a comma-delimited list of index types (eq,sub,etc.) to which the
limit applies. The index type must be one of the index types configured for the index (you can't configure an id list for type=sub if you do not already have
a substring index configured for the attribute).
* The flags are a comma-delimited list of additional criteria which must match.
** flags ADD - only apply the limit when the index is used in an AND (&) filter
The values are a comma-delimited list of values which must match in the search
filter in order for the limit to be applied. Since the matches are done one
at a time (we evaluate one filter component at a time), the values will match
if any of the values match.
* The values must be used with only one type at a time. If values are specified,
type must be specified, and type must be a type that deals with values, such
as eq or sub. There must be only one type specified - you can't specify values
if you use type=eq,pres or otherwise specify more than one type. The values
must correspond to the index type (eq, sub), and must correspond to the syntax
of the attribute to which the index is applied - that is, if you have
attribute uidNumber (integer) and it is indexed for eq, you can't specify
type=eq values=abc because "abc" is not integer syntax.
If the values contain spaces, commas, nulls, other values which require
escapes, the LDAP filter escape syntax should be used - backslash '\' followed
by the 2 hex digit code for the character.
The values are processed as if they were filter values - so for "sub" values,
values like "values=*sm*ith*" will be processed as if they were values in a
substring search filter like (sn=*sm*ith*)
* nsIndexIDListScanLimit is multi-valued. If a search matches more than one
nsIndexIDListScanLimit, the rules are applied in priority order.
The priority is as follows, from highest to lowest:
* * match type, flags, value
* * match type, value
* * match type, flags
* * match type
* * match flags
* For example, if you have
* dn: cn=objectclass,...
* objectclass: nsIndex
* nsIndexType: eq
* nsIndexIDListScanLimit: limit=0 type=eq flags=AND value=inetOrgPerson
* nsIndexIDListScanLimit: limit=1 type=eq value=inetOrgPerson
* nsIndexIDListScanLimit: limit=2 type=eq flags=AND
* nsIndexIDListScanLimit: limit=3 type=eq
* nsIndexIDListScanLimit: limit=4 flags=AND
* nsIndexIDListScanLimit: limit=5
* If the search filter is (&(objectclass=inetOrgPerson)(uid=foo)) then the limit=0 because all
* 3 of type, flags, and value match
* If the search filter is (objectclass=inetOrgPerson) then the limit=1 because type and value match
* but flag does not
* If the search filter is (&(objectclass=posixAccount)(uid=foo)) the the limit=2 because type and
* flags match
* If the search filter is (objectclass=posixAccount) then the limit=3 because only the type matches
* If the search filter is (&(objectclass=*account*)(objectclass=*)) then the limit=4 because only
* flags match but not the types (sub and pres)
* If the search filter is (objectclass=*account*) then the limit=5 because only the attribute matches
* but none of flags, type, or value matches
To add in testing/debugging, the LDAP_DEBUG_BACKLDBM log level is used to
print information about searches which exceed the idlistscanlimit.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: yes - document new attribute
|
commit 824b3019beffa5bf2bc5ab2a2a3e579d50833577
Author: Rich Megginson <[email protected]>
Date: Mon Sep 16 09:49:14 2013 -0600
Ticket #47504 idlistscanlimit per index/type/value
https://fedorahosted.org/389/ticket/47504
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: Added a new attribute nsIndexIDListScanLimit to nsIndex
nsIndexIDListScanLimit: limit=NNN [type=eq[,sub,...]] [flags=ADD[,XXX..]] [values=val[,val...]]
* The limit is the idlistscanlimit to apply. The value can be -1 (unlimited), 0
(do not use the index at all), or a number to use for the idlistscanlimit.
* The type is a comma-delimited list of index types (eq,sub,etc.) to which the
limit applies. The index type must be one of the index types configured for the index (you can't configure an id list for type=sub if you do not already have
a substring index configured for the attribute).
* The flags are a comma-delimited list of additional criteria which must match.
** flags ADD - only apply the limit when the index is used in an AND (&) filter
The values are a comma-delimited list of values which must match in the search
filter in order for the limit to be applied. Since the matches are done one
at a time (we evaluate one filter component at a time), the values will match
if any of the values match.
* The values must be used with only one type at a time. If values are specified,
type must be specified, and type must be a type that deals with values, such
as eq or sub. There must be only one type specified - you can't specify values
if you use type=eq,pres or otherwise specify more than one type. The values
must correspond to the index type (eq, sub), and must correspond to the syntax
of the attribute to which the index is applied - that is, if you have
attribute uidNumber (integer) and it is indexed for eq, you can't specify
type=eq values=abc because "abc" is not integer syntax.
If the values contain spaces, commas, nulls, other values which require
escapes, the LDAP filter escape syntax should be used - backslash '\' followed
by the 2 hex digit code for the character.
The values are processed as if they were filter values - so for "sub" values,
values like "values=*sm*ith*" will be processed as if they were values in a
substring search filter like (sn=*sm*ith*)
* nsIndexIDListScanLimit is multi-valued. If a search matches more than one
nsIndexIDListScanLimit, the rules are applied in priority order.
The priority is as follows, from highest to lowest:
* * match type, flags, value
* * match type, value
* * match type, flags
* * match type
* * match flags
* For example, if you have
* dn: cn=objectclass,...
* objectclass: nsIndex
* nsIndexType: eq
* nsIndexIDListScanLimit: limit=0 type=eq flags=AND value=inetOrgPerson
* nsIndexIDListScanLimit: limit=1 type=eq value=inetOrgPerson
* nsIndexIDListScanLimit: limit=2 type=eq flags=AND
* nsIndexIDListScanLimit: limit=3 type=eq
* nsIndexIDListScanLimit: limit=4 flags=AND
* nsIndexIDListScanLimit: limit=5
* If the search filter is (&(objectclass=inetOrgPerson)(uid=foo)) then the limit=0 because all
* 3 of type, flags, and value match
* If the search filter is (objectclass=inetOrgPerson) then the limit=1 because type and value match
* but flag does not
* If the search filter is (&(objectclass=posixAccount)(uid=foo)) the the limit=2 because type and
* flags match
* If the search filter is (objectclass=posixAccount) then the limit=3 because only the type matches
* If the search filter is (&(objectclass=*account*)(objectclass=*)) then the limit=4 because only
* flags match but not the types (sub and pres)
* If the search filter is (objectclass=*account*) then the limit=5 because only the attribute matches
* but none of flags, type, or value matches
To add in testing/debugging, the LDAP_DEBUG_BACKLDBM log level is used to
print information about searches which exceed the idlistscanlimit.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: yes - document new attribute
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
index 8ef702d02..b9baae75f 100644
--- a/ldap/schema/01core389.ldif
+++ b/ldap/schema/01core389.ldif
@@ -66,6 +66,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.593 NAME 'nsSNMPName' DESC 'Netscape def
attributeTypes: ( 2.16.840.1.113730.3.1.242 NAME 'nsSystemIndex' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.327 NAME 'nsIndexType' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.328 NAME 'nsMatchingRule' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2161 NAME 'nsIndexIDListScanLimit' DESC 'fine grained idlistscanlimit - per index/type/value' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.542 NAME 'nsUniqueId' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.543 NAME 'nsState' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.5 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.544 NAME 'nsParentUniqueId' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
@@ -158,7 +159,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2156 NAME 'nsslapd-sasl-max-buffer-size'
#
objectClasses: ( 2.16.840.1.113730.3.2.40 NAME 'directoryServerFeature' DESC 'Netscape defined objectclass' SUP top MAY ( oid $ cn $ multiLineDescription ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.41 NAME 'nsslapdPlugin' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsslapd-pluginPath $ nsslapd-pluginInitFunc $ nsslapd-pluginType $ nsslapd-pluginId $ nsslapd-pluginVersion $ nsslapd-pluginVendor $ nsslapd-pluginDescription $ nsslapd-pluginEnabled ) MAY ( nsslapd-pluginConfigArea $ nsslapd-plugin-depends-on-type ) X-ORIGIN 'Netscape Directory Server' )
-objectClasses: ( 2.16.840.1.113730.3.2.44 NAME 'nsIndex' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsSystemIndex ) MAY ( description $ nsIndexType $ nsMatchingRule ) X-ORIGIN 'Netscape Directory Server' )
+objectClasses: ( 2.16.840.1.113730.3.2.44 NAME 'nsIndex' DESC 'Netscape defined objectclass' SUP top MUST ( cn $ nsSystemIndex ) MAY ( description $ nsIndexType $ nsMatchingRule $ nsIndexIDListScanLimit ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.109 NAME 'nsBackendInstance' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.110 NAME 'nsMappingTree' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.104 NAME 'nsContainer' DESC 'Netscape defined objectclass' SUP top MUST ( CN ) X-ORIGIN 'Netscape Directory Server' )
diff --git a/ldap/servers/slapd/back-ldbm/ancestorid.c b/ldap/servers/slapd/back-ldbm/ancestorid.c
index 2f32f8f3e..8722b6805 100644
--- a/ldap/servers/slapd/back-ldbm/ancestorid.c
+++ b/ldap/servers/slapd/back-ldbm/ancestorid.c
@@ -1008,7 +1008,7 @@ int ldbm_ancestorid_read_ext(
bv.bv_val = keybuf;
bv.bv_len = PR_snprintf(keybuf, sizeof(keybuf), "%lu", (u_long)id);
- *idl = index_read_ext_allids(be, LDBM_ANCESTORID_STR, indextype_EQUALITY, &bv, txn, &ret, NULL, allidslimit);
+ *idl = index_read_ext_allids(NULL, be, LDBM_ANCESTORID_STR, indextype_EQUALITY, &bv, txn, &ret, NULL, allidslimit);
return ret;
}
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index c0bea8d21..10489e3c7 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -462,6 +462,14 @@ typedef int (*dup_compare_fn_type)(
#endif
const DBT *,const DBT *);
+struct index_idlistsizeinfo {
+ int ai_idlistsizelimit; /* max id list size */
+ int ai_indextype; /* index type */
+ unsigned int ai_flags;
+#define INDEX_ALLIDS_FLAG_AND 0x01
+ Slapi_ValueSet *ai_values; /* index keys to apply the max id list size to */
+};
+
/* for the cache of attribute information (which are indexed, etc.) */
struct attrinfo {
char *ai_type; /* type name (cn, sn, ...) */
@@ -510,6 +518,7 @@ struct attrinfo {
* the default length triplet is 2, 3, 2.
*/
Slapi_Attr ai_sattr; /* interface to syntax and matching rule plugins */
+ DataList *ai_idlistinfo; /* fine grained id list */
};
#define MAXDBCACHE 20
diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c
index 64b4e2db3..971e10b6c 100644
--- a/ldap/servers/slapd/back-ldbm/filterindex.c
+++ b/ldap/servers/slapd/back-ldbm/filterindex.c
@@ -67,6 +67,7 @@ static IDList * range_candidates(
);
static IDList *
keys2idl(
+ Slapi_PBlock *pb,
backend *be,
char *type,
const char *indextype,
@@ -313,7 +314,7 @@ ava_candidates(
ivals=ptr;
slapi_attr_assertion2keys_ava_sv( &sattr, &tmp, (Slapi_Value ***)&ivals, LDAP_FILTER_EQUALITY_FAST);
- idl = keys2idl( be, type, indextype, ivals, err, &unindexed, &txn, allidslimit );
+ idl = keys2idl( pb, be, type, indextype, ivals, err, &unindexed, &txn, allidslimit );
if ( unindexed ) {
unsigned int opnote = SLAPI_OP_NOTE_UNINDEXED;
slapi_pblock_set( pb, SLAPI_OPERATION_NOTES, &opnote );
@@ -345,7 +346,7 @@ ava_candidates(
idl = idl_allids( be );
goto done;
}
- idl = keys2idl( be, type, indextype, ivals, err, &unindexed, &txn, allidslimit );
+ idl = keys2idl( pb, be, type, indextype, ivals, err, &unindexed, &txn, allidslimit );
if ( unindexed ) {
unsigned int opnote = SLAPI_OP_NOTE_UNINDEXED;
slapi_pblock_set( pb, SLAPI_OPERATION_NOTES, &opnote );
@@ -382,7 +383,7 @@ presence_candidates(
return( NULL );
}
slapi_pblock_get(pb, SLAPI_TXN, &txn.back_txn_txn);
- idl = index_read_ext_allids( be, type, indextype_PRESENCE,
+ idl = index_read_ext_allids( pb, be, type, indextype_PRESENCE,
NULL, &txn, err, &unindexed, allidslimit );
if ( unindexed ) {
@@ -491,7 +492,7 @@ extensible_candidates(
{
int unindexed = 0;
IDList* idl3 = (mrOP == SLAPI_OP_EQUAL) ?
- index_read_ext_allids(be, mrTYPE, mrOID, *key, &txn,
+ index_read_ext_allids(pb, be, mrTYPE, mrOID, *key, &txn,
err, &unindexed, allidslimit) :
index_range_read_ext(pb, be, mrTYPE, mrOID, mrOP,
*key, NULL, 0, &txn, err, allidslimit);
@@ -963,7 +964,7 @@ substring_candidates(
* IDLists together.
*/
slapi_pblock_get(pb, SLAPI_TXN, &txn.back_txn_txn);
- idl = keys2idl( be, type, indextype_SUB, ivals, err, &unindexed, &txn, allidslimit );
+ idl = keys2idl( pb, be, type, indextype_SUB, ivals, err, &unindexed, &txn, allidslimit );
if ( unindexed ) {
slapi_pblock_set( pb, SLAPI_OPERATION_NOTES, &opnote );
pagedresults_set_unindexed( pb->pb_conn, pb->pb_op, pr_idx );
@@ -977,6 +978,7 @@ substring_candidates(
static IDList *
keys2idl(
+ Slapi_PBlock *pb,
backend *be,
char *type,
const char *indextype,
@@ -996,7 +998,7 @@ keys2idl(
for ( i = 0; ivals[i] != NULL; i++ ) {
IDList *idl2;
- idl2 = index_read_ext_allids( be, type, indextype, slapi_value_get_berval(ivals[i]), txn, err, unindexed, allidslimit );
+ idl2 = index_read_ext_allids( pb, be, type, indextype, slapi_value_get_berval(ivals[i]), txn, err, unindexed, allidslimit );
#ifdef LDAP_DEBUG
/* XXX if ( slapd_ldap_debug & LDAP_DEBUG_TRACE ) { XXX */
diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c
index 2b52f33fc..50ad5cb15 100644
--- a/ldap/servers/slapd/back-ldbm/idl_new.c
+++ b/ldap/servers/slapd/back-ldbm/idl_new.c
@@ -274,7 +274,7 @@ idl_new_fetch(
}
memcpy(&id, dataret.data, sizeof(ID));
if (id == lastid) { /* dup */
- LDAPDebug1Arg(LDAP_DEBUG_TRACE, "Detedted duplicate id "
+ LDAPDebug1Arg(LDAP_DEBUG_TRACE, "Detected duplicate id "
"%d due to DB_MULTIPLE error - skipping\n",
id);
continue; /* get next one */
@@ -293,14 +293,17 @@ idl_new_fetch(
}
LDAPDebug(LDAP_DEBUG_TRACE, "bulk fetch buffer nids=%d\n", count, 0, 0);
-#if defined(DB_ALLIDS_ON_READ)
+#if defined(DB_ALLIDS_ON_READ)
/* enforce the allids read limit */
if ((NEW_IDL_NO_ALLID != *flag_err) && (NULL != a) &&
- (idl != NULL) && idl_new_exceeds_allidslimit(count, a, allidslimit)) {
- idl->b_nids = 1;
- idl->b_ids[0] = ALLID;
- ret = DB_NOTFOUND; /* fool the code below into thinking that we finished the dups */
- break;
+ (idl != NULL) && idl_new_exceeds_allidslimit(count, a, allidslimit)) {
+ idl->b_nids = 1;
+ idl->b_ids[0] = ALLID;
+ ret = DB_NOTFOUND; /* fool the code below into thinking that we finished the dups */
+ LDAPDebug(LDAP_DEBUG_BACKLDBM, "search for key for attribute index %s "
+ "exceeded allidslimit %d - count is %d\n",
+ a->ai_type, allidslimit, count);
+ break;
}
#endif
ret = cursor->c_get(cursor,&key,&data,DB_NEXT_DUP|DB_MULTIPLE);
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
index 3957099d7..50c03c0ed 100644
--- a/ldap/servers/slapd/back-ldbm/index.c
+++ b/ldap/servers/slapd/back-ldbm/index.c
@@ -52,6 +52,8 @@
static const char *errmsg = "database index operation failed";
static int is_indexed (const char* indextype, int indexmask, char** index_rules);
+static int index_get_allids( int default_allids, const char *indextype, struct attrinfo *ai, const struct berval *val, unsigned int flags );
+
static Slapi_Value **
valuearray_minus_valuearray(
const Slapi_Attr *sattr,
@@ -894,6 +896,7 @@ index_read(
*/
IDList *
index_read_ext_allids(
+ Slapi_PBlock *pb,
backend *be,
char *type,
const char *indextype,
@@ -916,6 +919,8 @@ index_read_ext_allids(
char *basetmp, *basetype;
int retry_count = 0;
struct berval *encrypted_val = NULL;
+ int is_and = 0;
+ unsigned int ai_flags = 0;
*err = 0;
@@ -982,6 +987,23 @@ index_read_ext_allids(
slapi_ch_free_string( &basetmp );
return( idl );
}
+ if (pb) {
+ slapi_pblock_get(pb, SLAPI_SEARCH_IS_AND, &is_and);
+ }
+ ai_flags = is_and ? INDEX_ALLIDS_FLAG_AND : 0;
+ allidslimit = index_get_allids( allidslimit, indextype, ai, val, ai_flags );
+ if (allidslimit == 0) {
+ idl = idl_allids( be );
+ if (unindexed != NULL) *unindexed = 1;
+ LDAPDebug1Arg( LDAP_DEBUG_BACKLDBM, "<= index_read %lu candidates "
+ "(do not use index)\n", (u_long)IDL_NIDS(idl) );
+ LDAPDebug( LDAP_DEBUG_BACKLDBM, "<= index_read index attr %s type %s "
+ "for value %s does not use index\n", basetype, indextype,
+ (val && val->bv_val) ? val->bv_val : "ALL" );
+ index_free_prefix( prefix );
+ slapi_ch_free_string( &basetmp );
+ return( idl );
+ }
if ( (*err = dblayer_get_index_file( be, ai, &db, DBOPEN_CREATE )) != 0 ) {
LDAPDebug( LDAP_DEBUG_TRACE,
"<= index_read NULL (index file open for attr %s)\n",
@@ -1069,7 +1091,7 @@ index_read_ext(
int *unindexed
)
{
- return index_read_ext_allids(be, type, indextype, val, txn, err, unindexed, 0);
+ return index_read_ext_allids(NULL, be, type, indextype, val, txn, err, unindexed, 0);
}
/* This function compares two index keys. It is assumed
@@ -2347,3 +2369,100 @@ valuearray_minus_valuearray(
return c;
}
+
+/*
+ * Find the most specific match for the given index type, flags, and value, and return the allids value
+ * for that match. The priority is as follows, from highest to lowest:
+ * * match type, flags, value
+ * * match type, value
+ * * match type, flags
+ * * match type
+ * * match flags
+ * Note that for value to match, the type must be one that supports values e.g. eq or sub, so that
+ * in order for value to match, there must be a type
+ * For example, if you have
+ * dn: cn=objectclass,...
+ * objectclass: nsIndex
+ * nsIndexType: eq
+ * nsIndexIDListScanLimit: limit=0 type=eq flags=AND value=inetOrgPerson
+ * nsIndexIDListScanLimit: limit=1 type=eq value=inetOrgPerson
+ * nsIndexIDListScanLimit: limit=2 type=eq flags=AND
+ * nsIndexIDListScanLimit: limit=3 type=eq
+ * nsIndexIDListScanLimit: limit=4 flags=AND
+ * nsIndexIDListScanLimit: limit=5
+ * If the search filter is (&(objectclass=inetOrgPerson)(uid=foo)) then the limit=0 because all
+ * 3 of type, flags, and value match
+ * If the search filter is (objectclass=inetOrgPerson) then the limit=1 because type and value match
+ * but flag does not
+ * If the search filter is (&(objectclass=posixAccount)(uid=foo)) the the limit=2 because type and
+ * flags match
+ * If the search filter is (objectclass=posixAccount) then the limit=3 because only the type matches
+ * If the search filter is (&(objectclass=*account*)(objectclass=*)) then the limit=4 because only
+ * flags match but not the types (sub and pres)
+ * If the search filter is (objectclass=*account*) then the limit=5 because only the attribute matches
+ * but none of flags, type, or value matches
+ */
+#define AI_HAS_VAL 0x04
+#define AI_HAS_TYPE 0x02
+#define AI_HAS_FLAG 0x01
+static int
+index_get_allids( int default_allids, const char *indextype, struct attrinfo *ai, const struct berval *val, unsigned int flags )
+{
+ int allids = default_allids;
+ Slapi_Value sval;
+ struct index_idlistsizeinfo *iter; /* iterator */
+ int cookie = 0;
+ int best_score = 0;
+ struct index_idlistsizeinfo *best_match = NULL;
+
+ if (!ai->ai_idlistinfo) {
+ return allids;
+ }
+
+ if (val) { /* val should already be a Slapi_Value, but some paths do not use Slapi_Value */
+ sval.bv.bv_val = val->bv_val;
+ sval.bv.bv_len = val->bv_len;
+ sval.v_csnset = NULL;
+ sval.v_flags = SLAPI_ATTR_FLAG_NORMALIZED; /* the value must be a normalized key */
+ }
+
+ /* loop through all of the idlistinfo objects to find the best match */
+ for (iter = (struct index_idlistsizeinfo *)dl_get_first(ai->ai_idlistinfo, &cookie); iter;
+ iter = (struct index_idlistsizeinfo *)dl_get_next(ai->ai_idlistinfo, &cookie)) {
+ int iter_score = 0;
+
+ if (iter->ai_indextype != 0) { /* info defines a type which must match */
+ if (is_indexed(indextype, iter->ai_indextype, ai->ai_index_rules)) {
+ iter_score |= AI_HAS_TYPE;
+ } else {
+ continue; /* does not match, go to next one */
+ }
+ }
+ if (iter->ai_flags != 0) {
+ if (flags & iter->ai_flags) {
+ iter_score |= AI_HAS_FLAG;
+ } else {
+ continue; /* does not match, go to next one */
+ }
+ }
+ if (iter->ai_values != NULL) {
+ if ((val != NULL) && slapi_valueset_find(&ai->ai_sattr, iter->ai_values, &sval)) {
+ iter_score |= AI_HAS_VAL;
+ } else {
+ continue; /* does not match, go to next one */
+ }
+ }
+
+ if (iter_score >= best_score) {
+ best_score = iter_score;
+ best_match = iter;
+ }
+ }
+
+ if (best_match) {
+ allids = best_match->ai_idlistsizelimit;
+ }
+
+ return allids;
+}
+
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index c13fd6d87..221a22ca9 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -44,6 +44,22 @@
#include "back-ldbm.h"
+static void
+attr_index_idlistsize_done(struct index_idlistsizeinfo *idlinfo)
+{
+ if (idlinfo) {
+ slapi_valueset_free(idlinfo->ai_values);
+ idlinfo->ai_values = NULL;
+ }
+}
+
+static void
+attr_index_idlistsize_free(struct index_idlistsizeinfo **idlinfo)
+{
+ attr_index_idlistsize_done(*idlinfo);
+ slapi_ch_free((void **)idlinfo);
+}
+
struct attrinfo *
attrinfo_new()
{
@@ -51,6 +67,15 @@ attrinfo_new()
return p;
}
+void
+attrinfo_delete_idlistinfo(DataList **idlinfo_dl)
+{
+ if (idlinfo_dl && *idlinfo_dl) {
+ dl_cleanup(*idlinfo_dl, (FREEFN)attr_index_idlistsize_free);
+ dl_free(idlinfo_dl);
+ }
+}
+
void
attrinfo_delete(struct attrinfo **pp)
{
@@ -62,6 +87,7 @@ attrinfo_delete(struct attrinfo **pp)
slapi_ch_free((void**)(*pp)->ai_index_rules);
slapi_ch_free((void**)&((*pp)->ai_attrcrypt));
attr_done(&((*pp)->ai_sattr));
+ attrinfo_delete_idlistinfo(&(*pp)->ai_idlistinfo);
slapi_ch_free((void**)pp);
*pp= NULL;
}
@@ -126,6 +152,10 @@ ainfo_dup(
if ( b->ai_indexmask & INDEX_RULES ) {
charray_merge( &a->ai_index_rules, b->ai_index_rules, 1 );
}
+ /* free the old idlistinfo from a - transfer the list from b to a */
+ attrinfo_delete_idlistinfo(&a->ai_idlistinfo);
+ a->ai_idlistinfo = b->ai_idlistinfo;
+ b->ai_idlistinfo = NULL;
return( 1 );
}
@@ -166,6 +196,464 @@ _set_attr_substrlen(int index, char *str, int **substrlens)
}
}
+#define NS_INDEX_IDLISTSCANLIMIT "nsIndexIDListScanLimit"
+#define LIMIT_KW "limit="
+#define LIMIT_LEN sizeof(LIMIT_KW)-1
+#define TYPE_KW "type="
+#define TYPE_LEN sizeof(TYPE_KW)-1
+#define FLAGS_KW "flags="
+#define FLAGS_LEN sizeof(FLAGS_KW)-1
+#define VALUES_KW "values="
+#define VALUES_LEN sizeof(VALUES_KW)-1
+#define FLAGS_AND_KW "AND"
+#define FLAGS_AND_LEN sizeof(FLAGS_AND_KW)-1
+
+static int
+attr_index_parse_idlistsize_values(Slapi_Attr *attr, struct index_idlistsizeinfo *idlinfo, char *values, const char *strval, char *returntext)
+{
+ int rc = 0;
+ /* if we are here, values is non-NULL and not an empty string - parse it */
+ char *ptr = NULL;
+ char *lasts = NULL;
+ char *val;
+ int syntaxcheck = config_get_syntaxcheck();
+ IFP syntax_validate_fn = syntaxcheck ? attr->a_plugin->plg_syntax_validate : NULL;
+ char staticfiltstrbuf[1024]; /* for small filter strings */
+ char *filtstrbuf = staticfiltstrbuf; /* default if not malloc'd */
+ size_t filtstrbuflen = sizeof(staticfiltstrbuf); /* default if not malloc'd */
+ Slapi_Filter *filt = NULL; /* for filter converting/unescaping config values */
+
+ /* caller should have already checked that values is valid and contains a "=" */
+ PR_ASSERT(values);
+ ptr = PL_strchr(values, '=');
+ PR_ASSERT(ptr);
+ ++ptr;
+ for (val = ldap_utf8strtok_r(ptr, ",", &lasts); val;
+ val = ldap_utf8strtok_r(NULL, ",", &lasts)) {
+ Slapi_Value **ivals= NULL; /* for config values converted to keys */
+ int ii;
+#define FILT_TEMPL_BEGIN "(a="
+#define FILT_TEMPL_END ")"
+ size_t filttemplen = sizeof(FILT_TEMPL_BEGIN) - 1 + sizeof(FILT_TEMPL_END) - 1;
+ size_t vallen = strlen(val);
+
+ if ((vallen + filttemplen + 1) > filtstrbuflen) {
+ filtstrbuflen = vallen + filttemplen + 1;
+ if (filtstrbuf == staticfiltstrbuf) {
+ filtstrbuf = (char *)slapi_ch_malloc(sizeof(char) * filtstrbuflen);
+ } else {
+ filtstrbuf = (char *)slapi_ch_realloc(filtstrbuf, sizeof(char) * filtstrbuflen);
+ }
+ }
+ /* each value is a value from a filter which should be escaped like a filter value
+ * for each value, create a dummy filter string, then parse and unescape it just
+ * like a filter
+ */
+ PR_snprintf(filtstrbuf, filtstrbuflen, FILT_TEMPL_BEGIN "%s" FILT_TEMPL_END, val);
+ filt = slapi_str2filter(filtstrbuf);
+ if (!filt) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: invalid value %s in %s",
+ val, strval);
+ break;
+ }
+
+ if (idlinfo->ai_indextype == INDEX_SUB) {
+ if (syntax_validate_fn) {
+ /* see if the values match the syntax, but only if checking is enabled */
+ char **subany = filt->f_sub_any;
+ struct berval bv;
+
+ if (filt->f_sub_initial && *filt->f_sub_initial) {
+ bv.bv_val = filt->f_sub_initial;
+ bv.bv_len = strlen(bv.bv_val);
+ if ((rc = syntax_validate_fn(&bv))) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: initial substring value %s "
+ "in value %s violates syntax for attribute %s",
+ filt->f_sub_initial, val, attr->a_type);
+ break;
+ }
+ }
+ for (; !rc && subany && *subany; ++subany) {
+ char *subval = *subany;
+ if (*subval) {
+ bv.bv_val = subval;
+ bv.bv_len = strlen(bv.bv_val);
+ if ((rc = syntax_validate_fn(&bv))) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: initial substring value %s in "
+ "value %s violates syntax for attribute %s",
+ filt->f_sub_any[ii], val, attr->a_type);
+ break;
+ }
+ }
+ }
+ if (rc) {
+ break;
+ }
+ if (filt->f_sub_final) {
+ bv.bv_val = filt->f_sub_final;
+ bv.bv_len = strlen(bv.bv_val);
+ if ((rc = syntax_validate_fn(&bv))) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: final substring value %s in value "
+ "%s violates syntax for attribute %s",
+ filt->f_sub_final, val, attr->a_type);
+ break;
+ }
+ }
+ }
+ /* if we are here, values passed syntax or no checking */
+ /* generate index keys */
+ (void)slapi_attr_assertion2keys_sub_sv(attr, filt->f_sub_initial, filt->f_sub_any, filt->f_sub_final, &ivals);
+
+ } else if (idlinfo->ai_indextype == INDEX_EQUALITY) {
+ Slapi_Value sval;
+ /* see if the value matches the syntax, but only if checking is enabled */
+ if (syntax_validate_fn && ((rc = syntax_validate_fn(&filt->f_avvalue)))) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: value %s violates syntax for attribute %s",
+ val, attr->a_type);
+ break;
+ }
+
+ sval.bv.bv_val = filt->f_avvalue.bv_val;
+ sval.bv.bv_len = filt->f_avvalue.bv_len;
+ sval.v_flags = 0;
+ sval.v_csnset = NULL;
+ (void)slapi_attr_assertion2keys_ava_sv(attr, &sval, (Slapi_Value ***)&ivals, LDAP_FILTER_EQUALITY);
+ }
+ /* don't need filter any more */
+ slapi_filter_free(filt, 1);
+ filt = NULL;
+
+ /* add value(s) in ivals to our value set - disallow duplicates with error */
+ for (ii = 0; !rc && ivals && ivals[ii]; ++ii) {
+ if (idlinfo->ai_values &&
+ slapi_valueset_find(attr, idlinfo->ai_values, ivals[ii])) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: duplicate value %s in %s",
+ slapi_value_get_string(ivals[ii]), val);
+ slapi_value_free(&ivals[ii]);
+ } else {
+ if (!idlinfo->ai_values) {
+ idlinfo->ai_values = slapi_valueset_new();
+ }
+ slapi_valueset_add_value_ext(idlinfo->ai_values, ivals[ii], SLAPI_VALUE_FLAG_PASSIN);
+ }
+ }
+ /* only free members of ivals that were not moved to ai_values */
+ valuearray_free_ext(&ivals, ii);
+ ivals = NULL;
+ }
+
+ slapi_filter_free(filt, 1);
+
+ if (filtstrbuf != staticfiltstrbuf) {
+ slapi_ch_free_string(&filtstrbuf);
+ }
+
+ return rc;
+}
+
+static int
+attr_index_parse_idlistsize_limit(char *ptr, struct index_idlistsizeinfo *idlinfo, char *returntext)
+{
+ int rc = 0;
+ char *endptr;
+
+ PR_ASSERT(ptr && (*ptr == '='));
+ ptr++;
+ idlinfo->ai_idlistsizelimit = strtol(ptr, &endptr, 10);
+ if (*endptr) { /* error in parsing */
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: value %s for %s is not valid - "
+ "must be an integer >= -1",
+ ptr, LIMIT_KW);
+ } else if (idlinfo->ai_idlistsizelimit < -1) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: value %s for %s "
+ "must be an integer >= -1",
+ ptr, LIMIT_KW);
+ }
+ return rc;
+}
+
+static int
+attr_index_parse_idlistsize_type(char *ptr, struct attrinfo *ai, struct index_idlistsizeinfo *idlinfo, const char *val, const char *strval, char *returntext)
+{
+ int rc = 0;
+ char *ptr_next;
+ size_t len;
+ size_t preslen = strlen(indextype_PRESENCE);
+ size_t eqlen = strlen(indextype_EQUALITY);
+ size_t sublen = strlen(indextype_SUB);
+
+ PR_ASSERT(ptr && (*ptr == '='));
+ do {
+ ++ptr;
+ ptr_next = PL_strchr(ptr, ','); /* find next comma */
+ if (!ptr_next) {
+ ptr_next = PL_strchr(ptr, '\0'); /* find end of string */
+ }
+ len = ptr_next-ptr;
+ if ((len == preslen) && !PL_strncmp(ptr, indextype_PRESENCE, len)) {
+ if (idlinfo->ai_indextype & INDEX_PRESENCE) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: duplicate %s in value %s for %s",
+ indextype_PRESENCE, val, strval);
+ break;
+ }
+ if (!(ai->ai_indexmask & INDEX_PRESENCE)) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: attribute %s does not have index type %s",
+ ai->ai_type, indextype_PRESENCE);
+ break;
+ }
+ idlinfo->ai_indextype |= INDEX_PRESENCE;
+ } else if ((len == eqlen) && !PL_strncmp(ptr, indextype_EQUALITY, len)) {
+ if (idlinfo->ai_indextype & INDEX_EQUALITY) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: duplicate %s in value %s for %s",
+ indextype_EQUALITY, val, strval);
+ break;
+ }
+ if (!(ai->ai_indexmask & INDEX_EQUALITY)) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: attribute %s does not have index type %s",
+ ai->ai_type, indextype_EQUALITY);
+ break;
+ }
+ idlinfo->ai_indextype |= INDEX_EQUALITY;
+ } else if ((len == sublen) && !PL_strncmp(ptr, indextype_SUB, len)) {
+ if (idlinfo->ai_indextype & INDEX_SUB) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: duplicate %s in value %s for %s",
+ indextype_SUB, val, strval);
+ break;
+ }
+ if (!(ai->ai_indexmask & INDEX_SUB)) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: attribute %s does not have index type %s",
+ ai->ai_type, indextype_SUB);
+ break;
+ }
+ idlinfo->ai_indextype |= INDEX_SUB;
+ } else {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: unknown or unsupported index type "
+ "%s in value %s for %s",
+ ptr, val, strval);
+ break;
+ }
+ } while ((ptr = PL_strchr(ptr, ',')));
+
+ return rc;
+}
+
+static int
+attr_index_parse_idlistsize_flags(char *ptr, struct index_idlistsizeinfo *idlinfo, const char *val, const char *strval, char *returntext)
+{
+ int rc = 0;
+ char *ptr_next;
+ size_t len;
+
+ PR_ASSERT(ptr && (*ptr == '='));
+ do {
+ ++ptr;
+ ptr_next = PL_strchr(ptr, ','); /* find next comma */
+ if (!ptr_next) {
+ ptr_next = PL_strchr(ptr, '\0'); /* find end of string */
+ }
+ len = ptr_next-ptr;
+ if ((len == FLAGS_AND_LEN) && !PL_strncmp(ptr, FLAGS_AND_KW, len)) {
+ if (idlinfo->ai_flags & INDEX_ALLIDS_FLAG_AND) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: duplicate %s in value %s for %s",
+ FLAGS_AND_KW, val, strval);
+ break;
+ }
+ idlinfo->ai_flags |= INDEX_ALLIDS_FLAG_AND;
+ } else {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: unknown or unsupported flags %s in value %s for %s",
+ ptr, val, strval);
+ break;
+ }
+
+ } while ((ptr = PL_strchr(ptr, ',')));
+ return rc;
+}
+
+static int
+attr_index_parse_idlistsize(struct attrinfo *ai, const char *strval, struct index_idlistsizeinfo *idlinfo, char *returntext)
+{
+ int rc = 0; /* assume success */
+ char *mystr = slapi_ch_strdup(strval); /* copy for strtok */
+ char *values = NULL;
+ char *lasts, *val, *ptr;
+ int seen_limit = 0, seen_type = 0, seen_flags = 0, seen_values = 0;
+ Slapi_Attr *attr = &ai->ai_sattr;
+
+ if (!mystr) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: value is empty");
+ goto done;
+ }
+
+ for (val = ldap_utf8strtok_r(mystr, " ", &lasts); val;
+ val = ldap_utf8strtok_r(NULL, " ", &lasts)) {
+ ptr = PL_strchr(val, '=');
+ if (!ptr || !(*(ptr+1))) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: invalid value %s - should be keyword=value - in %s",
+ val, strval);
+ goto done;
+ }
+ /* ptr points at first '=' in val */
+ if (!PL_strncmp(val, LIMIT_KW, LIMIT_LEN)) {
+ if (seen_limit) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: can have only 1 %s in value %s",
+ LIMIT_KW, strval);
+ goto done;
+ }
+ if ((rc = attr_index_parse_idlistsize_limit(ptr, idlinfo, returntext))) {
+ goto done;
+ }
+ seen_limit = 1;
+ } else if (!PL_strncmp(val, TYPE_KW, TYPE_LEN)) {
+ if (seen_type) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: can have only 1 %s in value %s",
+ TYPE_KW, strval);
+ goto done;
+ }
+ if ((rc = attr_index_parse_idlistsize_type(ptr, ai, idlinfo, val, strval, returntext))) {
+ goto done;
+ }
+
+ seen_type = 1;
+ } else if (!PL_strncmp(val, FLAGS_KW, FLAGS_LEN)) {
+ if (seen_flags) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: can have only 1 %s in value %s",
+ FLAGS_KW, strval);
+ goto done;
+ }
+ if ((rc = attr_index_parse_idlistsize_flags(ptr, idlinfo, val, strval, returntext))) {
+ goto done;
+ }
+ seen_flags = 1;
+ } else if (!PL_strncmp(val, VALUES_KW, VALUES_LEN)) {
+ if (seen_values) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: can have only 1 %s in value %s",
+ VALUES_KW, strval);
+ goto done;
+ }
+ values = val;
+ seen_values = 1;
+ } else {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: unknown keyword %s in %s",
+ val, strval);
+ goto done;
+ }
+ }
+
+ if (!seen_limit) {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: no limit specified in %s",
+ strval);
+ goto done;
+ }
+
+ /* parse values last
+ * can only have values if type is eq or sub, and only eq by itself or sub by itself
+ * eq and sub type values cannot be mixed, so error in that case
+ * cannot have type pres,eq and values - pres must be by itself with no values
+ */
+ if (values) {
+ if (idlinfo->ai_indextype == INDEX_EQUALITY) {
+ ; /* ok */
+ } else if (idlinfo->ai_indextype == INDEX_SUB) {
+ ; /* ok */
+ } else {
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "attr_index_parse_idlistsize: if %s is specified, the %s "
+ "must be %s or %s - not both, and not any other types",
+ VALUES_KW, TYPE_KW, indextype_PRESENCE, indextype_SUB);
+ goto done;
+ }
+ } else {
+ goto done;
+ }
+
+ /* if we are here, values contains something - parse it */
+ rc = attr_index_parse_idlistsize_values(attr, idlinfo, values, strval, returntext);
+
+done:
+ slapi_ch_free_string(&mystr);
+ return rc;
+}
+
+static int
+attr_index_idlistsize_config(Slapi_Entry *e, struct attrinfo *ai, char *returntext)
+{
+ int rc = 0;
+ int ii;
+ Slapi_Attr *idlattr;
+ Slapi_Value *sval;
+ struct index_idlistsizeinfo *idlinfo;
+
+ slapi_entry_attr_find(e, NS_INDEX_IDLISTSCANLIMIT, &idlattr);
+ if (!idlattr) {
+ return rc;
+ }
+ for (ii = slapi_attr_first_value(idlattr, &sval); !rc && (ii != -1); ii = slapi_attr_next_value(idlattr, ii, &sval)) {
+ idlinfo = (struct index_idlistsizeinfo *)slapi_ch_calloc(1, sizeof(struct index_idlistsizeinfo));
+ if ((rc = attr_index_parse_idlistsize(ai, slapi_value_get_string(sval), idlinfo, returntext))) {
+ attr_index_idlistsize_free(&idlinfo);
+ attrinfo_delete_idlistinfo(&ai->ai_idlistinfo);
+ } else {
+ if (!ai->ai_idlistinfo) {
+ ai->ai_idlistinfo = dl_new();
+ dl_init(ai->ai_idlistinfo, 1);
+ }
+ dl_add(ai->ai_idlistinfo, idlinfo);
+ }
+ }
+ return rc;
+}
+
void
attr_index_config(
backend *be,
@@ -188,6 +676,7 @@ attr_index_config(
Slapi_Value *sval;
Slapi_Attr *attr;
int mr_count = 0;
+ char myreturntext[SLAPI_DSE_RETURNTEXT_SIZE];
/* Get the cn */
if (0 == slapi_entry_attr_find(e, "cn", &attr)) {
@@ -364,6 +853,11 @@ attr_index_config(
}
}
+ if ((return_value = attr_index_idlistsize_config(e, a, myreturntext))) {
+ LDAPDebug(LDAP_DEBUG_ANY,"attr_index_config: %s: Failed to parse idscanlimit info: %d:%s\n",
+ fname, return_value, myreturntext);
+ }
+
/* initialize the IDL code's private data */
return_value = idl_init_private(be, a);
if (0 != return_value) {
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index a3261852d..7a7b7ff3b 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -304,7 +304,7 @@ int id_array_init(Id_Array *new_guy, int size);
IDList* index_read( backend *be, char *type, const char* indextype, const struct berval* val, back_txn *txn, int *err );
IDList* index_read_ext( backend *be, char *type, const char* indextype, const struct berval* val, back_txn *txn, int *err, int *unindexed );
-IDList* index_read_ext_allids( backend *be, char *type, const char* indextype, const struct berval* val, back_txn *txn, int *err, int *unindexed, int allidslimit );
+IDList* index_read_ext_allids( Slapi_PBlock *pb, backend *be, char *type, const char* indextype, const struct berval* val, back_txn *txn, int *err, int *unindexed, int allidslimit );
IDList* index_range_read( Slapi_PBlock *pb, backend *be, char *type, const char* indextype, int ftype, struct berval* val, struct berval* nextval, int range, back_txn *txn, int *err );
IDList* index_range_read_ext( Slapi_PBlock *pb, backend *be, char *type, const char* indextype, int ftype, struct berval* val, struct berval* nextval, int range, back_txn *txn, int *err, int allidslimit );
const char *encode( const struct berval* data, char buf[BUFSIZ] );
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index bbd1cf2ca..95bfafd40 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -157,6 +157,7 @@ int valuearray_init_bervalarray(struct berval **bvals, Slapi_Value ***cvals);
int valuearray_init_bervalarray_with_flags(struct berval **bvals, Slapi_Value ***cvals, unsigned long flags);
int valuearray_get_bervalarray(Slapi_Value **cvals, struct berval ***bvals); /* JCM SLOW FUNCTION */
void valuearray_free(Slapi_Value ***va);
+void valuearray_free_ext(Slapi_Value ***va, int ii);
Slapi_Value *valuearray_remove_value(const Slapi_Attr *a, Slapi_Value **va, const Slapi_Value *v);
void valuearray_remove_value_atindex(Slapi_Value **va, int index);
int valuearray_isempty( Slapi_Value **va);
| 0 |
f290f80e90c116ab5c04171f6a833aa4fdee98e6
|
389ds/389-ds-base
|
Bug 707384 - only allow FIPS approved cipher suites in FIPS mode
https://bugzilla.redhat.com/show_bug.cgi?id=707384
Resolves: bug 707384
Bug Description: only allow FIPS approved cipher suites in FIPS mode
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: These changes only affect the server if FIPS mode has been
set in the internal security module, that is, if
modutil -dbdir /etc/dirsrv/slapd-myhost -chkfips true
returns
FIPS mode enabled.
1) If cn=encryption,cn=config nsSSL3Ciphers is missing or set to "+all",
the server will silently use only FIPS approved cipher suites.
2) If cn=encryption,cn=config nsSSL3Ciphers has a list of ciphers, and at
least one non-FIPS approved cipher suites is enabled, the server will log
to the errors log the list of unapproved cipher suites specified, and will
restrict the server to only the FIPS approved ciphers specified in the list.
3) The attribute nsSSLSupportedCiphers in cn=encryption,cn=config will list
only FIPS approved ciphers
4) If the CONFIG log level (64) is set, more detailed information will be
logged to the errors log about cipher config processing
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
|
commit f290f80e90c116ab5c04171f6a833aa4fdee98e6
Author: Rich Megginson <[email protected]>
Date: Wed Jun 8 12:10:56 2011 -0600
Bug 707384 - only allow FIPS approved cipher suites in FIPS mode
https://bugzilla.redhat.com/show_bug.cgi?id=707384
Resolves: bug 707384
Bug Description: only allow FIPS approved cipher suites in FIPS mode
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: These changes only affect the server if FIPS mode has been
set in the internal security module, that is, if
modutil -dbdir /etc/dirsrv/slapd-myhost -chkfips true
returns
FIPS mode enabled.
1) If cn=encryption,cn=config nsSSL3Ciphers is missing or set to "+all",
the server will silently use only FIPS approved cipher suites.
2) If cn=encryption,cn=config nsSSL3Ciphers has a list of ciphers, and at
least one non-FIPS approved cipher suites is enabled, the server will log
to the errors log the list of unapproved cipher suites specified, and will
restrict the server to only the FIPS approved ciphers specified in the list.
3) The attribute nsSSLSupportedCiphers in cn=encryption,cn=config will list
only FIPS approved ciphers
4) If the CONFIG log level (64) is set, more detailed information will be
logged to the errors log about cipher config processing
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index 809a320b0..b0a3f4305 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -168,35 +168,126 @@ static cipherstruct _conf_ciphers[] = {
{NULL, NULL, 0}
};
+static void
+slapd_SSL_report(int degree, char *fmt, va_list args)
+{
+ char buf[2048];
+ PR_vsnprintf( buf, sizeof(buf), fmt, args );
+ LDAPDebug( LDAP_DEBUG_ANY, "SSL %s: %s\n",
+ (degree == LOG_FAILURE) ? "failure" : "alert",
+ buf, 0 );
+}
+
+void
+slapd_SSL_error(char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ slapd_SSL_report(LOG_FAILURE, fmt, args);
+ va_end(args);
+}
+
+void
+slapd_SSL_warn(char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ slapd_SSL_report(LOG_WARN, fmt, args);
+ va_end(args);
+}
+
char ** getSupportedCiphers()
{
SSLCipherSuiteInfo info;
char *sep = "::";
int number_of_ciphers = sizeof (_conf_ciphers) /sizeof(cipherstruct);
int i;
+ int idx = 0;
+ PRBool isFIPS = slapd_pk11_isFIPS();
if (cipher_names == NULL ) {
cipher_names = (char **) slapi_ch_calloc ((number_of_ciphers +1 ) , sizeof(char *));
for (i = 0 ; _conf_ciphers[i].name != NULL; i++ ) {
SSL_GetCipherSuiteInfo((PRUint16)_conf_ciphers[i].num,&info,sizeof(info));
- cipher_names[i] = PR_smprintf("%s%s%s%s%s%s%s%s%d",_conf_ciphers[i].version,sep,_conf_ciphers[i].name,sep,info.symCipherName,sep,info.macAlgorithmName,sep,info.symKeyBits);
+ /* only support FIPS approved ciphers in FIPS mode */
+ if (!isFIPS || info.isFIPS) {
+ cipher_names[idx++] = PR_smprintf("%s%s%s%s%s%s%s%s%d",_conf_ciphers[i].version,sep,_conf_ciphers[i].name,sep,info.symCipherName,sep,info.macAlgorithmName,sep,info.symKeyBits);
+ }
}
- cipher_names[i] = NULL;
+ cipher_names[idx] = NULL;
}
return cipher_names;
}
+
+static PRBool
+cipher_check_fips(int idx, char ***suplist, char ***unsuplist)
+{
+ PRBool rc = PR_TRUE;
+
+ if (slapd_pk11_isFIPS()) {
+ SSLCipherSuiteInfo info;
+ if (SECFailure == SSL_GetCipherSuiteInfo((PRUint16)_conf_ciphers[idx].num,
+ &info, sizeof info)) {
+ PRErrorCode errorCode = PR_GetError();
+ if (slapi_is_loglevel_set(SLAPI_LOG_CONFIG)) {
+ slapd_SSL_warn("Security Initialization: no information for cipher suite [%s] "
+ "error %d - %s", _conf_ciphers[idx].name,
+ errorCode, slapd_pr_strerror(errorCode));
+ }
+ rc = PR_FALSE;
+ }
+ if (rc && !info.isFIPS) {
+ if (slapi_is_loglevel_set(SLAPI_LOG_CONFIG)) {
+ slapd_SSL_warn("Security Initialization: FIPS mode is enabled but "
+ "cipher suite [%s] is not approved for FIPS - "
+ "the cipher suite will be disabled - if "
+ "you want to use this cipher suite, you must use modutil to "
+ "disable FIPS in the internal token.",
+ _conf_ciphers[idx].name);
+ }
+ rc = PR_FALSE;
+ }
+ if (!rc && unsuplist && !charray_inlist(*unsuplist, _conf_ciphers[idx].name)) {
+ charray_add(unsuplist, _conf_ciphers[idx].name);
+ }
+ if (rc && suplist && !charray_inlist(*suplist, _conf_ciphers[idx].name)) {
+ charray_add(suplist, _conf_ciphers[idx].name);
+ }
+ }
+ return rc;
+}
+
void
-_conf_setallciphers(int active)
+_conf_setallciphers(int active, char ***suplist, char ***unsuplist)
{
int x;
/* MLM - change: Because null_md5 is NOT encrypted at all, force
* them to activate it by name. */
for(x = 0; _conf_ciphers[x].name; x++) {
+ PRBool enabled = active ? PR_TRUE : PR_FALSE;
if(active && !strcmp(_conf_ciphers[x].name, "rsa_null_md5")) {
continue;
}
- SSL_CipherPrefSetDefault(_conf_ciphers[x].num, active ? PR_TRUE : PR_FALSE);
+ if (enabled) {
+ enabled = cipher_check_fips(x, suplist, unsuplist);
+ }
+ SSL_CipherPrefSetDefault(_conf_ciphers[x].num, enabled);
+ }
+}
+
+static char *
+charray2str(char **ary, const char *delim)
+{
+ char *str = NULL;
+ while (ary && *ary) {
+ if (str) {
+ str = PR_sprintf_append(str, "%s%s", delim, *ary++);
+ } else {
+ str = PR_smprintf("%s", *ary++);
+ }
}
+
+ return str;
}
char *
@@ -204,15 +295,30 @@ _conf_setciphers(char *ciphers)
{
char *t, err[MAGNUS_ERROR_LEN];
int x, active;
- char *raw = ciphers;
+ char *raw = ciphers;
+ char **suplist = NULL;
+ char **unsuplist = NULL;
/* Default is to activate all of them */
if(!ciphers || ciphers[0] == '\0') {
- _conf_setallciphers(1);
+ _conf_setallciphers(1, &suplist, NULL);
+ if (suplist && *suplist) {
+ if (slapi_is_loglevel_set(SLAPI_LOG_CONFIG)) {
+ char *str = charray2str(suplist, ",");
+ slapd_SSL_warn("Security Initialization: FIPS mode is enabled - only the following "
+ "cipher suites are approved for FIPS: [%s] - "
+ "all other cipher suites are disabled - if "
+ "you want to use other cipher suites, you must use modutil to "
+ "disable FIPS in the internal token.",
+ str ? str : "(none)");
+ slapi_ch_free_string(&str);
+ }
+ }
+ slapi_ch_free((void **)&suplist); /* strings inside are static */
return NULL;
}
/* Enable all the ciphers by default and the following while loop would disable the user disabled ones This is needed becuase we added a new set of ciphers in the table . Right now there is no support for this from the console */
- _conf_setallciphers(1);
+ _conf_setallciphers(1, &suplist, NULL);
t = ciphers;
while(t) {
@@ -232,22 +338,44 @@ _conf_setciphers(char *ciphers)
*t++ = '\0';
if(!strcasecmp(ciphers, "all"))
- _conf_setallciphers(active);
+ _conf_setallciphers(active, NULL, NULL);
else {
for(x = 0; _conf_ciphers[x].name; x++) {
if(!strcasecmp(ciphers, _conf_ciphers[x].name)) {
- SSL_CipherPrefSetDefault(_conf_ciphers[x].num, active ? PR_TRUE : PR_FALSE);
- break;
+ PRBool enabled = active ? PR_TRUE : PR_FALSE;
+ if (enabled) {
+ enabled = cipher_check_fips(x, NULL, &unsuplist);
+ }
+ SSL_CipherPrefSetDefault(_conf_ciphers[x].num, enabled);
+ break;
}
}
if(!_conf_ciphers[x].name) {
PR_snprintf(err, sizeof(err), "unknown cipher %s", ciphers);
+ slapi_ch_free((void **)&suplist); /* strings inside are static */
+ slapi_ch_free((void **)&unsuplist); /* strings inside are static */
return slapi_ch_strdup(err);
}
}
if(t)
ciphers = t;
}
+ if (unsuplist && unsuplist) {
+ char *strsup = charray2str(suplist, ",");
+ char *strunsup = charray2str(unsuplist, ",");
+ slapd_SSL_warn("Security Initialization: FIPS mode is enabled - only the following "
+ "cipher suites are approved for FIPS: [%s] - "
+ "the specified cipher suites [%s] are disabled - if "
+ "you want to use these unsupported cipher suites, you must use modutil to "
+ "disable FIPS in the internal token.",
+ strsup ? strsup : "(none)", strunsup ? strunsup : "(none)");
+ slapi_ch_free_string(&strsup);
+ slapi_ch_free_string(&strunsup);
+ }
+
+ slapi_ch_free((void **)&suplist); /* strings inside are static */
+ slapi_ch_free((void **)&unsuplist); /* strings inside are static */
+
return NULL;
}
@@ -270,34 +398,6 @@ SSLPLCY_Install(void)
}
-static void
-slapd_SSL_report(int degree, char *fmt, va_list args)
-{
- char buf[2048];
- PR_vsnprintf( buf, sizeof(buf), fmt, args );
- LDAPDebug( LDAP_DEBUG_ANY, "SSL %s: %s\n",
- (degree == LOG_FAILURE) ? "failure" : "alert",
- buf, 0 );
-}
-
-void
-slapd_SSL_error(char *fmt, ...)
-{
- va_list args;
- va_start(args, fmt);
- slapd_SSL_report(LOG_FAILURE, fmt, args);
- va_end(args);
-}
-
-void
-slapd_SSL_warn(char *fmt, ...)
-{
- va_list args;
- va_start(args, fmt);
- slapd_SSL_report(LOG_WARN, fmt, args);
- va_end(args);
-}
-
/**
* Get a particular entry
*/
@@ -1079,7 +1179,7 @@ int slapd_ssl_init2(PRFileDesc **fd, int startTLS)
"nsSSL3 explicitly set to on - SSLv3 is not approved "
"for use in FIPS mode - SSLv3 will be disabled - if "
"you want to use SSLv3, you must use modutil to "
- "disable FIPS in the internal token.\n");
+ "disable FIPS in the internal token.");
enableSSL3 = PR_FALSE;
}
}
| 0 |
05803a42892eaf472e863c527545968661b79802
|
389ds/389-ds-base
|
Ticket #608 - Posix Winsync plugin throws "posix_winsync_end_update_cb: failed to add task entry" error message
Bug description: When a task posixWinsyncCreateMemberOfTask is
already running, another same task request is received, the
Posix Winsync Plug-in issues an error "posix-winsync - posix_
winsync_end_update_cb: failed to add task entry". This is not
an "error" but an expected behaviour.
Fix description: Instead of filing the message as SLAPI_LOG_
FATAL, this patch logs clearer message "task entry <taskname>
already exists" if the log level is SLAPI_LOG_PLUGIN.
posix_winsync_end_update_cb
https://fedorahosted.org/389/ticket/608
Reviewed by Mark (Thank you!!)
|
commit 05803a42892eaf472e863c527545968661b79802
Author: Noriko Hosoi <[email protected]>
Date: Tue Apr 9 16:53:07 2013 -0700
Ticket #608 - Posix Winsync plugin throws "posix_winsync_end_update_cb: failed to add task entry" error message
Bug description: When a task posixWinsyncCreateMemberOfTask is
already running, another same task request is received, the
Posix Winsync Plug-in issues an error "posix-winsync - posix_
winsync_end_update_cb: failed to add task entry". This is not
an "error" but an expected behaviour.
Fix description: Instead of filing the message as SLAPI_LOG_
FATAL, this patch logs clearer message "task entry <taskname>
already exists" if the log level is SLAPI_LOG_PLUGIN.
posix_winsync_end_update_cb
https://fedorahosted.org/389/ticket/608
Reviewed by Mark (Thank you!!)
diff --git a/ldap/servers/plugins/posix-winsync/posix-winsync.c b/ldap/servers/plugins/posix-winsync/posix-winsync.c
index f40f5ff91..154e22aa8 100644
--- a/ldap/servers/plugins/posix-winsync/posix-winsync.c
+++ b/ldap/servers/plugins/posix-winsync/posix-winsync.c
@@ -1396,12 +1396,16 @@ posix_winsync_end_update_cb(void *cbdata, const Slapi_DN *ds_subtree, const Slap
slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
"--> posix_winsync_end_update_cb, retrieving return code\n");
slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
- if (rc != 0) {
+ if (LDAP_ALREADY_EXISTS == rc) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
+ "posix_winsync_end_update_cb: "
+ "task entry %s already exists\n",
+ posix_winsync_plugin_name);
+ } else if (rc != 0) {
slapi_log_error(SLAPI_LOG_FATAL, posix_winsync_plugin_name,
"posix_winsync_end_update_cb: "
- "failed to add task entry\n");
+ "failed to add task entry (%d)\n", rc);
} else {
-
slapi_log_error(SLAPI_LOG_PLUGIN, posix_winsync_plugin_name,
"posix_winsync_end_update_cb: "
"add task entry\n");
| 0 |
72520a20a2abd545a246114b190e3f8ab6731689
|
389ds/389-ds-base
|
Ticket 47668 - test: port ticket47490_test to Replica/Agreement interface (47600)
Bug Description:
Ticket https://fedorahosted.org/389/ticket/47600 introduces new Replica/Agreement/Changelog interfaces.
This ticket is to take into account those changes into ticket47490 test case
Fix Description:
Call enableReplication from Replica Class.
Call Agreement.create with properties
https://fedorahosted.org/389/ticket/47668
Reviewed by: Rich Megginson (thanks)
Platforms tested: F17 / F19 (jenkins)
Flag Day: no
Doc impact: no
|
commit 72520a20a2abd545a246114b190e3f8ab6731689
Author: Thierry bordaz (tbordaz) <[email protected]>
Date: Tue Jan 7 09:19:07 2014 +0100
Ticket 47668 - test: port ticket47490_test to Replica/Agreement interface (47600)
Bug Description:
Ticket https://fedorahosted.org/389/ticket/47600 introduces new Replica/Agreement/Changelog interfaces.
This ticket is to take into account those changes into ticket47490 test case
Fix Description:
Call enableReplication from Replica Class.
Call Agreement.create with properties
https://fedorahosted.org/389/ticket/47668
Reviewed by: Rich Megginson (thanks)
Platforms tested: F17 / F19 (jenkins)
Flag Day: no
Doc impact: no
diff --git a/dirsrvtests/tickets/ticket47490_test.py b/dirsrvtests/tickets/ticket47490_test.py
index 48254b47b..58490d066 100644
--- a/dirsrvtests/tickets/ticket47490_test.py
+++ b/dirsrvtests/tickets/ticket47490_test.py
@@ -230,12 +230,17 @@ def topology(request):
# Now prepare the Master-Consumer topology
#
# First Enable replication
- master.enableReplication(suffix=SUFFIX, role="master", replicaId=REPLICAID_MASTER)
- consumer.enableReplication(suffix=SUFFIX, role="consumer")
+ master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER)
+ consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
# Initialize the supplier->consumer
- repl_agreement = master.agreement.create(consumer, SUFFIX, binddn=defaultProperties[REPLICATION_BIND_DN], bindpw=defaultProperties[REPLICATION_BIND_PW])
+ properties = {RA_NAME: r'meTo_$host:$port',
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
if not repl_agreement:
log.fatal("Fail to create a replica agreement")
| 0 |
aa206345f4fa29e16c2e4118d26b2fb4c93ddd51
|
389ds/389-ds-base
|
Ticket 47968 - RFE send logs to journald
Bug Description: Instead of writing the debug logs directly to the
disk, it is possible to send them to journald, on platforms where
journald is available.
Fix Description:
* This patch enables a configuration for selecting log
backends, and defines the basic wrappers to route log entries
based on the backend selection. The backends for syslog and
journald have had identities reserved.
* This enables the configure option --with-systemd
which will allow us to link to the daemon and journald apis from
within slapd.
* Allows all backends to log simultaneously. Configuration is a
comma seperated set of values taking "dirsrv-log,syslog,journald'.
Unknown values are ignored, invalid is ignored. At least one valid
log backend must be in the list.
https://fedorahosted.org/389/ticket/47968
Author: wibrown
Review by: mreynolds (Thanks!)
|
commit aa206345f4fa29e16c2e4118d26b2fb4c93ddd51
Author: William Brown <[email protected]>
Date: Mon Dec 7 14:26:08 2015 +1030
Ticket 47968 - RFE send logs to journald
Bug Description: Instead of writing the debug logs directly to the
disk, it is possible to send them to journald, on platforms where
journald is available.
Fix Description:
* This patch enables a configuration for selecting log
backends, and defines the basic wrappers to route log entries
based on the backend selection. The backends for syslog and
journald have had identities reserved.
* This enables the configure option --with-systemd
which will allow us to link to the daemon and journald apis from
within slapd.
* Allows all backends to log simultaneously. Configuration is a
comma seperated set of values taking "dirsrv-log,syslog,journald'.
Unknown values are ignored, invalid is ignored. At least one valid
log backend must be in the list.
https://fedorahosted.org/389/ticket/47968
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/Makefile.am b/Makefile.am
index 213922f7d..d90fb270e 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -13,6 +13,7 @@ NQBUILDNUM := $(subst \,,$(subst $(QUOTE),,$(BUILDNUM)))
DEBUG_DEFINES = @debug_defs@
GCCSEC_DEFINES = @gccsec_defs@
ASAN_DEFINES = @asan_defs@
+SYSTEMD_DEFINES = @systemd_defs@
# the -U undefines these symbols - should use the corresponding DS_ ones instead - see configure.ac
DS_DEFINES = -DBUILD_NUM=$(BUILDNUM) -DVENDOR="\"$(vendor)\"" -DBRAND="\"$(brand)\"" -DCAPBRAND="\"$(capbrand)\"" \
-UPACKAGE_VERSION -UPACKAGE_TARNAME -UPACKAGE_STRING -UPACKAGE_BUGREPORT
@@ -40,8 +41,8 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd
-DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \
-DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" -DTEMPLATEDIR="\"$(sampledatadir)\""
-AM_CPPFLAGS = $(DEBUG_DEFINES) $(GCCSEC_DEFINES) $(ASAN_DEFINES) $(DS_DEFINES) $(DS_INCLUDES) $(PATH_DEFINES)
-PLUGIN_CPPFLAGS = $(AM_CPPFLAGS) @openldap_inc@ @ldapsdk_inc@ @nss_inc@ @nspr_inc@
+AM_CPPFLAGS = $(DEBUG_DEFINES) $(GCCSEC_DEFINES) $(ASAN_DEFINES) $(DS_DEFINES) $(DS_INCLUDES) $(PATH_DEFINES) $(SYSTEMD_DEFINES)
+PLUGIN_CPPFLAGS = $(AM_CPPFLAGS) @openldap_inc@ @ldapsdk_inc@ @nss_inc@ @nspr_inc@ @systemd_inc@
# We need to make sure that libpthread is linked before libc on HP-UX.
if HPUX
AM_LDFLAGS = -lpthread
@@ -77,6 +78,8 @@ NETSNMP_LINK = @netsnmp_lib@ @netsnmp_link@
PAM_LINK = -lpam
KERBEROS_LINK = $(kerberos_lib)
DLOPEN_LINK = -ldl
+SYSTEMD_LINK = @systemd_lib@
+
LIBSOCKET=@LIBSOCKET@
LIBNSL=@LIBNSL@
@@ -1028,7 +1031,7 @@ libslapd_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @sasl_inc@ @db_inc@ @svrcore_inc@ @ker
if SPARC
libslapd_la_SOURCES += ldap/servers/slapd/slapi_counter_sunos_sparcv9.S
endif
-libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NUNC_STANS_LINK) $(SVRCORE_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LINK) $(THREADLIB)
+libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NUNC_STANS_LINK) $(SVRCORE_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LINK) $(THREADLIB) $(SYSTEMD_LINK)
#////////////////////////////////////////////////////////////////
@@ -1706,9 +1709,9 @@ ns_slapd_SOURCES = ldap/servers/slapd/abandon.c \
$(GETSOCKETPEER)
ns_slapd_CPPFLAGS = $(AM_CPPFLAGS) @sasl_inc@ @openldap_inc@ @ldapsdk_inc@ @nss_inc@ \
- @nspr_inc@ @svrcore_inc@
+ @nspr_inc@ @svrcore_inc@ @systemd_inc@
ns_slapd_LDADD = libslapd.la libldaputil.a $(LDAPSDK_LINK) $(NSS_LINK) $(DLOPEN_LINK) \
- $(NSPR_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(LIBNSL) $(LIBSOCKET) $(THREADLIB)
+ $(NSPR_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(LIBNSL) $(LIBSOCKET) $(THREADLIB) $(SYSTEMD_LINK)
# We need to link ns-slapd with the C++ compiler on HP-UX since we load
# some C++ shared libraries (such as icu).
if HPUX
diff --git a/Makefile.in b/Makefile.in
index 745d6a853..2ad44c2ca 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -110,7 +110,7 @@ am__aclocal_m4_deps = $(top_srcdir)/m4/libtool.m4 \
$(top_srcdir)/m4/icu.m4 $(top_srcdir)/m4/netsnmp.m4 \
$(top_srcdir)/m4/kerberos.m4 $(top_srcdir)/m4/pcre.m4 \
$(top_srcdir)/m4/selinux.m4 $(top_srcdir)/m4/nunc-stans.m4 \
- $(top_srcdir)/configure.ac
+ $(top_srcdir)/m4/systemd.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
@@ -713,7 +713,7 @@ libslapd_la_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_2) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_3) $(am__DEPENDENCIES_1) \
- $(am__DEPENDENCIES_1)
+ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
am__libslapd_la_SOURCES_DIST = ldap/servers/slapd/add.c \
ldap/servers/slapd/agtmmap.c ldap/servers/slapd/apibroker.c \
ldap/servers/slapd/attr.c ldap/servers/slapd/attrlist.c \
@@ -1003,7 +1003,8 @@ am_migratecred_bin_OBJECTS = ldap/servers/slapd/tools/migratecred_bin-migratecre
migratecred_bin_OBJECTS = $(am_migratecred_bin_OBJECTS)
migratecred_bin_DEPENDENCIES = libslapd.la $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
- $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
+ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
+ $(am__DEPENDENCIES_1)
am_mmldif_bin_OBJECTS = \
ldap/servers/slapd/tools/mmldif_bin-mmldif.$(OBJEXT)
mmldif_bin_OBJECTS = $(am_mmldif_bin_OBJECTS)
@@ -1066,7 +1067,7 @@ ns_slapd_DEPENDENCIES = libslapd.la libldaputil.a \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
- $(am__DEPENDENCIES_1)
+ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
am_pwdhash_bin_OBJECTS = \
ldap/servers/slapd/tools/pwdhash_bin-pwenc.$(OBJEXT)
pwdhash_bin_OBJECTS = $(am_pwdhash_bin_OBJECTS)
@@ -1482,6 +1483,9 @@ sttyexec = @sttyexec@
svrcore_inc = @svrcore_inc@
svrcore_lib = @svrcore_lib@
sysconfdir = @sysconfdir@
+systemd_defs = @systemd_defs@
+systemd_inc = @systemd_inc@
+systemd_lib = @systemd_lib@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
@@ -1510,6 +1514,7 @@ NQBUILDNUM := $(subst \,,$(subst $(QUOTE),,$(BUILDNUM)))
DEBUG_DEFINES = @debug_defs@
GCCSEC_DEFINES = @gccsec_defs@
ASAN_DEFINES = @asan_defs@
+SYSTEMD_DEFINES = @systemd_defs@
# the -U undefines these symbols - should use the corresponding DS_ ones instead - see configure.ac
DS_DEFINES = -DBUILD_NUM=$(BUILDNUM) -DVENDOR="\"$(vendor)\"" -DBRAND="\"$(brand)\"" -DCAPBRAND="\"$(capbrand)\"" \
-UPACKAGE_VERSION -UPACKAGE_TARNAME -UPACKAGE_STRING -UPACKAGE_BUGREPORT
@@ -1535,8 +1540,8 @@ PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfd
-DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \
-DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" -DTEMPLATEDIR="\"$(sampledatadir)\""
-AM_CPPFLAGS = $(DEBUG_DEFINES) $(GCCSEC_DEFINES) $(ASAN_DEFINES) $(DS_DEFINES) $(DS_INCLUDES) $(PATH_DEFINES)
-PLUGIN_CPPFLAGS = $(AM_CPPFLAGS) @openldap_inc@ @ldapsdk_inc@ @nss_inc@ @nspr_inc@
+AM_CPPFLAGS = $(DEBUG_DEFINES) $(GCCSEC_DEFINES) $(ASAN_DEFINES) $(DS_DEFINES) $(DS_INCLUDES) $(PATH_DEFINES) $(SYSTEMD_DEFINES)
+PLUGIN_CPPFLAGS = $(AM_CPPFLAGS) @openldap_inc@ @ldapsdk_inc@ @nss_inc@ @nspr_inc@ @systemd_inc@
#AM_LDFLAGS = -Wl,-z,defs
@HPUX_FALSE@AM_LDFLAGS = $(ASAN_DEFINES)
# We need to make sure that libpthread is linked before libc on HP-UX.
@@ -1566,6 +1571,7 @@ NETSNMP_LINK = @netsnmp_lib@ @netsnmp_link@
PAM_LINK = -lpam
KERBEROS_LINK = $(kerberos_lib)
DLOPEN_LINK = -ldl
+SYSTEMD_LINK = @systemd_lib@
#------------------------
# Generated Sources
@@ -2387,7 +2393,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/value.c ldap/servers/slapd/valueset.c \
ldap/servers/slapd/vattr.c $(libavl_a_SOURCES) $(am__append_1)
libslapd_la_CPPFLAGS = $(PLUGIN_CPPFLAGS) @sasl_inc@ @db_inc@ @svrcore_inc@ @kerberos_inc@ @pcre_inc@
-libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NUNC_STANS_LINK) $(SVRCORE_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LINK) $(THREADLIB)
+libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NUNC_STANS_LINK) $(SVRCORE_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LINK) $(THREADLIB) $(SYSTEMD_LINK)
#////////////////////////////////////////////////////////////////
#
@@ -2982,7 +2988,7 @@ ldif_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK_NOTHR) $(SASL_LINK)
#------------------------
migratecred_bin_SOURCES = ldap/servers/slapd/tools/migratecred.c
migratecred_bin_CPPFLAGS = $(AM_CPPFLAGS) @openldap_inc@ @ldapsdk_inc@ @nss_inc@ @nspr_inc@
-migratecred_bin_LDADD = libslapd.la $(NSPR_LINK) $(NSS_LINK) $(SVRCORE_LINK) $(LDAPSDK_LINK) $(SASL_LINK)
+migratecred_bin_LDADD = libslapd.la $(NSPR_LINK) $(NSS_LINK) $(SVRCORE_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(SYSTEMD_LINK)
#------------------------
# mmldif
@@ -3032,10 +3038,10 @@ ns_slapd_SOURCES = ldap/servers/slapd/abandon.c \
$(GETSOCKETPEER)
ns_slapd_CPPFLAGS = $(AM_CPPFLAGS) @sasl_inc@ @openldap_inc@ @ldapsdk_inc@ @nss_inc@ \
- @nspr_inc@ @svrcore_inc@
+ @nspr_inc@ @svrcore_inc@ @systemd_inc@
ns_slapd_LDADD = libslapd.la libldaputil.a $(LDAPSDK_LINK) $(NSS_LINK) $(DLOPEN_LINK) \
- $(NSPR_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(LIBNSL) $(LIBSOCKET) $(THREADLIB)
+ $(NSPR_LINK) $(SASL_LINK) $(SVRCORE_LINK) $(LIBNSL) $(LIBSOCKET) $(THREADLIB) $(SYSTEMD_LINK)
@HPUX_FALSE@ns_slapd_LINK = $(LINK)
# We need to link ns-slapd with the C++ compiler on HP-UX since we load
diff --git a/configure b/configure
index e04359302..c266be0ff 100755
--- a/configure
+++ b/configure
@@ -639,6 +639,9 @@ LTLIBOBJS
vendor
capbrand
brand
+systemd_defs
+systemd_lib
+systemd_inc
localrundir
nunc_stans_libdir
nunc_stans_lib
@@ -973,6 +976,7 @@ with_selinux
with_nunc_stans
with_nunc_stans_inc
with_nunc_stans_lib
+with_systemd
'
ac_precious_vars='build_alias
host_alias
@@ -1744,6 +1748,7 @@ Optional Packages:
nunc-stans include file directory
--with-nunc-stans-lib=PATH
nunc-stans library directory
+ --with-systemd Enable Systemd native integration.
Some influential environment variables:
CXX C++ compiler command
@@ -21347,6 +21352,94 @@ $as_echo "no" >&6; }
fi
+# BEGIN COPYRIGHT BLOCK
+# Copyright (C) 2015 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# END COPYRIGHT BLOCK
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Systemd..." >&5
+$as_echo "$as_me: checking for Systemd..." >&6;}
+
+# check for --with-systemd
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-systemd" >&5
+$as_echo_n "checking for --with-systemd... " >&6; }
+
+# Check whether --with-systemd was given.
+if test "${with_systemd+set}" = set; then :
+ withval=$with_systemd;
+ if test "$withval" = yes
+ then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: using systemd native features" >&5
+$as_echo "using systemd native features" >&6; }
+ with_systemd=yes
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+ fi
+
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+if test "$with_systemd" = yes; then
+ # Extract the first word of "pkg-config", so it can be a program name with args.
+set dummy pkg-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_PKG_CONFIG+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $PKG_CONFIG in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+PKG_CONFIG=$ac_cv_path_PKG_CONFIG
+if test -n "$PKG_CONFIG"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5
+$as_echo "$PKG_CONFIG" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Systemd with pkg-config" >&5
+$as_echo_n "checking for Systemd with pkg-config... " >&6; }
+ if test -n "$PKG_CONFIG" && $PKG_CONFIG --exists systemd libsystemd-journal libsystemd-daemon ; then
+ systemd_inc=`$PKG_CONFIG --cflags-only-I systemd libsystemd-journal libsystemd-daemon`
+ systemd_lib=`$PKG_CONFIG --libs-only-l systemd libsystemd-journal libsystemd-daemon`
+ systemd_defs="-DWITH_SYSTEMD"
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no Systemd pkg-config files" >&5
+$as_echo "no Systemd pkg-config files" >&6; }
+ fi
+
+fi
+
PACKAGE_BASE_VERSION=`echo $PACKAGE_VERSION | awk -F\. '{print $1"."$2}'`
@@ -21437,6 +21530,9 @@ fi
+
+
+
diff --git a/configure.ac b/configure.ac
index 468384c6b..d5045ef88 100644
--- a/configure.ac
+++ b/configure.ac
@@ -717,6 +717,7 @@ m4_include(m4/kerberos.m4)
m4_include(m4/pcre.m4)
m4_include(m4/selinux.m4)
m4_include(m4/nunc-stans.m4)
+m4_include(m4/systemd.m4)
PACKAGE_BASE_VERSION=`echo $PACKAGE_VERSION | awk -F\. '{print $1"."$2}'`
AC_SUBST(PACKAGE_BASE_VERSION)
@@ -778,6 +779,9 @@ AC_SUBST(nunc_stans_inc)
AC_SUBST(nunc_stans_lib)
AC_SUBST(nunc_stans_libdir)
AC_SUBST(localrundir)
+AC_SUBST(systemd_inc)
+AC_SUBST(systemd_lib)
+AC_SUBST(systemd_defs)
AC_SUBST(brand)
AC_SUBST(capbrand)
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 1ec80099a..6c62aaa3f 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -62,6 +62,7 @@ nsslapd-ndn-cache-enabled: on
nsslapd-sasl-mapping-fallback: off
nsslapd-dynamic-plugins: off
nsslapd-allow-hashed-passwords: off
+nsslapd-logging-backend: 1
dn: cn=features,cn=config
objectclass: top
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
index 42af40dee..0b14c0679 100644
--- a/ldap/schema/01core389.ldif
+++ b/ldap/schema/01core389.ldif
@@ -291,6 +291,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2325 NAME 'nsslapd-auditfaillog-logging-
attributeTypes: ( 2.16.840.1.113730.3.1.2326 NAME 'nsslapd-auditfaillog-logging-hide-unhashed-pw' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2327 NAME 'nsslapd-auditfaillog' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2328 NAME 'nsslapd-auditfaillog-list' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2330 NAME 'nsslapd-logging-backend' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
#
# objectclasses
#
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 5b1389d17..1461feeef 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -309,10 +309,10 @@ write_audit_file(
switch (logtype)
{
case SLAPD_AUDIT_LOG:
- slapd_log_audit_proc (l->ls_buf, l->ls_len);
+ slapd_log_audit (l->ls_buf, l->ls_len);
break;
case SLAPD_AUDITFAIL_LOG:
- slapd_log_auditfail_proc (l->ls_buf, l->ls_len);
+ slapd_log_auditfail (l->ls_buf, l->ls_len);
break;
default:
/* Unsupported log type, we should make some noise */
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index d108bf3b9..94098bf06 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -169,6 +169,7 @@ static int invalid_sasl_mech(char *str);
#define ENTRYUSN_IMPORT_INIT "0"
#define DEFAULT_ALLOWED_TO_DELETE_ATTRS "nsslapd-listenhost nsslapd-securelistenhost nsslapd-defaultnamingcontext nsslapd-snmp-index"
#define SALTED_SHA1_SCHEME_NAME "SSHA"
+#define INIT_LOGGING_BACKEND_INTERNAL "dirsrv-log"
/* CONFIG_ON_OFF */
slapi_onoff_t init_accesslog_rotationsync_enabled;
@@ -1186,8 +1187,13 @@ static struct config_get_and_set {
{CONFIG_AUDITFAILFILE_ATTRIBUTE, config_set_auditfaillog,
NULL, 0,
(void**)&global_slapdFrontendConfig.auditfaillog,
- CONFIG_STRING_OR_EMPTY, NULL, NULL/* deletion is not allowed */}
+ CONFIG_STRING_OR_EMPTY, NULL, NULL/* deletion is not allowed */},
/* End audit fail log configuration */
+ /* warning: initialization makes pointer from integer without a cast [enabled by default]. Why do we get this? */
+ {CONFIG_LOGGING_BACKEND, NULL,
+ log_set_backend, 0,
+ (void**)&global_slapdFrontendConfig.logging_backend,
+ CONFIG_STRING_OR_EMPTY, NULL, INIT_LOGGING_BACKEND_INTERNAL}
};
/*
@@ -8259,7 +8265,9 @@ remove_commas(char *str)
static int
invalid_sasl_mech(char *str)
{
- char *mech = NULL, *token = NULL, *next = NULL;
+ char *mech = NULL;
+ char *token = NULL;
+ char *next = NULL;
int i;
if(str == NULL){
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index ef6e57b6b..cbfba5490 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -300,6 +300,7 @@ void g_log_init(int log_enabled)
loginfo.log_numof_auditfail_logs = 1;
loginfo.log_auditfail_fdes = NULL;
loginfo.log_auditfail_logchain = NULL;
+ loginfo.log_backend = LOGGING_BACKEND_INTERNAL;
if ((loginfo.log_auditfail_rwlock =slapi_new_rwlock())== NULL ) {
exit (-1);
}
@@ -388,6 +389,68 @@ log_set_logging(const char *attrname, char *value, int logtype, char *errorbuf,
return LDAP_SUCCESS;
}
+
+int
+log_set_backend(const char *attrname, char *value, int logtype, char *errorbuf, int apply) {
+
+ int retval = LDAP_SUCCESS;
+ int backend = 0;
+ char *backendstr = NULL; /* The backend we are looking at */
+ char *token = NULL; /* String to tokenise, need to dup value */
+ char *next = NULL; /* The next value */
+
+
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ /* We don't need to bother checking log type ... */
+ if ( !apply || !value || !*value ) {
+ return retval;
+ }
+
+
+ /* We have a comma seperated list. So split it up */
+ token = slapi_ch_strdup(value);
+ for (backendstr = ldap_utf8strtok_r(token, ",", &next);
+ backendstr != NULL;
+ backendstr = ldap_utf8strtok_r(NULL, ",", &next))
+ {
+ if(strlen(backendstr) == 0) {
+ /* Probably means someone did ",,"*/
+ continue;
+ } else if (slapi_utf8ncasecmp(backendstr, "dirsrv-log", 10) ) {
+ backend |= LOGGING_BACKEND_INTERNAL;
+ } else if (slapi_utf8ncasecmp(backendstr, "syslog", 6) ) {
+ backend |= LOGGING_BACKEND_SYSLOG;
+#ifdef WITH_SYSTEMD
+ } else if (slapi_utf8ncasecmp(backendstr, "journald", 8) ) {
+ backend |= LOGGING_BACKEND_JOURNALD;
+#endif
+ }
+ }
+ slapi_ch_free_string(&token);
+
+ if ( !( backend & LOGGING_BACKEND_INTERNAL)
+ && ! (backend & LOGGING_BACKEND_SYSLOG)
+#ifdef WITH_SYSTEMD
+ && ! (backend & LOGGING_BACKEND_JOURNALD)
+#endif
+ ) {
+ /* There is probably a better error here .... */
+ retval = LDAP_OPERATIONS_ERROR;
+ } else {
+ /* We have a valid backend, set it */
+ /*
+ * We just need to use any lock here, doesn't matter which.
+ */
+ LOG_ACCESS_LOCK_WRITE( );
+ loginfo.log_backend = backend;
+ slapi_ch_free_string(&(slapdFrontendConfig->logging_backend));
+ slapdFrontendConfig->logging_backend = slapi_ch_strdup(value);
+ LOG_ACCESS_UNLOCK_WRITE( );
+ }
+
+ return retval;
+}
/******************************************************************************
* Tell me the access log file name inc path
******************************************************************************/
@@ -1900,8 +1963,36 @@ auditfail_log_openf( char *pathname, int locked)
/******************************************************************************
* write in the audit log
******************************************************************************/
+
+int
+slapd_log_audit (
+ char *buffer,
+ int buf_len)
+{
+ /* We use this to route audit log entries to where they need to go */
+ int retval = LDAP_SUCCESS;
+ int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
+ if (lbackend & LOGGING_BACKEND_INTERNAL) {
+ retval = slapd_log_audit_internal(buffer, buf_len);
+ }
+
+ if (retval != LDAP_SUCCESS) {
+ return retval;
+ }
+ if (lbackend & LOGGING_BACKEND_SYSLOG) {
+ /* This returns void, so we hope it worked */
+ syslog(LOG_NOTICE, buffer);
+ }
+#ifdef WITH_SYSTEMD
+ if (lbackend & LOGGING_BACKEND_JOURNALD) {
+ retval = sd_journal_print(LOG_NOTICE, buffer);
+ }
+#endif
+ return retval;
+}
+
int
-slapd_log_audit_proc (
+slapd_log_audit_internal (
char *buffer,
int buf_len)
{
@@ -1934,7 +2025,33 @@ slapd_log_audit_proc (
* write in the audit fail log
******************************************************************************/
int
-slapd_log_auditfail_proc (
+slapd_log_auditfail (
+ char *buffer,
+ int buf_len)
+{
+ /* We use this to route audit log entries to where they need to go */
+ int retval = LDAP_SUCCESS;
+ int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
+ if (lbackend & LOGGING_BACKEND_INTERNAL) {
+ retval = slapd_log_auditfail_internal(buffer, buf_len);
+ }
+ if (retval != LDAP_SUCCESS) {
+ return retval;
+ }
+ if (lbackend & LOGGING_BACKEND_SYSLOG) {
+ /* This returns void, so we hope it worked */
+ syslog(LOG_NOTICE, buffer);
+ }
+#ifdef WITH_SYSTEMD
+ if (lbackend & LOGGING_BACKEND_JOURNALD) {
+ retval = sd_journal_print(LOG_NOTICE, buffer);
+ }
+#endif
+ return retval;
+}
+
+int
+slapd_log_auditfail_internal (
char *buffer,
int buf_len)
{
@@ -1972,14 +2089,41 @@ slapd_log_error_proc(
char *fmt,
... )
{
- va_list ap_err;
- va_list ap_file;
- va_start( ap_err, fmt );
- va_start( ap_file, fmt );
- slapd_log_error_proc_internal(subsystem, fmt, ap_err, ap_file);
- va_end(ap_err);
- va_end(ap_file);
- return 0;
+ int rc = LDAP_SUCCESS;
+ va_list ap_err;
+ va_list ap_file;
+
+ if (loginfo.log_backend & LOGGING_BACKEND_INTERNAL) {
+ va_start( ap_err, fmt );
+ va_start( ap_file, fmt );
+ rc = slapd_log_error_proc_internal( subsystem, fmt, ap_err, ap_file );
+ va_end(ap_file);
+ va_end(ap_err);
+ }
+ if (rc != LDAP_SUCCESS) {
+ return(rc);
+ }
+ if (loginfo.log_backend & LOGGING_BACKEND_SYSLOG) {
+ va_start( ap_err, fmt );
+ /* va_start( ap_file, fmt ); */
+ /* This returns void, so we hope it worked */
+ vsyslog(LOG_ERROR, fmt, ap_err);
+ /* vsyslog(LOG_ERROR, fmt, ap_file); */
+ /* va_end(ap_file); */
+ va_end(ap_err);
+ }
+#ifdef WITH_SYSTEMD
+ if (loginfo.log_backend & LOGGING_BACKEND_JOURNALD) {
+ va_start( ap_err, fmt );
+ /* va_start( ap_file, fmt ); */
+ /* This isn't handling RC nicely ... */
+ rc = sd_journal_printv(LOG_ERROR, fmt, ap_err);
+ /* rc = sd_journal_printv(LOG_ERROR, fmt, ap_file); */
+ /* va_end(ap_file); */
+ va_end(ap_err);
+ }
+#endif
+ return rc;
}
static int
@@ -1989,7 +2133,7 @@ slapd_log_error_proc_internal(
va_list ap_err,
va_list ap_file)
{
- int rc = 0;
+ int rc = LDAP_SUCCESS;
if ( (loginfo.log_error_state & LOGGING_ENABLED) && (loginfo.log_error_file != NULL) ) {
LOG_ERROR_LOCK_WRITE( );
@@ -2163,9 +2307,10 @@ vslapd_log_error(
int
slapi_log_error( int severity, char *subsystem, char *fmt, ... )
{
- va_list ap1;
- va_list ap2;
- int rc;
+ va_list ap_err;
+ va_list ap_file;
+ int rc = LDAP_SUCCESS;
+ int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
if ( severity < SLAPI_LOG_MIN || severity > SLAPI_LOG_MAX ) {
(void)slapd_log_error_proc( subsystem,
@@ -2175,13 +2320,38 @@ slapi_log_error( int severity, char *subsystem, char *fmt, ... )
}
if ( slapd_ldap_debug & slapi_log_map[ severity ] ) {
- va_start( ap1, fmt );
- va_start( ap2, fmt );
- rc = slapd_log_error_proc_internal( subsystem, fmt, ap1, ap2 );
- va_end( ap1 );
- va_end( ap2 );
+ if (lbackend & LOGGING_BACKEND_INTERNAL) {
+ va_start( ap_err, fmt );
+ va_start( ap_file, fmt );
+ rc = slapd_log_error_proc_internal( subsystem, fmt, ap_err, ap_file );
+ va_end(ap_file);
+ va_end(ap_err);
+ }
+ if (rc != LDAP_SUCCESS) {
+ return(rc);
+ }
+ if (lbackend & LOGGING_BACKEND_SYSLOG) {
+ va_start( ap_err, fmt );
+ /* va_start( ap_file, fmt ); */
+ /* This returns void, so we hope it worked */
+ vsyslog(LOG_ERROR, fmt, ap_err);
+ /* vsyslog(LOG_ERROR, fmt, ap_file); */
+ /* va_end(ap_file); */
+ va_end(ap_err);
+ }
+#ifdef WITH_SYSTEMD
+ if (lbackend & LOGGING_BACKEND_JOURNALD) {
+ va_start( ap_err, fmt );
+ /* va_start( ap_file, fmt ); */
+ /* This isn't handling RC nicely ... */
+ rc = sd_journal_printv(LOG_ERROR, fmt, ap_err);
+ /* rc = sd_journal_printv(LOG_ERROR, fmt, ap_file); */
+ /* va_end(ap_file); */
+ va_end(ap_err);
+ }
+#endif
} else {
- rc = 0; /* nothing to be logged --> always return success */
+ rc = LDAP_SUCCESS; /* nothing to be logged --> always return success */
}
return( rc );
@@ -2226,7 +2396,8 @@ static int vslapd_log_access(char *fmt, va_list ap)
char sign;
char buffer[SLAPI_LOG_BUFSIZ];
char vbuf[SLAPI_LOG_BUFSIZ];
- int blen, vlen;
+ int blen;
+ int vlen;
/* info needed to keep us from calling localtime/strftime so often: */
static time_t old_time = 0;
static char old_tbuf[SLAPI_LOG_BUFSIZ];
@@ -2278,7 +2449,7 @@ static int vslapd_log_access(char *fmt, va_list ap)
log_append_buffer2(tnl, loginfo.log_access_buffer, buffer, blen, vbuf, vlen);
- return( 0 );
+ return( LDAP_SUCCESS );
}
int
@@ -2288,16 +2459,43 @@ slapi_log_access( int level,
{
va_list ap;
int rc=0;
+ int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
if (!(loginfo.log_access_state & LOGGING_ENABLED)) {
return 0;
}
- va_start( ap, fmt );
- if (( level & loginfo.log_access_level ) &&
- ( loginfo.log_access_fdes != NULL ) && (loginfo.log_access_file != NULL) ) {
- rc = vslapd_log_access(fmt, ap);
- }
- va_end( ap );
+
+ if (( level & loginfo.log_access_level ) &&
+ ( loginfo.log_access_fdes != NULL ) && (loginfo.log_access_file != NULL) ) {
+ /* How do we handle the RC?
+ *
+ * What we do is we log to the "best" backend first going down.
+ * "best" meaning most reliable.
+ * As we descend, if we encounter an issue, we bail before the "lesser"
+ * backends.
+ */
+ if (lbackend & LOGGING_BACKEND_INTERNAL) {
+ va_start( ap, fmt );
+ rc = vslapd_log_access(fmt, ap);
+ va_end( ap );
+ }
+ if (rc != LDAP_SUCCESS) {
+ return rc;
+ }
+ if (lbackend & LOGGING_BACKEND_SYSLOG) {
+ va_start( ap, fmt );
+ /* This returns void, so we hope it worked */
+ vsyslog(LOG_INFO, fmt, ap);
+ va_end( ap );
+ }
+#ifdef WITH_SYSTEMD
+ if (lbackend & LOGGING_BACKEND_JOURNALD) {
+ va_start (ap, fmt );
+ rc = sd_journal_printv(LOG_INFO, fmt, ap);
+ va_end( ap );
+ }
+#endif
+ }
return( rc );
}
@@ -4869,6 +5067,8 @@ check_log_max_size( char *maxdiskspace_str,
return rc;
}
+
+
/************************************************************************************/
/* E N D */
/************************************************************************************/
diff --git a/ldap/servers/slapd/log.h b/ldap/servers/slapd/log.h
index 6c5f4f1de..ff791e17e 100644
--- a/ldap/servers/slapd/log.h
+++ b/ldap/servers/slapd/log.h
@@ -201,6 +201,7 @@ struct logging_opts {
LogFileInfo *log_auditfail_logchain; /* all the logs info */
char *log_auditfailinfo_file; /* auditfail log rotation info file */
Slapi_RWLock *log_auditfail_rwlock; /* lock on auditfail */
+ int log_backend;
};
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index f0a525714..2c6a7af29 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -399,6 +399,8 @@ int config_set_mempool_maxfreelist( const char *attrname, char *value, char *err
int config_set_maxsimplepaged_per_conn( const char *attrname, char *value, char *errorbuf, int apply );
+int log_set_backend(const char *attrname, char *value, int logtype, char *errorbuf, int apply);
+
int config_get_SSLclientAuth();
int config_get_ssl_check_hostname();
char *config_get_SSL3ciphers();
@@ -751,8 +753,10 @@ int slapi_log_access( int level, char *fmt, ... )
#else
;
#endif
-int slapd_log_audit_proc(char *buffer, int buf_len);
-int slapd_log_auditfail_proc(char *buffer, int buf_len);
+int slapd_log_audit(char *buffer, int buf_len);
+int slapd_log_audit_internal(char *buffer, int buf_len);
+int slapd_log_auditfail(char *buffer, int buf_len);
+int slapd_log_auditfail_internal(char *buffer, int buf_len);
void log_access_flush();
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 633cc455c..c4bae763c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -125,6 +125,11 @@ typedef struct symbol_t {
#include <nunc-stans/nunc-stans.h>
#endif
+#ifdef WITH_SYSTEMD
+#include <systemd/sd-journal.h>
+#include <systemd/sd-daemon.h>
+#endif
+
#if defined(OS_solaris)
# include <thread.h>
# define GET_THREAD_ID() thr_self()
@@ -1886,6 +1891,12 @@ typedef struct _slapdEntryPoints {
#define SLAPD_AUDIT_LOG 0x4
#define SLAPD_AUDITFAIL_LOG 0x8
+#define LOGGING_BACKEND_INTERNAL 0x1
+#define LOGGING_BACKEND_SYSLOG 0x2
+#ifdef WITH_SYSTEMD
+#define LOGGING_BACKEND_JOURNALD 0x4
+#endif
+
#define CONFIG_DATABASE_ATTRIBUTE "nsslapd-database"
#define CONFIG_PLUGIN_ATTRIBUTE "nsslapd-plugin"
#define CONFIG_SIZELIMIT_ATTRIBUTE "nsslapd-sizelimit"
@@ -2110,6 +2121,7 @@ typedef struct _slapdEntryPoints {
#define CONFIG_CN_USES_DN_SYNTAX_IN_DNS "nsslapd-cn-uses-dn-syntax-in-dns"
#define CONFIG_MAXSIMPLEPAGED_PER_CONN_ATTRIBUTE "nsslapd-maxsimplepaged-per-conn"
+#define CONFIG_LOGGING_BACKEND "nsslapd-logging-backend"
/* getenv alternative */
#define CONFIG_MALLOC_MXFAST "nsslapd-malloc-mxfast"
@@ -2305,6 +2317,8 @@ typedef struct _slapdFrontendConfig {
char *auditfaillog_exptimeunit;
slapi_onoff_t auditfaillog_logging_hide_unhashed_pw;
+ char *logging_backend;
+
slapi_onoff_t return_exact_case; /* Return attribute names with the same case
as they appear in at.conf */
diff --git a/m4/systemd.m4 b/m4/systemd.m4
new file mode 100644
index 000000000..4f9533b19
--- /dev/null
+++ b/m4/systemd.m4
@@ -0,0 +1,36 @@
+# BEGIN COPYRIGHT BLOCK
+# Copyright (C) 2015 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# END COPYRIGHT BLOCK
+
+AC_CHECKING(for Systemd)
+
+# check for --with-systemd
+AC_MSG_CHECKING(for --with-systemd)
+AC_ARG_WITH(systemd, AS_HELP_STRING([--with-systemd],[Enable Systemd native integration.]),
+[
+ if test "$withval" = yes
+ then
+ AC_MSG_RESULT([using systemd native features])
+ with_systemd=yes
+ else
+ AC_MSG_RESULT(no)
+ fi
+],
+AC_MSG_RESULT(no))
+
+if test "$with_systemd" = yes; then
+ AC_PATH_PROG(PKG_CONFIG, pkg-config)
+ AC_MSG_CHECKING(for Systemd with pkg-config)
+ if test -n "$PKG_CONFIG" && $PKG_CONFIG --exists systemd libsystemd-journal libsystemd-daemon ; then
+ systemd_inc=`$PKG_CONFIG --cflags-only-I systemd libsystemd-journal libsystemd-daemon`
+ systemd_lib=`$PKG_CONFIG --libs-only-l systemd libsystemd-journal libsystemd-daemon`
+ systemd_defs="-DWITH_SYSTEMD"
+ else
+ AC_MSG_RESULT([no Systemd pkg-config files])
+ fi
+
+fi
| 0 |
508e0d08e1a93371e7ae92e70ddd60aaa4d7c782
|
389ds/389-ds-base
|
Makefile changes to allow building on Linux machines where uname reports four-part version
|
commit 508e0d08e1a93371e7ae92e70ddd60aaa4d7c782
Author: David Boreham <[email protected]>
Date: Wed Jan 26 22:10:48 2005 +0000
Makefile changes to allow building on Linux machines where uname reports four-part version
diff --git a/config/config.mk b/config/config.mk
index b962fae0d..2da430dbc 100644
--- a/config/config.mk
+++ b/config/config.mk
@@ -77,6 +77,14 @@ ifeq ($(OS_ARCH),Linux)
ifeq (,$(filter-out Linux FreeBSD,$(NSOS_ARCH)))
OS_RELEASE := $(shell echo $(OS_RELEASE) | sed 's/-.*//')
endif
+# If the release returned by uname has _4_ components, the original
+# logic here broke. The following lines detect this and add a second
+# 'basename' to fixup the version such that everything still works.
+OS_RELEASE_TEMP := $(subst ., ,$(OS_RELEASE))
+OS_RELEASE_COUNT := $(words $(OS_RELEASE_TEMP))
+ifeq ($(OS_RELEASE_COUNT), 4)
+ OS_RELEASE := $(basename $(OS_RELEASE))
+endif
OS_RELEASE := $(basename $(OS_RELEASE))
ifeq (86,$(findstring 86,$(OS_TEST)))
CPU_TAG = _x86
@@ -230,6 +238,7 @@ ifdef NSOS_RELEASE_OVERRIDE
OS_RELEASE := $(NSOS_RELEASE_OVERRIDE)
endif
+
include $(DEPTH)/config/$(OS_ARCH)$(OS_RELEASE).mk
OS_CONFIG := $(OS_ARCH)$(OS_RELEASE)
diff --git a/nsconfig.mk b/nsconfig.mk
index c4ab985d3..145fd96b1 100644
--- a/nsconfig.mk
+++ b/nsconfig.mk
@@ -179,6 +179,11 @@ ifeq ($(NSOS_ARCH),Linux)
ifeq (,$(filter-out Linux FreeBSD,$(NSOS_ARCH)))
NSOS_RELEASE := $(shell echo $(NSOS_RELEASE) | sed 's/-.*//')
endif
+NSOS_RELEASE_TEMP := $(subst ., ,$(NSOS_RELEASE))
+NSOS_RELEASE_COUNT := $(words $(NSOS_RELEASE_TEMP))
+ifeq ($(NSOS_RELEASE_COUNT), 4)
+ NSOS_RELEASE := $(basename $(NSOS_RELEASE))
+endif
NSOS_RELEASE := $(basename $(NSOS_RELEASE))
NSOS_ARCH := Linux
PRETTY_ARCH := Linux
| 0 |
82b362176a8874ace0fd53e69ca6838d33a4881a
|
389ds/389-ds-base
|
Bug 676689 - crash while adding a new user to be synced to windows
https://bugzilla.redhat.com/show_bug.cgi?id=676689
Resolves: bug 676689
Bug Description: crash while adding a new user to be synced to windows
Reviewed by: nkinder (Thanks!)
Branch: master
Fix Description: The OpenLDAP ldap_next_entry() function will assert and
abort if passed a NULL message. Mozldap ldap_next_entry() will just return
NULL. Fix the server to not pass NULL to ldap_next_entry().
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
|
commit 82b362176a8874ace0fd53e69ca6838d33a4881a
Author: Rich Megginson <[email protected]>
Date: Thu Feb 10 15:23:20 2011 -0700
Bug 676689 - crash while adding a new user to be synced to windows
https://bugzilla.redhat.com/show_bug.cgi?id=676689
Resolves: bug 676689
Bug Description: crash while adding a new user to be synced to windows
Reviewed by: nkinder (Thanks!)
Branch: master
Fix Description: The OpenLDAP ldap_next_entry() function will assert and
abort if passed a NULL message. Mozldap ldap_next_entry() will just return
NULL. Fix the server to not pass NULL to ldap_next_entry().
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/plugins/replication/windows_connection.c b/ldap/servers/plugins/replication/windows_connection.c
index f0b823707..50c2abfdf 100644
--- a/ldap/servers/plugins/replication/windows_connection.c
+++ b/ldap/servers/plugins/replication/windows_connection.c
@@ -688,7 +688,8 @@ windows_search_entry_ext(Repl_Connection *conn, char* searchbase, char *filter,
/* See if there are any more entries : if so then that's an error
* but we still need to get them to avoid gumming up the connection
*/
- while (NULL != ( message = ldap_next_entry(conn->ld,message))) ;
+ /* have to check message first - cannot pass a NULL message */
+ while (message && (NULL != ( message = ldap_next_entry(conn->ld,message)))) ;
return_value = CONN_OPERATION_SUCCESS;
}
else if (IS_DISCONNECT_ERROR(ldap_rc))
diff --git a/ldap/servers/slapd/auth.c b/ldap/servers/slapd/auth.c
index 4b56e5ebc..2c254839c 100644
--- a/ldap/servers/slapd/auth.c
+++ b/ldap/servers/slapd/auth.c
@@ -186,7 +186,11 @@ slapu_next_entry( LDAP* ld, LDAPMessage* msg )
{
Slapi_Entry** entry = (Slapi_Entry**)msg;
if (ld != internal_ld) {
- return ldap_next_entry (ld, msg);
+ if (msg) {
+ return ldap_next_entry (ld, msg);
+ } else {
+ return NULL;
+ }
}
if (entry && *entry && *++entry) {
return (LDAPMessage*)entry;
| 0 |
8bf8f52b94bdf7df248eaca6f6feb58a0b835c8f
|
389ds/389-ds-base
|
Resolves: 453011
Summary: Redesigned algorithm used to update memberOf attribute.
|
commit 8bf8f52b94bdf7df248eaca6f6feb58a0b835c8f
Author: Nathan Kinder <[email protected]>
Date: Tue Jul 1 22:30:07 2008 +0000
Resolves: 453011
Summary: Redesigned algorithm used to update memberOf attribute.
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index ef71a1896..1767cfbe1 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -627,6 +627,13 @@ cn: member
nssystemindex: false
nsindextype: eq
+dn: cn=memberOf,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
+objectclass: top
+objectclass: nsIndex
+cn: memberOf
+nssystemindex: false
+nsindextype: eq
+
dn: cn=nsUniqueId,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
objectclass: top
objectclass: nsIndex
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index a8bb98f43..9583a6d04 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -87,6 +87,13 @@ typedef struct _memberofstringll
void *next;
} memberofstringll;
+typedef struct _memberof_get_groups_data
+{
+ MemberOfConfig *config;
+ Slapi_Value *memberdn_val;
+ Slapi_ValueSet **groupvals;
+} memberof_get_groups_data;
+
/*** function prototypes ***/
/* exported functions */
@@ -133,17 +140,15 @@ static void *memberof_get_plugin_id();
static int memberof_compare(MemberOfConfig *config, const void *a, const void *b);
static int memberof_qsort_compare(const void *a, const void *b);
static void memberof_load_array(Slapi_Value **array, Slapi_Attr *attr);
-static int memberof_is_legit_member(Slapi_PBlock *pb, MemberOfConfig *config,
- char *group_dn, char *op_this, char *op_to, memberofstringll *stack);
static int memberof_del_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, char *dn);
static int memberof_call_foreach_dn(Slapi_PBlock *pb, char *dn,
char *type, plugin_search_entry_callback callback, void *callback_data);
static int memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn,
Slapi_Value *memberdn);
-static int memberof_is_member(MemberOfConfig *config, Slapi_Value *groupdn,
- Slapi_Value *memberdn);
-static int memberof_is_member_r(MemberOfConfig *config, Slapi_Value *groupdn,
- Slapi_Value *memberdn, memberofstringll *stack);
+static Slapi_ValueSet *memberof_get_groups(MemberOfConfig *config, char *memberdn);
+static int memberof_get_groups_r(MemberOfConfig *config, char *memberdn,
+ memberof_get_groups_data *data);
+static int memberof_get_groups_callback(Slapi_Entry *e, void *callback_data);
static int memberof_test_membership(Slapi_PBlock *pb, MemberOfConfig *config,
char *group_dn);
static int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data);
@@ -154,9 +159,6 @@ static int memberof_replace_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *con
static int memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config,
int mod_op, char *group_dn, char *op_this, char *replace_with, char *op_to,
memberofstringll *stack);
-static int memberof_add_groups_search_callback(Slapi_Entry *e, void *callback_data);
-static int memberof_add_membership(Slapi_PBlock *pb, MemberOfConfig *config,
- char *op_this, char *op_to);
static int memberof_task_add(Slapi_PBlock *pb, Slapi_Entry *e,
Slapi_Entry *eAfter, int *returncode, char *returntext,
void *arg);
@@ -352,6 +354,8 @@ int memberof_postop_del(Slapi_PBlock *pb)
}
memberof_unlock();
+
+ memberof_free_config(&configCopy);
}
slapi_log_error( SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
@@ -359,15 +363,15 @@ int memberof_postop_del(Slapi_PBlock *pb)
return ret;
}
-typedef struct _del_dn_data
+typedef struct _memberof_del_dn_data
{
char *dn;
char *type;
-} del_dn_data;
+} memberof_del_dn_data;
int memberof_del_dn_from_groups(Slapi_PBlock *pb, MemberOfConfig *config, char *dn)
{
- del_dn_data data = {dn, config->groupattr};
+ memberof_del_dn_data data = {dn, config->groupattr};
return memberof_call_foreach_dn(pb, dn,
config->groupattr, memberof_del_dn_type_callback, &data);
@@ -386,11 +390,11 @@ int memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data)
mods[0] = &mod;
mods[1] = 0;
- val[0] = ((del_dn_data *)callback_data)->dn;
+ val[0] = ((memberof_del_dn_data *)callback_data)->dn;
val[1] = 0;
mod.mod_op = LDAP_MOD_DELETE;
- mod.mod_type = ((del_dn_data *)callback_data)->type;
+ mod.mod_type = ((memberof_del_dn_data *)callback_data)->type;
mod.mod_values = val;
slapi_modify_internal_set_pb(
@@ -524,6 +528,8 @@ int memberof_postop_modrdn(Slapi_PBlock *pb)
memberof_replace_dn_from_groups(pb, &configCopy, pre_dn, post_dn);
memberof_unlock();
+
+ memberof_free_config(&configCopy);
}
}
@@ -726,6 +732,11 @@ int memberof_postop_modify(Slapi_PBlock *pb)
smod = slapi_mods_get_next_smod(smods, next_mod);
}
+ if (config_copied)
+ {
+ memberof_free_config(&configCopy);
+ }
+
slapi_mod_free(&next_mod);
slapi_mods_free(&smods);
}
@@ -783,6 +794,8 @@ int memberof_postop_add(Slapi_PBlock *pb)
}
memberof_unlock();
+
+ memberof_free_config(&configCopy);
}
}
@@ -1012,7 +1025,7 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config,
/* someone set up infinitely
recursive groups - bail out */
- slapi_log_error( SLAPI_LOG_FATAL,
+ slapi_log_error( SLAPI_LOG_PLUGIN,
MEMBEROF_PLUGIN_SUBSYSTEM,
"memberof_modop_one_replace_r: group recursion"
" detected in %s\n"
@@ -1065,38 +1078,13 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config,
goto bail;
}
- /* We need to deal with delete cases separately. We may not
- * want to remove a memberof attribute from an entry since
- * it could still be a member in some other indirect manner. */
- if(stack && LDAP_MOD_DELETE == mod_op)
+ /* For add and del modify operations, we just regenerate the
+ * memberOf attribute. */
+ if(LDAP_MOD_DELETE == mod_op || LDAP_MOD_ADD == mod_op)
{
- if(memberof_is_legit_member(pb, config, group_dn,
- op_this, op_to, stack))
- {
- /* entry is member some other way too */
- slapi_log_error( SLAPI_LOG_PLUGIN,
- MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_modop_one_replace_r: not deleting %s\n"
- ,op_to);
- goto bail;
- }
- }
-
- /* Check if the entry is still an indirect member. If it is, we
- * don't want to remove the memberOf value. */
- if((LDAP_MOD_DELETE != mod_op) ||
- (0 == memberof_is_member(config, this_dn_val, to_dn_val))) {
- /* If we're about to add a memberOf value to an entry, we should first check
- * if the value already exists. */
- if((LDAP_MOD_ADD == mod_op) && (slapi_entry_attr_has_syntax_value(e,
- config->memberof_attr, this_dn_val)))
- {
- slapi_log_error( SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_modop_one_replace_r: memberOf value %s already exists in "
- "entry %s\n", op_this, op_to);
- goto bail;
- }
-
+ /* find parent groups and replace our member attr */
+ memberof_fix_memberof_callback(e, config);
+ } else {
/* single entry - do mod */
mod_pb = slapi_pblock_new();
@@ -1113,7 +1101,6 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config,
val[0] = op_this;
val[1] = 0;
-
mod.mod_op = LDAP_MOD_REPLACE == mod_op?LDAP_MOD_DELETE:mod_op;
mod.mod_type = config->memberof_attr;
mod.mod_values = val;
@@ -1141,22 +1128,6 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, MemberOfConfig *config,
slapi_pblock_destroy(mod_pb);
}
-
- if(LDAP_MOD_DELETE == mod_op)
- {
- /* fix up membership for groups that have been orphaned */
- memberof_test_membership_callback(e, config);
- }
-
- if(LDAP_MOD_ADD == mod_op)
- {
- /* If we failed to update memberOf for op_to, we shouldn't
- * try to fix up membership for parent groups. */
- if (rc == 0) {
- /* fix up membership for groups that are now in scope */
- memberof_add_membership(pb, config, op_this, op_to);
- }
- }
}
bail:
@@ -1435,26 +1406,88 @@ int memberof_moddn_attr_list(Slapi_PBlock *pb, MemberOfConfig *config,
return rc;
}
-typedef struct _memberof_add_groups
+/* memberof_get_groups()
+ *
+ * Gets a list of all groups that an entry is a member of.
+ * This is done by looking only at member attribute values.
+ * A Slapi_ValueSet* is returned. It is up to the caller to
+ * free it.
+ */
+Slapi_ValueSet *memberof_get_groups(MemberOfConfig *config, char *memberdn)
{
- MemberOfConfig *config;
- char *target_dn;
- char *group_dn;
-} memberof_add_groups;
+ Slapi_Value *memberdn_val = slapi_value_new_string(memberdn);
+ Slapi_ValueSet *groupvals = slapi_valueset_new();
+ memberof_get_groups_data data = {config, memberdn_val, &groupvals};
-int memberof_add_membership(Slapi_PBlock *pb, MemberOfConfig *config,
- char *op_this, char *op_to)
-{
- memberof_add_groups data = {config, op_to, op_this};
+ memberof_get_groups_r(config, memberdn, &data);
- return memberof_call_foreach_dn(pb, op_this, config->groupattr,
- memberof_add_groups_search_callback, &data);
+ slapi_value_free(&memberdn_val);
+
+ return groupvals;
}
-int memberof_add_groups_search_callback(Slapi_Entry *e, void *callback_data)
+int memberof_get_groups_r(MemberOfConfig *config, char *memberdn, memberof_get_groups_data *data)
{
- return memberof_add_one(0, ((memberof_add_groups*)callback_data)->config, slapi_entry_get_dn(e),
- ((memberof_add_groups*)callback_data)->target_dn);
+ /* Search for member=<memberdn>
+ * For each match, add it to the list, recurse and do same search */
+ memberof_call_foreach_dn(0, memberdn, config->groupattr,
+ memberof_get_groups_callback, data);
+}
+
+/* memberof_get_groups_callback()
+ *
+ * Callback to perform work of memberof_get_groups()
+ */
+int memberof_get_groups_callback(Slapi_Entry *e, void *callback_data)
+{
+ char *group_dn = slapi_entry_get_dn(e);
+ Slapi_Value *group_dn_val = 0;
+ Slapi_ValueSet *groupvals = *((memberof_get_groups_data*)callback_data)->groupvals;
+
+ /* get the DN of the group */
+ group_dn_val = slapi_value_new_string(group_dn);
+
+ /* check if e is the same as our original member entry */
+ if (0 == memberof_compare(((memberof_get_groups_data*)callback_data)->config,
+ &((memberof_get_groups_data*)callback_data)->memberdn_val, &group_dn_val))
+ {
+ /* A recursive group caused us to find our original
+ * entry we passed to memberof_get_groups(). We just
+ * skip processing this entry. */
+ slapi_log_error( SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_get_groups_callback: group recursion"
+ " detected in %s\n" ,group_dn);
+ slapi_value_free(&group_dn_val);
+ goto bail;
+
+ }
+
+ /* have we been here before? */
+ if (groupvals &&
+ slapi_valueset_find(((memberof_get_groups_data*)callback_data)->config->group_slapiattr,
+ groupvals, group_dn_val))
+ {
+ /* we either hit a recursive grouping, or an entry is
+ * a member of a group through multiple paths. Either
+ * way, we can just skip processing this entry since we've
+ * already gone through this part of the grouping hierarchy. */
+ slapi_log_error( SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_get_groups_callback: possible group recursion"
+ " detected in %s\n" ,group_dn);
+ slapi_value_free(&group_dn_val);
+ goto bail;
+ }
+
+ /* Push group_dn_val into the valueset. This memory is now owned
+ * by the valueset. */
+ slapi_valueset_add_value_ext(groupvals, group_dn_val, SLAPI_VALUE_FLAG_PASSIN);
+
+ /* now recurse to find parent groups of e */
+ memberof_get_groups_r(((memberof_get_groups_data*)callback_data)->config,
+ group_dn, callback_data);
+
+ bail:
+ return 0;
}
/* memberof_is_direct_member()
@@ -1491,160 +1524,6 @@ int memberof_is_direct_member(MemberOfConfig *config, Slapi_Value *groupdn,
return rc;
}
-/* memberof_is_member()
- *
- * tests for membership of memberdn in group groupdn. This
- * will check for both direct and indirect membership.
- * returns non-zero when true, zero otherwise
- */
-int memberof_is_member(MemberOfConfig *config, Slapi_Value *groupdn,
- Slapi_Value *memberdn)
-{
- memberofstringll *stack = 0;
-
- /* Do a quick check to see if the entry is a direct
- * member before tracing through nested groups. */
- if(memberof_is_direct_member(config, groupdn, memberdn))
- {
- /* entry is a direct member */
- return 1;
- }
-
- return memberof_is_member_r(config, groupdn, memberdn, stack);
-}
-
-/* memberof_is_member_r()
- *
- * Recursive function to do the work for the memberof_is_member()
- * function. This will basically check if "memberdn" is a member
- * of the group represented by "groupdn". Only "member" attribute
- * values will be used to make this determination, not "memberOf"
- * attribute values.
- *
- * returns non-zero when true, zero otherwise
- */
-int memberof_is_member_r(MemberOfConfig *config, Slapi_Value *groupdn,
- Slapi_Value *memberdn, memberofstringll *stack)
-{
- Slapi_DN *member_sdn = 0;
- Slapi_DN *base_sdn = 0;
- Slapi_PBlock *search_pb = slapi_pblock_new();
- Slapi_Backend *be = 0;
- Slapi_Value *ll_dn_val = 0;
- memberofstringll *ll = stack;
- char *filter_str = 0;
- int rc = 0;
-
- /* Check if we've processed memberdn already to detect looped
- * groupings. We want to do this right away to avoid any
- * unecessary processing. */
- while(ll)
- {
- ll_dn_val = slapi_value_new_string(ll->dn);
-
- if(0 == memberof_compare(config, &ll_dn_val, &memberdn))
- {
- slapi_value_free(&ll_dn_val);
-
- /* someone set up infinitely
- * recursive groups - bail out */
- slapi_log_error( SLAPI_LOG_FATAL,
- MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_is_member_r: group recursion"
- " detected in %s\n"
- ,slapi_value_get_string(memberdn));
- /* We set this to null to avoid freeing it twice.
- * If we don't do this, we'd free ll in the bail section
- * and the caller (ourselves since we're using recursion)
- * would free it as well. */
- ll = 0;
- goto bail;
- }
-
- slapi_value_free(&ll_dn_val);
- ll = ll->next;
- }
-
- /* push memberdn onto the stack to detect loops */
- ll = (memberofstringll*)slapi_ch_malloc(sizeof(memberofstringll));
- ll->dn = slapi_value_get_string(memberdn);
- ll->next = stack;
-
- /* Find the backend suffix that memberdn is in so we can
- * use it as a search base. */
- member_sdn = slapi_sdn_new_dn_byref(slapi_value_get_string(memberdn));
- be = slapi_be_select(member_sdn);
- if(be)
- {
- base_sdn = (Slapi_DN*)slapi_be_getsuffix(be,0);
- }
-
- /* Do a search for "member=<memberdn>". Go through matches to
- * see if it is our group. If not, search for "member=<matchdn>"
- * and keep looping until we've exhausted it. */
- if(base_sdn)
- {
- filter_str = slapi_ch_smprintf("(%s=%s)", config->groupattr,
- slapi_value_get_string(memberdn));
- }
-
- if(filter_str)
- {
- slapi_search_internal_set_pb(search_pb, slapi_sdn_get_dn(base_sdn),
- LDAP_SCOPE_SUBTREE, filter_str, 0, 0,
- 0, 0,
- memberof_get_plugin_id(),
- 0);
-
- if(slapi_search_internal_pb(search_pb))
- {
- /* get result and log an error */
- int res = 0;
- slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
- slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_is_member_r: error searching for groups: %d",
- res);
- goto bail;
- } else {
- Slapi_Entry **entries = NULL;
- slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
- if ( NULL != entries && NULL != entries[0])
- {
- int i;
-
- for(i = 0; entries[i] != NULL; i++)
- {
- /* Iterate through the matches checking if the dn is our groupdn. */
- if(strcasecmp(slapi_entry_get_ndn(entries[i]), slapi_value_get_string(groupdn)) == 0)
- {
- /* This is the group we've been searching for, so
- * set rc and bail. */
- rc = 1;
- break;
- } else {
- /* This is not the group you're looking for...
- * Find all of the groups that this group is a member of to
- * see if any of them are the group we are trying to find.
- * We do this by doing a recursive call on this function. */
- Slapi_Value *entrydn = slapi_value_new_string(slapi_entry_get_ndn(entries[i]));
- rc = memberof_is_member_r(config, groupdn, entrydn, ll);
- slapi_value_free(&entrydn);
- }
- }
- }
- }
- }
-
- bail:
- slapi_ch_free((void **)&ll);
- slapi_ch_free_string(&filter_str);
- slapi_sdn_free(&member_sdn);
- slapi_free_search_results_internal(search_pb);
- slapi_pblock_destroy(search_pb);
-
- return rc;
-}
-
/* memberof_test_membership()
*
* Finds all entries who are a "memberOf" the group
@@ -2038,156 +1917,6 @@ int memberof_qsort_compare(const void *a, const void *b)
slapi_value_get_berval(val2));
}
-/* memberof_is_legit_member()
- *
- * before we rush to remove this group from the entry
- * we need to be sure that the entry is not a member
- * of the group for another legitimate reason i.e.
- * that it is not itself a direct member of the group,
- * and that all groups in its memberof attribute except
- * the second from bottom one of our stack do not appear
- * in the membership attribute of the group
-*/
-int memberof_is_legit_member(Slapi_PBlock *pb, MemberOfConfig *config,
- char *group_dn, char *op_this, char *op_to, memberofstringll *stack)
-{
- int rc = 0;
- Slapi_DN *group_sdn = 0;
- Slapi_Entry *group_e = 0;
- Slapi_DN *opto_sdn = 0;
- Slapi_Entry *opto_e = 0;
- char *filter_str = 0;
- Slapi_Filter *filter = 0;
- memberofstringll *ll = 0;
- char *attrlist[2] = {config->groupattr,0};
- char *optolist[2] = {config->memberof_attr,0};
- Slapi_Attr *memberof = 0;
- Slapi_Value *memberdn = 0;
- int hint = 0;
- const char *delete_group_dn = 0;
-
- slapi_log_error( SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
- "--> memberof_is_legit_member\n" );
-
- /* first test entry */
- group_sdn = slapi_sdn_new_dn_byref(op_this);
- slapi_search_internal_get_entry( group_sdn, attrlist,
- &group_e, memberof_get_plugin_id());
- slapi_sdn_free(&group_sdn);
-
- if(!group_e)
- {
- goto bail;
- }
-
- filter_str = slapi_ch_smprintf("(%s=%s)", config->groupattr, op_to);
- filter = slapi_str2filter(filter_str);
-
- if(!slapi_filter_test_simple(group_e, filter))
- {
- /* entry is direct member */
- slapi_log_error( SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_is_legit_member: %s direct member of %s\n"
- ,op_to,op_this);
- slapi_filter_free(filter,0);
- rc = 1;
- goto bail;
- }
-
- slapi_filter_free(filter,0);
-
- /* test all group dns in stack
- the top dn is the group we remove the entry from
- second from bottom dn is being removed from the
- bottom group, we ignore those two
- */
- ll = stack;
-
- /* need to be 2 items left on the stack */
- while( ll &&
- ll->next &&
- ((memberofstringll*)ll->next)->next)
- {
- ll = ll->next;
- }
-
- if(!ll || !ll->next)
- {
- /* tight recursion, bail */
- goto bail;
- }
-
- delete_group_dn = ((memberofstringll*)ll->next)->dn;
-
- /* get the target entry memberof attribute */
- opto_sdn = slapi_sdn_new_dn_byref(op_to);
- slapi_search_internal_get_entry( opto_sdn, optolist,
- &opto_e, memberof_get_plugin_id());
- slapi_sdn_free(&opto_sdn);
-
- if(opto_e)
- {
- slapi_entry_attr_find(opto_e, config->memberof_attr, &memberof);
- }
-
- if(0 == memberof)
- {
- goto bail;
- }
-
- /* iterate through memberof values and test against group membership */
- hint = slapi_attr_first_value(memberof, &memberdn);
-
- while(memberdn)
- {
- char *dn = (char*)slapi_value_get_string(memberdn);
- int current_size =
- (strlen(config->groupattr) +
- strlen(dn) + 4); /* 4 for (=) + null */
-
- /* disregard the group being removed */
- if(0 == strcmp(dn, delete_group_dn))
- {
- hint = slapi_attr_next_value(memberof, hint, &memberdn);
- continue;
- }
-
- if (current_size > strlen(filter_str))
- {
- int filter_size = 2 * current_size;
- filter_str = slapi_ch_realloc(filter_str, filter_size);
- }
-
- sprintf(filter_str, "(%s=%s)", config->groupattr, dn);
- filter = slapi_str2filter(filter_str);
-
- if(!slapi_filter_test_simple(group_e, filter))
- {
- /* another group allows entry */
- slapi_log_error( SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
- "memberof_is_legit_member: %s is group member of %s\n"
- ,op_to,dn);
- slapi_filter_free(filter,0);
-
- rc = 1;
- goto bail;
- }
-
- slapi_filter_free(filter,0);
-
- hint = slapi_attr_next_value(memberof, hint, &memberdn);
- }
-
-bail:
- slapi_entry_free(group_e);
- slapi_entry_free(opto_e);
- slapi_ch_free_string(&filter_str);
-
- slapi_log_error( SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
- "<-- memberof_is_legit_member\n" );
- return rc;
-}
-
void memberof_lock()
{
slapi_lock_mutex(memberof_operation_lock);
@@ -2235,6 +1964,8 @@ void memberof_fixup_task_thread(void *arg)
/* release the memberOf operation lock */
memberof_unlock();
+ memberof_free_config(&configCopy);
+
slapi_task_log_notice(task, "Memberof task finished.");
slapi_task_log_status(task, "Memberof task finished.");
slapi_task_inc_progress(task);
@@ -2372,15 +2103,57 @@ int memberof_fix_memberof_callback(Slapi_Entry *e, void *callback_data)
int rc = 0;
char *dn = slapi_entry_get_dn(e);
MemberOfConfig *config = (MemberOfConfig *)callback_data;
- memberof_add_groups data = {config, dn, dn};
+ memberof_del_dn_data del_data = {0, config->memberof_attr};
+ Slapi_ValueSet *groups = 0;
+
+ /* get a list of all of the groups this user belongs to */
+ groups = memberof_get_groups(config, dn);
+
+ /* If we found some groups, replace the existing memberOf attribute
+ * with the found values. */
+ if (groups && slapi_valueset_count(groups))
+ {
+ Slapi_PBlock *mod_pb = slapi_pblock_new();
+ Slapi_Value *val = 0;
+ Slapi_Mod smod;
+ LDAPMod **mods = (LDAPMod **) slapi_ch_malloc(2 * sizeof(LDAPMod *));
+ int hint = 0;
+
+ slapi_mod_init(&smod, 0);
+ slapi_mod_set_operation(&smod, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES);
+ slapi_mod_set_type(&smod, config->memberof_attr);
+
+ /* Loop through all of our values and add them to smod */
+ hint = slapi_valueset_first_value(groups, &val);
+ while (val)
+ {
+ /* this makes a copy of the berval */
+ slapi_mod_add_value(&smod, slapi_value_get_berval(val));
+ hint = slapi_valueset_next_value(groups, hint, &val);
+ }
+
+ mods[0] = slapi_mod_get_ldapmod_passout(&smod);
+ mods[1] = 0;
+
+ slapi_modify_internal_set_pb(
+ mod_pb, dn, mods, 0, 0,
+ memberof_get_plugin_id(), 0);
- /* step 1 */
- slapi_entry_attr_delete(e, config->memberof_attr);
+ slapi_modify_internal_pb(mod_pb);
- /* step 2 and 3 */
- rc = memberof_call_foreach_dn(0, dn, config->groupattr,
- memberof_add_groups_search_callback, &data);
+ slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ ldap_mods_free(mods, 1);
+ slapi_mod_done(&smod);
+ slapi_pblock_destroy(mod_pb);
+ } else {
+ /* No groups were found, so remove the memberOf attribute
+ * from this entry. */
+ memberof_del_dn_type_callback(e, &del_data);
+ }
+
+ slapi_valueset_free(groups);
+
return rc;
}
diff --git a/ldap/servers/plugins/memberof/memberof.h b/ldap/servers/plugins/memberof/memberof.h
index 139be7843..268f16c7f 100644
--- a/ldap/servers/plugins/memberof/memberof.h
+++ b/ldap/servers/plugins/memberof/memberof.h
@@ -84,6 +84,7 @@ typedef struct memberofconfig {
*/
int memberof_config(Slapi_Entry *config_e);
void memberof_copy_config(MemberOfConfig *dest, MemberOfConfig *src);
+void memberof_free_config(MemberOfConfig *config);
MemberOfConfig *memberof_get_config();
void memberof_lock();
void memberof_unlock();
diff --git a/ldap/servers/plugins/memberof/memberof_config.c b/ldap/servers/plugins/memberof/memberof_config.c
index 63f3f7a8b..60679b695 100644
--- a/ldap/servers/plugins/memberof/memberof_config.c
+++ b/ldap/servers/plugins/memberof/memberof_config.c
@@ -290,6 +290,23 @@ memberof_copy_config(MemberOfConfig *dest, MemberOfConfig *src)
}
}
+/*
+ * memberof_free_config()
+ *
+ * Free's the contents of a config structure.
+ */
+void
+memberof_free_config(MemberOfConfig *config)
+{
+ if (config)
+ {
+ slapi_ch_free_string(&config->groupattr);
+ slapi_filter_free(config->group_filter, 1);
+ slapi_attr_free(&config->group_slapiattr);
+ slapi_ch_free_string(&config->memberof_attr);
+ }
+}
+
/*
* memberof_get_config()
*
| 0 |
50a963bba741c75d0d4479ec70f4b9832d781a2d
|
389ds/389-ds-base
|
Resolves: #471998
Summary: dbverify: support integer type index
Description:
1) changed dblayer_bt_compare to public (proto-back-ldbm.h, dblayer.c)
2) set dblayer_bt_compare by dbp->set_bt_compare if the attribute has a
comparison function set in ai->ai_key_cmp_fn (dbverify.c)
3) cleaned up the function dbverify_ext; set the right page size based upon the
idl type (new idl or old idl), also set dup compare function only when the idl
type is new. (dbverify.c)
|
commit 50a963bba741c75d0d4479ec70f4b9832d781a2d
Author: Noriko Hosoi <[email protected]>
Date: Wed Nov 19 02:52:42 2008 +0000
Resolves: #471998
Summary: dbverify: support integer type index
Description:
1) changed dblayer_bt_compare to public (proto-back-ldbm.h, dblayer.c)
2) set dblayer_bt_compare by dbp->set_bt_compare if the attribute has a
comparison function set in ai->ai_key_cmp_fn (dbverify.c)
3) cleaned up the function dbverify_ext; set the right page size based upon the
idl type (new idl or old idl), also set dup compare function only when the idl
type is new. (dbverify.c)
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 9e539abaf..5a1075e47 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -243,7 +243,7 @@ static int dblayer_db_remove_ex(dblayer_private_env *env, char const path[], cha
see also DBTcmp
*/
-static int
+int
dblayer_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2)
{
struct berval bv1, bv2;
diff --git a/ldap/servers/slapd/back-ldbm/dbverify.c b/ldap/servers/slapd/back-ldbm/dbverify.c
index aaece1fff..992bc2fc3 100644
--- a/ldap/servers/slapd/back-ldbm/dbverify.c
+++ b/ldap/servers/slapd/back-ldbm/dbverify.c
@@ -111,25 +111,67 @@ dbverify_ext( ldbm_instance *inst, int verbose )
"Unable to create id2entry db file %d\n", rval);
return rval;
}
+
#define VLVPREFIX "vlv#"
- if ((0 != strncmp(direntry->name, ID2ENTRY, strlen(ID2ENTRY))) &&
- (0 != strncmp(direntry->name, VLVPREFIX, strlen(VLVPREFIX))))
+ if (0 != strncmp(direntry->name, ID2ENTRY, strlen(ID2ENTRY)))
{
- rval = dbp->set_flags(dbp, DB_DUP | DB_DUPSORT);
- if (0 != rval)
+ struct attrinfo *ai = NULL;
+ char *p = NULL;
+ p = strstr(filep, LDBM_FILENAME_SUFFIX); /* since already checked,
+ it must have it */
+ *p = '\0';
+ ainfo_get( inst->inst_be, filep+1, &ai );
+ *p = '.';
+ if (ai->ai_key_cmp_fn) {
+ dbp->app_private = (void *)ai->ai_key_cmp_fn;
+ dbp->set_bt_compare(dbp, dblayer_bt_compare);
+ }
+ if (idl_get_idl_new())
{
- slapi_log_error(SLAPI_LOG_FATAL, "DB verify",
- "Unable to set DUP flags to db %d\n", rval);
- return rval;
+ rval = dbp->set_pagesize(dbp,
+ (priv->dblayer_index_page_size == 0) ?
+ DBLAYER_INDEX_PAGESIZE : priv->dblayer_index_page_size);
+ }
+ else
+ {
+ rval = dbp->set_pagesize(dbp,
+ (priv->dblayer_page_size == 0) ?
+ DBLAYER_PAGESIZE : priv->dblayer_page_size);
}
-
- rval = dbp->set_dup_compare(dbp, idl_new_compare_dups);
if (0 != rval)
{
slapi_log_error(SLAPI_LOG_FATAL, "DB verify",
- "Unable to set dup_compare to db %d\n", rval);
+ "Unable to set pagesize flags to db (%d)\n", rval);
return rval;
}
+ if (0 == strncmp(direntry->name, VLVPREFIX, strlen(VLVPREFIX)))
+ {
+ rval = dbp->set_flags(dbp, DB_RECNUM);
+ if (0 != rval)
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, "DB verify",
+ "Unable to set RECNUM flag to vlv index (%d)\n", rval);
+ return rval;
+ }
+ }
+ else if (idl_get_idl_new())
+ {
+ rval = dbp->set_flags(dbp, DB_DUP | DB_DUPSORT);
+ if (0 != rval)
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, "DB verify",
+ "Unable to set DUP flags to db (%d)\n", rval);
+ return rval;
+ }
+
+ rval = dbp->set_dup_compare(dbp, idl_new_compare_dups);
+ if (0 != rval)
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, "DB verify",
+ "Unable to set dup_compare to db (%d)\n", rval);
+ return rval;
+ }
+ }
}
#undef VLVPREFIX
rval = dbp->verify(dbp, dbdir, NULL, NULL, 0);
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 2ca1647d0..341e64cb7 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -170,6 +170,8 @@ int dblayer_db_uses_locking(DB_ENV *db_env);
int dblayer_db_uses_transactions(DB_ENV *db_env);
int dblayer_db_uses_mpool(DB_ENV *db_env);
int dblayer_db_uses_logging(DB_ENV *db_env);
+int dblayer_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2);
+
/*
* dn2entry.c
| 0 |
c39c7bbca984922dc8c9f33add65a5f5662fcba4
|
389ds/389-ds-base
|
Ticket 50798 - incorrect bytes in format string
Bug Description: We did not use ensure_bytes on a command output in
format strings. Python 3 subprocess returens bytes, but format string
expects utf8
Fix Description: Wrap the values in the correct safety wrappers.
https://pagure.io/389-ds-base/issue/50798
Author: William Brown <[email protected]>
Review by: mreynolds, mhonek (Thanks)
|
commit c39c7bbca984922dc8c9f33add65a5f5662fcba4
Author: William Brown <[email protected]>
Date: Mon Dec 30 14:18:16 2019 +1100
Ticket 50798 - incorrect bytes in format string
Bug Description: We did not use ensure_bytes on a command output in
format strings. Python 3 subprocess returens bytes, but format string
expects utf8
Fix Description: Wrap the values in the correct safety wrappers.
https://pagure.io/389-ds-base/issue/50798
Author: William Brown <[email protected]>
Review by: mreynolds, mhonek (Thanks)
diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
index c9a872eb7..41eaeb4a6 100644
--- a/src/lib389/lib389/instance/remove.py
+++ b/src/lib389/lib389/instance/remove.py
@@ -102,7 +102,10 @@ def remove_ds_instance(dirsrv, force=False):
result = subprocess.run(["systemctl", "disable", "dirsrv@{}".format(dirsrv.serverid)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- _log.debug(f"CMD: {' '.join(result.args)} ; STDOUT: {result.stdout} ; STDERR: {result.stderr}")
+ args = ' '.join(ensure_list_str(result.args))
+ stdout = ensure_str(result.stdout)
+ stderr = ensure_str(result.stderr)
+ _log.debug(f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}")
_log.debug("Removing %s" % tmpfiles_d_path)
try:
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index c1a85e595..ead3db1b6 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -783,7 +783,10 @@ class SetupDs(object):
# Should create the symlink we need, but without starting it.
result = subprocess.run(["systemctl", "enable", "dirsrv@%s" % slapd['instance_name']],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- self.log.debug(f"CMD: {' '.join(result.args)} ; STDOUT: {result.stdout} ; STDERR: {result.stderr}")
+ args = ' '.join(ensure_list_str(result.args))
+ stdout = ensure_str(result.stdout)
+ stderr = ensure_str(result.stderr)
+ self.log.debug(f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}")
# Setup tmpfiles_d
tmpfile_d = ds_paths.tmpfiles_d + "/dirsrv-" + slapd['instance_name'] + ".conf"
diff --git a/src/lib389/lib389/utils.py b/src/lib389/lib389/utils.py
index a2e57c111..b3a72721e 100644
--- a/src/lib389/lib389/utils.py
+++ b/src/lib389/lib389/utils.py
@@ -309,7 +309,10 @@ def selinux_label_port(port, remove_label=False):
"-p", "tcp", str(port)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
- log.debug(f"CMD: {' '.join(result.args)} ; STDOUT: {result.stdout} ; STDERR: {result.stderr}")
+ args = ' '.join(ensure_list_str(result.args))
+ stdout = ensure_str(result.stdout)
+ stderr = ensure_str(result.stderr)
+ log.debug(f"CMD: {args} ; STDOUT: {stdout} ; STDERR: {stderr}")
return
except (OSError, subprocess.CalledProcessError) as e:
label_ex = e
| 0 |
43325bbecf2eb1955a59b3ba7cd2c0d8e805842b
|
389ds/389-ds-base
|
[Bug 153175] SSL 6.x -> 7.0 migration script problems
Applied the proposed fix from HP.
|
commit 43325bbecf2eb1955a59b3ba7cd2c0d8e805842b
Author: Noriko Hosoi <[email protected]>
Date: Sat Apr 2 00:34:18 2005 +0000
[Bug 153175] SSL 6.x -> 7.0 migration script problems
Applied the proposed fix from HP.
diff --git a/ldap/admin/src/scripts/template-migrate6to7 b/ldap/admin/src/scripts/template-migrate6to7
index de58c85fd..169974776 100644
--- a/ldap/admin/src/scripts/template-migrate6to7
+++ b/ldap/admin/src/scripts/template-migrate6to7
@@ -443,6 +443,7 @@ SWITCH: {
@nsds5replicaAttrs = (
'objectclass',
+ 'nsDS5ReplicaName',
'nsDS5ReplicaRoot',
'nsDS5ReplicaType',
'nsDS5ReplicaLegacyConsumer',
@@ -486,6 +487,12 @@ printTrace("\nBackup $serverHome${PATHSEP}config on $serverHome${PATHSEP}config_
printTrace("\nMigrate the schema...",0);
MigrateSchema();
+#migrate key/cert databases
+# We will migrate the key/cert databases before the dse file because the
+# dse migration may cause SSL to be enabled.
+printTrace("\n\nMigrate key/cert databases...",0,1);
+&MigrateSSL();
+
# start the server unless it is already started
&startServer() unless (isDirectoryAlive());
@@ -527,10 +534,6 @@ printTrace("\n\nMigrate replicas...",0,1);
printTrace("\n\nMigrate replication agreements...",0,1);
&MigrateNSDS_replication_agreement();
-#migrate key/cert databases
-printTrace("\n\nMigrate key/cert databases...",0,1);
-&MigrateSSL();
-
# migrate certmap.conf
printTrace("\n\nMigrate Certmap.conf...",0,1);
&MigrateCertmap() ;
@@ -1600,7 +1603,7 @@ sub MigrateNSDS5_replica{
foreach $replica (@new6replicas) {
my $DN = $replica->getDN(1);
my $newReplica;
- my @removeAttrs = qw(nsstate nsds5replicaname nsds5replicachangecount);
+ my @removeAttrs = qw(nsstate nsds5replicachangecount);
for (@removeAttrs) {
$replica->remove($_);
}
@@ -1906,13 +1909,25 @@ sub migrateChangelog {
$newchangelogdir = ($newChangelog->getValues($changelogdir))[0];
stopServer($root,'slapd-'.$newname);
printTrace("\ncopying $oldchangelogdir${PATHSEP}* to $newchangelogdir",3);
- copyDir("$oldchangelogdir","$newchangelogdir");
- # We need to modify the DBVERSION file for a new verision of the db
- open(DBVERSION,">$newchangelogdir${PATHSEP}DBVERSION") || die "Can't overwrite $newchangelogdir${PATHSEP}DBVERSION: $! ";
- print DBVERSION "Changelog5/NSMMReplicationPlugin/3.0";
- close(DBVERSION);
+ # Clean destination changelog directory
+ opendir(NEWCLDIR, "$newchangelogdir");
+ while($delfile = readdir(NEWCLDIR)) {
+ if ( -f "$newchangelogdir/$delfile" ) {
+ unlink "$newchangelogdir/$delfile" or die "Can't delete $newchangelogdir/$delfile: $!\n";
+ }
+ }
+ copyDir("$oldchangelogdir","$newchangelogdir");
+
+ # We need to modify the DBVERSION file for a new verision of the db
+ # For 6.21 to 7.0, leave it as 4.0. For 6.11 to 7.0 we want the server
+ # to perform the migration at startup, so set it to 3.0.
+ if(substr($oldMinor,0,1) < 2) {
+ open(DBVERSION,">$newchangelogdir${PATHSEP}DBVERSION") || die "Can't overwrite $newchangelogdir${PATHSEP}DBVERSION: $! ";
+ print DBVERSION "Changelog5/NSMMReplicationPlugin/3.0";
+ close(DBVERSION);
+ }
&startServer() unless (isDirectoryAlive());
}
}
@@ -2437,12 +2452,15 @@ sub MigrateSSL {
if (! -d $aliasDir) {
mkdir($aliasDir, 0750);
}
- &stopServer($root,'slapd-'.$newname);
+ &stopServer($root,'slapd-'.$newname) if (isDirectoryAlive());
+
+ my $old_certdb_ver = (substr($oldMinor,0,1) >= 2) ? "8" : "7";
my $keydb = "$aliasDir${PATHSEP}slapd-$newname-key3.db" ;
my $certdb = "$aliasDir${PATHSEP}slapd-$newname-cert8.db" ;
- my $certdb7 = "$aliasDir${PATHSEP}slapd-$newname-cert7.db" ;
+ my $certdb_target = "$aliasDir${PATHSEP}slapd-$newname-cert${old_certdb_ver}.db" ;
my $old_keydb = "$oldDir${PATHSEP}alias${PATHSEP}slapd-$oldname-key3.db" ;
- my $old_certdb = "$oldDir${PATHSEP}alias${PATHSEP}slapd-$oldname-cert7.db";
+ my $old_certdb = "$oldDir${PATHSEP}alias${PATHSEP}slapd-$oldname-cert${old_certdb_ver}.db";
+
my $keydb_backup = "$aliasDir${PATHSEP}slapd-$newname-key3.db_backup" ;
my $certdb_backup = "$aliasDir${PATHSEP}slapd-$newname-cert7.db_backup" ;
if (-f $old_keydb) {
@@ -2471,19 +2489,19 @@ sub MigrateSSL {
printMsg("\n$certdb already exists. backup in $certdb_backup ...");
©BinFile($certdb,$certdb_backup);
unlink($certdb) || print "Couldn't delete $certdb : $!\n";
- ©BinFile($old_certdb,$certdb7);
+ ©BinFile($old_certdb,$certdb_target);
}
else {
print("\n\n$certdb already exists. Do you want to overwrite it ? [no]: ");
my $answer = <STDIN> ;
if ($answer =~ /^y|yes$/i) {
unlink($certdb) || print "Couldn't delete $certdb : $!\n";
- ©BinFile($old_certdb,$certdb7);
+ ©BinFile($old_certdb,$certdb_target);
}
}
}
else {
- ©BinFile($old_certdb,$certdb7);
+ ©BinFile($old_certdb,$certdb_target);
}
}
# copy the old password file
@@ -2494,7 +2512,7 @@ sub MigrateSSL {
);
}
&startServer();
- if ($PRESERVE) {
+ if ($PRESERVE && $old_certdb_ver == 7) {
chown($newuid,$newgid,$certdb) || print "Failed to set uid $newuid gid $newgid on $certdb : $!\n";
chmod($mode,$certdb) || print "Failed to set mode $mode on $certdb : $!\n";
}
@@ -3037,10 +3055,17 @@ sub migrate_credential{
chomp($credServerHome = <STDIN>);
}
}
+
+ my $cur_dir = getCwd();
+ my $migratecreddir = "${quote}$root${PATHSEP}bin${PATHSEP}slapd${PATHSEP}server";
+ chdir(${migratecreddir}) or die "Could not change directory to $migratecreddir: $!";
+
# print "\nMigratecred command is: ${quote}$root${PATHSEP}bin${PATHSEP}slapd${PATHSEP}server${PATHSEP}$migratecredExecName${quote} -o $credOldHome -n $credServerHome -c @old_value\n";
my @new_cred = `${quote}$root${PATHSEP}bin${PATHSEP}slapd${PATHSEP}server${PATHSEP}$migratecredExecName${quote} -o $credOldHome -n $credServerHome -c @old_value`;
+ chdir(${cur_dir}) or die "Could not change directory back to $cur_dir: $!";
+
if ( $? == 0 )
{
$entry_to_modify->setValues($credentials_attr, @new_cred);
| 0 |
c39efbf86a562f655e9170a1644fc979bad0c597
|
389ds/389-ds-base
|
Resolves: bug 486191
Description: slapd hang during cs80 cloning setup.
Fix Description: Not exactly related to the bug, but Noriko found a couple of places during investigation of internal add operations where the Slapi_Entry* could be leaked upon error. These fixes ensure that the entry is properly freed in case of error.
|
commit c39efbf86a562f655e9170a1644fc979bad0c597
Author: Rich Megginson <[email protected]>
Date: Thu Feb 19 23:39:50 2009 +0000
Resolves: bug 486191
Description: slapd hang during cs80 cloning setup.
Fix Description: Not exactly related to the bug, but Noriko found a couple of places during investigation of internal add operations where the Slapi_Entry* could be leaked upon error. These fixes ensure that the entry is properly freed in case of error.
diff --git a/ldap/servers/plugins/replication/cl4_api.c b/ldap/servers/plugins/replication/cl4_api.c
index 07bf7b247..c7d0ce023 100644
--- a/ldap/servers/plugins/replication/cl4_api.c
+++ b/ldap/servers/plugins/replication/cl4_api.c
@@ -263,7 +263,7 @@ static int _cl4WriteOperation (const slapi_operation_parameters *op)
{
int rc = CL4_SUCCESS, res;
char *changeEntryDN, *timeStr;
- Slapi_Entry *e;
+ Slapi_Entry *e = NULL;
Slapi_PBlock *pb = NULL;
Slapi_Value *values[3];
char s[CSN_STRSIZE];
@@ -364,6 +364,7 @@ static int _cl4WriteOperation (const slapi_operation_parameters *op)
pb = slapi_pblock_new (pb);
slapi_add_entry_internal_set_pb (pb, e, NULL, repl_get_plugin_identity (PLUGIN_LEGACY_REPLICATION), 0);
slapi_add_internal_pb (pb);
+ e = NULL; /* add consumes entry */
slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
slapi_pblock_destroy(pb);
@@ -380,6 +381,7 @@ static int _cl4WriteOperation (const slapi_operation_parameters *op)
}
done:
+ slapi_entry_free(e);
if (changeEntryDN)
slapi_ch_free((void **) &changeEntryDN);
diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c
index 8cd5bdfae..ed5b8a32a 100644
--- a/ldap/servers/plugins/replication/windows_protocol_util.c
+++ b/ldap/servers/plugins/replication/windows_protocol_util.c
@@ -3477,6 +3477,7 @@ windows_create_local_entry(Private_Repl_Protocol *prp,Slapi_Entry *remote_entry,
pb = slapi_pblock_new();
slapi_add_entry_internal_set_pb(pb, local_entry, NULL,repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION),0);
slapi_add_internal_pb(pb);
+ local_entry = NULL; /* consumed by add */
slapi_pblock_get(pb, SLAPI_PLUGIN_INTOP_RESULT, &retval);
if (retval) {
@@ -3484,6 +3485,7 @@ windows_create_local_entry(Private_Repl_Protocol *prp,Slapi_Entry *remote_entry,
"add operation of entry %s returned: %d\n", slapi_sdn_get_dn(local_sdn), retval);
}
error:
+ slapi_entry_free(local_entry);
slapi_ch_free_string(&guid_str);
if (pb)
{
| 0 |
f59ddfbcba763c20fb11afb4d42bf6c3f1f6cedd
|
389ds/389-ds-base
|
Issue 50276 - 389-ds-console is not built on RHEL8 if cockpit_dist is already present
Description: When we make srpm we want to make sure that 389-ds-console is built every time.
It is built only if it's not already there (clean up is required).
We should enforce the cockpit_dist building even if it's present.
https://pagure.io/389-ds-base/issue/50276
Reviewed by: mreynolds, vashirov (Thanks!)
|
commit f59ddfbcba763c20fb11afb4d42bf6c3f1f6cedd
Author: Simon Pichugin <[email protected]>
Date: Tue Mar 12 17:32:01 2019 +0100
Issue 50276 - 389-ds-console is not built on RHEL8 if cockpit_dist is already present
Description: When we make srpm we want to make sure that 389-ds-console is built every time.
It is built only if it's not already there (clean up is required).
We should enforce the cockpit_dist building even if it's present.
https://pagure.io/389-ds-base/issue/50276
Reviewed by: mreynolds, vashirov (Thanks!)
diff --git a/rpm.mk b/rpm.mk
index 494522962..fbb607685 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -13,7 +13,6 @@ JEMALLOC_URL ?= $(shell rpmspec -P $(RPMBUILD)/SPECS/389-ds-base.spec | awk '/^S
JEMALLOC_TARBALL ?= $(shell basename "$(JEMALLOC_URL)")
BUNDLE_JEMALLOC = 1
NODE_MODULES_TEST = src/cockpit/389-console/node_modules/webpack
-WEBPACK_TEST = src/cockpit/389-console/cockpit_dist/index.html
GIT_TAG = ${TAG}
# Some sanitizers are supported only by clang
@@ -37,7 +36,7 @@ clean:
$(NODE_MODULES_TEST):
cd src/cockpit/389-console; make -f node_modules.mk install
-$(WEBPACK_TEST): $(NODE_MODULES_TEST)
+build-cockpit: $(NODE_MODULES_TEST)
cd src/cockpit/389-console; make -f node_modules.mk build-cockpit-plugin
dist-bz2: $(NODE_MODULES_TEST)
@@ -53,7 +52,7 @@ dist-bz2: $(NODE_MODULES_TEST)
rm -rf node_modules; \
mv node_modules.release node_modules
-local-archive: $(WEBPACK_TEST)
+local-archive: build-cockpit
-mkdir -p dist/$(NAME_VERSION)
rsync -a --exclude=node_modules --exclude=dist --exclude=.git --exclude=rpmbuild . dist/$(NAME_VERSION)
| 0 |
04a0b6ac776a1d588ec2e10ff651e5015078ad21
|
389ds/389-ds-base
|
Issue 6229 - After an initial failure, subsequent online backups fail (#6230)
* Issue 6229 - After an initial failure, subsequent online backups will not work
Several issues related to backup task error handling:
Backends stay busy after the failure
Exit code is 0 in some cases
Crash if failing to open the backup directory
And a more general one:
lib389 Task DN collision
Solutions:
Always reset the busy flags that have been set
Ensure that 0 is not returned in error case
Avoid closing NULL directory descriptor
Use a timestamp having milliseconds precision to create the task DN
Issue: #6229
Reviewed by: @droideck (Thanks!)
|
commit 04a0b6ac776a1d588ec2e10ff651e5015078ad21
Author: progier389 <[email protected]>
Date: Fri Jun 28 18:56:49 2024 +0200
Issue 6229 - After an initial failure, subsequent online backups fail (#6230)
* Issue 6229 - After an initial failure, subsequent online backups will not work
Several issues related to backup task error handling:
Backends stay busy after the failure
Exit code is 0 in some cases
Crash if failing to open the backup directory
And a more general one:
lib389 Task DN collision
Solutions:
Always reset the busy flags that have been set
Ensure that 0 is not returned in error case
Avoid closing NULL directory descriptor
Use a timestamp having milliseconds precision to create the task DN
Issue: #6229
Reviewed by: @droideck (Thanks!)
diff --git a/dirsrvtests/tests/suites/backups/backup_test.py b/dirsrvtests/tests/suites/backups/backup_test.py
index 2637f1ae3..f1c34947e 100644
--- a/dirsrvtests/tests/suites/backups/backup_test.py
+++ b/dirsrvtests/tests/suites/backups/backup_test.py
@@ -10,11 +10,14 @@ import logging
import pytest
import os
import shutil
+import time
+import glob
from datetime import datetime
from lib389._constants import DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG
from lib389.properties import BACKEND_SAMPLE_ENTRIES, TASK_WAIT
from lib389.topologies import topology_st as topo, topology_m2 as topo_m2
-from lib389.backend import Backend
+from lib389.backend import Backends, Backend
+from lib389.dbgen import dbgen_users
from lib389.tasks import BackupTask, RestoreTask
from lib389.config import BDB_LDBMConfig
from lib389 import DSEldif
@@ -31,6 +34,58 @@ else:
logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
+
+
+BESTRUCT = [
+ { "bename" : "be1", "suffix": "dc=be1", "nbusers": 1000 },
+ { "bename" : "be2", "suffix": "dc=be2", "nbusers": 1000 },
+ { "bename" : "be3", "suffix": "dc=be3", "nbusers": 1000 },
+]
+
+
[email protected](scope="function")
+def mytopo(topo, request):
+ bes = []
+
+ def fin():
+ for be in bes:
+ be.delete()
+ for dir in glob.glob(f'{inst.ds_paths.backup_dir}/*'):
+ shutil.rmtree(dir)
+
+ if not DEBUGGING:
+ request.addfinalizer(fin)
+
+ inst = topo.standalone
+
+ ldif_files = {}
+ for d in BESTRUCT:
+ bename = d['bename']
+ suffix = d['suffix']
+ nbusers = d['nbusers']
+ log.info(f'Adding suffix: {suffix} and backend: {bename}...')
+ backends = Backends(inst)
+ try:
+ be = backends.create(properties={'nsslapd-suffix': suffix, 'name': bename})
+ # Insert at list head so that children backends get deleted before parent one.
+ bes.insert(0, be)
+ except ldap.UNWILLING_TO_PERFORM as e:
+ if str(e) == "Mapping tree for this suffix exists!":
+ pass
+ else:
+ raise e
+
+ ldif_dir = inst.get_ldif_dir()
+ ldif_files[bename] = os.path.join(ldif_dir, f'default_{bename}.ldif')
+ dbgen_users(inst, nbusers, ldif_files[bename], suffix)
+ inst.stop()
+ for d in BESTRUCT:
+ bename = d['bename']
+ inst.ldif2db(bename, None, None, None, ldif_files[bename])
+ inst.start()
+ return topo
+
+
def test_missing_backend(topo):
"""Test that an error is returned when a restore is performed for a
backend that is no longer present.
@@ -166,6 +221,61 @@ def test_replication(topo_m2):
repl.wait_for_replication(S1, S2)
+def test_backup_task_after_failure(mytopo):
+ """Test that new backup task is successful after a failure.
+ backend that is no longer present.
+
+ :id: a6c24898-2cd9-11ef-8c09-482ae39447e5
+ :setup: Standalone Instance with multiple backends
+ :steps:
+ 1. Cleanup
+ 2. Perform a back up
+ 3. Rename the backup directory while waiting for backup completion.
+ 4. Check that backup failed.
+ 5. Perform a back up
+ 6. Check that backup succeed.
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ 4. Backup should fail
+ 5. Success
+ 6. Backup should succeed
+ """
+
+ inst = mytopo.standalone
+ tasks = inst.tasks
+ archive_dir1 = f'{inst.ds_paths.backup_dir}/bak1'
+ archive_dir1b = f'{inst.ds_paths.backup_dir}/bak1b'
+ archive_dir2 = f'{inst.ds_paths.backup_dir}/bak2'
+
+ # Sometime the backup complete too fast, so lets retry if first
+ # backup is successful
+ for retry in range(50):
+ # Step 1. Perform cleanup
+ for dir in glob.glob(f'{inst.ds_paths.backup_dir}/*'):
+ shutil.rmtree(dir)
+ # Step 2. Perform a backup
+ tasks.db2bak(backup_dir=archive_dir1)
+ # Step 3. Wait until task is completed, trying to rename backup directory
+ done,exitCode,warningCode = (False, None, None)
+ while not done:
+ if os.path.isdir(archive_dir1):
+ os.rename(archive_dir1, archive_dir1b)
+ done,exitCode,warningCode = tasks.checkTask(tasks.entry)
+ time.sleep(0.01)
+ if exitCode != 0:
+ break
+ # Step 4. Check that backup failed.
+ # If next assert fails too often, that means that the backup is too fast
+ # A fix would would probably be to add more backends within mytopo
+ assert exitCode != 0, "Backup did not fail as expected."
+ # Step 5. Perform a seconf backup after backup failure
+ exitCode = tasks.db2bak(backup_dir=archive_dir2, args={TASK_WAIT: True})
+ # Step 6. Check it is successful
+ assert exitCode == 0, "Backup failed. Issue #6229 may not be fixed."
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
index 0460a42f6..6658cc80a 100644
--- a/ldap/servers/slapd/back-ldbm/archive.c
+++ b/ldap/servers/slapd/back-ldbm/archive.c
@@ -16,6 +16,8 @@
#include "back-ldbm.h"
#include "dblayer.h"
+#define NO_OBJECT ((Object*)-1)
+
int
ldbm_temporary_close_all_instances(Slapi_PBlock *pb)
{
@@ -270,6 +272,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
int run_from_cmdline = 0;
Slapi_Task *task;
struct stat sbuf;
+ Object *last_busy_inst_obj = NO_OBJECT;
slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory);
@@ -380,13 +383,12 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
/* to avoid conflict w/ import, do this check for commandline, as well */
{
- Object *inst_obj, *inst_obj2;
ldbm_instance *inst = NULL;
/* server is up -- mark all backends busy */
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
- inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
- inst = (ldbm_instance *)object_get_data(inst_obj);
+ for (last_busy_inst_obj = objset_first_obj(li->li_instance_set); last_busy_inst_obj;
+ last_busy_inst_obj = objset_next_obj(li->li_instance_set, last_busy_inst_obj)) {
+ inst = (ldbm_instance *)object_get_data(last_busy_inst_obj);
/* check if an import/restore is already ongoing... */
if (instance_set_busy(inst) != 0 || dblayer_in_import(inst) != 0) {
@@ -400,20 +402,6 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
"another task and cannot be disturbed.",
inst->inst_name);
}
-
- /* painfully, we have to clear the BUSY flags on the
- * backends we'd already marked...
- */
- for (inst_obj2 = objset_first_obj(li->li_instance_set);
- inst_obj2 && (inst_obj2 != inst_obj);
- inst_obj2 = objset_next_obj(li->li_instance_set,
- inst_obj2)) {
- inst = (ldbm_instance *)object_get_data(inst_obj2);
- instance_set_not_busy(inst);
- }
- if (inst_obj2 && inst_obj2 != inst_obj)
- object_release(inst_obj2);
- object_release(inst_obj);
goto err;
}
}
@@ -427,18 +415,26 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
goto err;
}
- if (!run_from_cmdline) {
+err:
+ /* Clear all BUSY flags that have been previously set */
+ if (last_busy_inst_obj != NO_OBJECT) {
ldbm_instance *inst;
Object *inst_obj;
- /* none of these backends are busy anymore */
- for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+ for (inst_obj = objset_first_obj(li->li_instance_set);
+ inst_obj && (inst_obj != last_busy_inst_obj);
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
inst = (ldbm_instance *)object_get_data(inst_obj);
instance_set_not_busy(inst);
}
+ if (last_busy_inst_obj != NULL) {
+ /* release last seen object for aborted objset_next_obj iterations */
+ if (inst_obj != NULL) {
+ object_release(inst_obj);
+ }
+ object_release(last_busy_inst_obj);
+ }
}
-err:
if (return_value) {
if (dir_bak) {
slapi_log_err(SLAPI_LOG_ERR,
@@ -727,7 +723,10 @@ ldbm_archive_config(char *bakdir, Slapi_Task *task)
}
error:
- PR_CloseDir(dirhandle);
+ if (NULL != dirhandle) {
+ PR_CloseDir(dirhandle);
+ dirhandle = NULL;
+ }
dse_backup_unlock();
slapi_ch_free_string(&backup_config_dir);
slapi_ch_free_string(&dse_file);
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
index 457c5ed60..35f8173a7 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
@@ -982,6 +982,9 @@ dbmdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
if (ldbm_archive_config(dest_dir, task) != 0) {
slapi_log_err(SLAPI_LOG_ERR, "dbmdb_backup",
"Backup of config files failed or is incomplete\n");
+ if (0 == return_value) {
+ return_value = -1;
+ }
}
goto bail;
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 9279bcc53..31c0c7f74 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -69,7 +69,7 @@ from lib389.utils import (
get_user_is_root)
from lib389.paths import Paths
from lib389.nss_ssl import NssSsl
-from lib389.tasks import BackupTask, RestoreTask
+from lib389.tasks import BackupTask, RestoreTask, Task
from lib389.dseldif import DSEldif
# mixin
@@ -1420,7 +1420,7 @@ class DirSrv(SimpleLDAPObject, object):
name, self.ds_paths.prefix)
# create the archive
- name = "backup_%s_%s.tar.gz" % (self.serverid, time.strftime("%m%d%Y_%H%M%S"))
+ name = "backup_%s_%s.tar.gz" % (self.serverid, Task.get_timestamp())
backup_file = os.path.join(backup_dir, name)
tar = tarfile.open(backup_file, "w:gz")
tar.extraction_filter = (lambda member, path: member)
@@ -2817,7 +2817,7 @@ class DirSrv(SimpleLDAPObject, object):
else:
# No output file specified. Use the default ldif location/name
cmd.append('-a')
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
if bename:
ldifname = os.path.join(self.ds_paths.ldif_dir, "%s-%s-%s.ldif" % (self.serverid, bename, tnow))
else:
@@ -2888,7 +2888,7 @@ class DirSrv(SimpleLDAPObject, object):
if archive_dir is None:
# Use the instance name and date/time as the default backup name
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
archive_dir = os.path.join(self.ds_paths.backup_dir, "%s-%s" % (self.serverid, tnow))
elif not archive_dir.startswith("/"):
# Relative path, append it to the bak directory
@@ -3510,7 +3510,7 @@ class DirSrv(SimpleLDAPObject, object):
if archive is None:
# Use the instance name and date/time as the default backup name
- tnow = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
+ tnow = Task.get_timestamp()
if self.serverid is not None:
backup_dir_name = "%s-%s" % (self.serverid, tnow)
else:
diff --git a/src/lib389/lib389/tasks.py b/src/lib389/lib389/tasks.py
index 193805780..c1a2e7aaa 100644
--- a/src/lib389/lib389/tasks.py
+++ b/src/lib389/lib389/tasks.py
@@ -118,7 +118,7 @@ class Task(DSLdapObject):
return super(Task, self).create(rdn, properties, basedn)
@staticmethod
- def _get_task_date():
+ def get_timestamp():
"""Return a timestamp to use in naming new task entries."""
return datetime.now().isoformat()
@@ -132,7 +132,7 @@ class AutomemberRebuildMembershipTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'automember_rebuild_' + Task._get_task_date()
+ self.cn = 'automember_rebuild_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_REBUILD_TASK
super(AutomemberRebuildMembershipTask, self).__init__(instance, dn)
@@ -147,7 +147,7 @@ class AutomemberAbortRebuildTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'automember_abort_' + Task._get_task_date()
+ self.cn = 'automember_abort_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_AUTOMEMBER_ABORT_REBUILD_TASK
super(AutomemberAbortRebuildTask, self).__init__(instance, dn)
@@ -161,7 +161,7 @@ class FixupLinkedAttributesTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'fixup_linked_attrs_' + Task._get_task_date()
+ self.cn = 'fixup_linked_attrs_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_FIXUP_LINKED_ATTIBUTES
super(FixupLinkedAttributesTask, self).__init__(instance, dn)
@@ -175,7 +175,7 @@ class MemberUidFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'memberUid_fixup_' + Task._get_task_date()
+ self.cn = 'memberUid_fixup_' + Task.get_timestamp()
dn = f"cn={self.cn},cn=memberuid task,cn=tasks,cn=config"
super(MemberUidFixupTask, self).__init__(instance, dn)
@@ -190,7 +190,7 @@ class MemberOfFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'memberOf_fixup_' + Task._get_task_date()
+ self.cn = 'memberOf_fixup_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_MBO_TASK
super(MemberOfFixupTask, self).__init__(instance, dn)
@@ -205,7 +205,7 @@ class USNTombstoneCleanupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'usn_cleanup_' + Task._get_task_date()
+ self.cn = 'usn_cleanup_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=USN tombstone cleanup task," + DN_TASKS
super(USNTombstoneCleanupTask, self).__init__(instance, dn)
@@ -225,7 +225,7 @@ class csngenTestTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'csngenTest_' + Task._get_task_date()
+ self.cn = 'csngenTest_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=csngen_test," + DN_TASKS
super(csngenTestTask, self).__init__(instance, dn)
@@ -238,7 +238,7 @@ class EntryUUIDFixupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'entryuuid_fixup_' + Task._get_task_date()
+ self.cn = 'entryuuid_fixup_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_EUUID_TASK
super(EntryUUIDFixupTask, self).__init__(instance, dn)
self._must_attributes.extend(['basedn'])
@@ -252,7 +252,7 @@ class DBCompactTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'compact_db_' + Task._get_task_date()
+ self.cn = 'compact_db_' + Task.get_timestamp()
dn = "cn=" + self.cn + "," + DN_COMPACTDB_TASK
super(DBCompactTask, self).__init__(instance, dn)
@@ -265,7 +265,7 @@ class SchemaReloadTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'schema_reload_' + Task._get_task_date()
+ self.cn = 'schema_reload_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=schema reload task," + DN_TASKS
super(SchemaReloadTask, self).__init__(instance, dn)
@@ -278,7 +278,7 @@ class SyntaxValidateTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'syntax_validate_' + Task._get_task_date()
+ self.cn = 'syntax_validate_' + Task.get_timestamp()
dn = f"cn={self.cn},cn=syntax validate,cn=tasks,cn=config"
super(SyntaxValidateTask, self).__init__(instance, dn)
@@ -295,7 +295,7 @@ class AbortCleanAllRUVTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'abortcleanallruv_' + Task._get_task_date()
+ self.cn = 'abortcleanallruv_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=abort cleanallruv," + DN_TASKS
super(AbortCleanAllRUVTask, self).__init__(instance, dn)
@@ -312,7 +312,7 @@ class CleanAllRUVTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'cleanallruv_' + Task._get_task_date()
+ self.cn = 'cleanallruv_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=cleanallruv," + DN_TASKS
self._properties = None
@@ -359,7 +359,7 @@ class ImportTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'import_' + Task._get_task_date()
+ self.cn = 'import_' + Task.get_timestamp()
dn = "cn=%s,%s" % (self.cn, DN_IMPORT_TASK)
self._properties = None
@@ -388,7 +388,7 @@ class ExportTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'export_' + Task._get_task_date()
+ self.cn = 'export_' + Task.get_timestamp()
dn = "cn=%s,%s" % (self.cn, DN_EXPORT_TASK)
self._properties = None
@@ -411,7 +411,7 @@ class BackupTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'backup_' + Task._get_task_date()
+ self.cn = 'backup_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=backup," + DN_TASKS
self._properties = None
@@ -426,7 +426,7 @@ class RestoreTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'restore_' + Task._get_task_date()
+ self.cn = 'restore_' + Task.get_timestamp()
dn = "cn=" + self.cn + ",cn=restore," + DN_TASKS
self._properties = None
@@ -513,7 +513,7 @@ class Tasks(object):
raise ValueError("Import file (%s) does not exist" % input_file)
# Prepare the task entry
- cn = "import_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "import_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_IMPORT_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -581,7 +581,7 @@ class Tasks(object):
raise ValueError("output_file is mandatory")
# Prepare the task entry
- cn = "export_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "export_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_EXPORT_TASK)
entry = Entry(dn)
entry.update({
@@ -637,7 +637,7 @@ class Tasks(object):
raise ValueError("You must specify a backup directory.")
# build the task entry
- cn = "backup_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "backup_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_BACKUP_TASK)
entry = Entry(dn)
entry.update({
@@ -694,7 +694,7 @@ class Tasks(object):
raise ValueError("Backup file (%s) does not exist" % backup_dir)
# build the task entry
- cn = "restore_" + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = "restore_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_RESTORE_TASK)
entry = Entry(dn)
entry.update({
@@ -789,7 +789,7 @@ class Tasks(object):
attrs.append(attr)
else:
attrs.append(attrname)
- cn = "index_vlv_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_vlv_%s" % (Task.get_timestamp())
dn = "cn=%s,%s" % (cn, DN_INDEX_TASK)
entry = Entry(dn)
entry.update({
@@ -803,7 +803,7 @@ class Tasks(object):
#
# Reindex all attributes - gather them first...
#
- cn = "index_all_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_all_%s" % (Task.get_timestamp())
dn = ('cn=%s,cn=ldbm database,cn=plugins,cn=config' % backend)
try:
indexes = self.conn.search_s(dn, ldap.SCOPE_SUBTREE, '(objectclass=nsIndex)')
@@ -815,7 +815,7 @@ class Tasks(object):
#
# Reindex specific attributes
#
- cn = "index_attrs_%s" % (time.strftime("%m%d%Y_%H%M%S", time.localtime()))
+ cn = "index_attrs_%s" % (Task.get_timestamp())
if isinstance(attrname, (tuple, list)):
# Need to guarantee this is a list (and not a tuple)
for attr in attrname:
@@ -903,8 +903,7 @@ class Tasks(object):
suffix = ents[0].getValue(attr)
- cn = "fixupmemberof_" + time.strftime("%m%d%Y_%H%M%S",
- time.localtime())
+ cn = "fixupmemberof_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_MBO_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -965,8 +964,7 @@ class Tasks(object):
if len(ents) != 1:
raise ValueError("invalid backend name: %s" % bename)
- cn = "fixupTombstone_" + time.strftime("%m%d%Y_%H%M%S",
- time.localtime())
+ cn = "fixupTombstone_" + Task.get_timestamp()
dn = "cn=%s,%s" % (cn, DN_TOMB_FIXUP_TASK)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1019,7 +1017,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember rebuild membership,cn=tasks,cn=config' % cn)
entry = Entry(dn)
@@ -1077,7 +1075,7 @@ class Tasks(object):
if not ldif_out:
raise ValueError("Missing ldif_out")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember export updates,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1129,7 +1127,7 @@ class Tasks(object):
if not ldif_out or not ldif_in:
raise ValueError("Missing ldif_out and/or ldif_in")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=automember map updates,cn=tasks,cn=config' % cn)
entry = Entry(dn)
@@ -1175,7 +1173,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=fixup linked attributes,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1219,7 +1217,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=schema reload task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1264,7 +1262,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=memberuid task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1311,7 +1309,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=syntax validate,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1358,7 +1356,7 @@ class Tasks(object):
@return exit code
'''
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=USN tombstone cleanup task,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1413,7 +1411,7 @@ class Tasks(object):
if not configfile:
raise ValueError("Missing required paramter: configfile")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=sysconfig reload,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1464,7 +1462,7 @@ class Tasks(object):
if not suffix:
raise ValueError("Missing required paramter: suffix")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=cleanallruv,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1516,7 +1514,7 @@ class Tasks(object):
if not suffix:
raise ValueError("Missing required paramter: suffix")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=abort cleanallruv,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1571,7 +1569,7 @@ class Tasks(object):
if not nsArchiveDir:
raise ValueError("Missing required paramter: nsArchiveDir")
- cn = 'task-' + time.strftime("%m%d%Y_%H%M%S", time.localtime())
+ cn = 'task-' + Task.get_timestamp()
dn = ('cn=%s,cn=upgradedb,cn=tasks,cn=config' % cn)
entry = Entry(dn)
entry.setValues('objectclass', 'top', 'extensibleObject')
@@ -1616,6 +1614,6 @@ class LDAPIMappingReloadTask(Task):
"""
def __init__(self, instance, dn=None):
- self.cn = 'reload-' + Task._get_task_date()
+ self.cn = 'reload-' + Task.get_timestamp()
dn = f'cn={self.cn},cn=reload ldapi mappings,cn=tasks,cn=config'
super(LDAPIMappingReloadTask, self).__init__(instance, dn)
| 0 |
54b941dea5b595302c47ed2ebecf8ec30dc76050
|
389ds/389-ds-base
|
Issue 49990 - Need to enforce a hard maximum limit for file descriptors
Description: on some platforms the maximum FD limit is high it can cause
a OOM at server startup. So we need to add a hard maximum
limit.
relates: https://pagure.io/389-ds-base/issue/49990
Reviewed by: firstyear & tbordaz (Thanks!!)
|
commit 54b941dea5b595302c47ed2ebecf8ec30dc76050
Author: Mark Reynolds <[email protected]>
Date: Thu Jan 23 14:38:13 2020 -0500
Issue 49990 - Need to enforce a hard maximum limit for file descriptors
Description: on some platforms the maximum FD limit is high it can cause
a OOM at server startup. So we need to add a hard maximum
limit.
relates: https://pagure.io/389-ds-base/issue/49990
Reviewed by: firstyear & tbordaz (Thanks!!)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 6c0b018bb..41c9bbd6b 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1579,7 +1579,9 @@ FrontendConfig_init(void)
#endif
/* Default the maximum fd's to the maximum allowed */
if (getrlimit(RLIMIT_NOFILE, &rlp) == 0) {
- maxdescriptors = (int64_t)rlp.rlim_max;
+ if ((int64_t)rlp.rlim_max < SLAPD_DEFAULT_MAXDESCRIPTORS) {
+ maxdescriptors = (int64_t)rlp.rlim_max;
+ }
}
/* Take the lock to make sure we barrier correctly. */
@@ -4355,7 +4357,7 @@ config_set_maxdescriptors(const char *attrname, char *value, char *errorbuf, int
{
int32_t retVal = LDAP_SUCCESS;
int64_t nValue = 0;
- int64_t maxVal = 524288;
+ int64_t maxVal = SLAPD_DEFAULT_MAXDESCRIPTORS;
struct rlimit rlp;
char *endp = NULL;
@@ -4366,7 +4368,9 @@ config_set_maxdescriptors(const char *attrname, char *value, char *errorbuf, int
}
if (0 == getrlimit(RLIMIT_NOFILE, &rlp)) {
- maxVal = (int)rlp.rlim_max;
+ if ((int64_t)rlp.rlim_max < maxVal) {
+ maxVal = (int64_t)rlp.rlim_max;
+ }
}
errno = 0;
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 06bf11804..1faa02edb 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -350,8 +350,8 @@ typedef void (*VFPV)(); /* takes undefined arguments */
#define SLAPD_DEFAULT_PAGEDSIZELIMIT 0
#define SLAPD_DEFAULT_PAGEDSIZELIMIT_STR "0"
-#define SLAPD_DEFAULT_MAXDESCRIPTORS 8192
-#define SLAPD_DEFAULT_MAXDESCRIPTORS_STR "8192"
+#define SLAPD_DEFAULT_MAXDESCRIPTORS 1048576
+#define SLAPD_DEFAULT_MAXDESCRIPTORS_STR "1048576"
#define SLAPD_DEFAULT_MAX_FILTER_NEST_LEVEL 40
#define SLAPD_DEFAULT_MAX_FILTER_NEST_LEVEL_STR "40"
#define SLAPD_DEFAULT_GROUPEVALNESTLEVEL 0
| 0 |
8808f1a9e85aeb44942312f494a1a8b7a0e5ded1
|
389ds/389-ds-base
|
Ticket 47652 - replica add fails: MT.list return a list not an entry
Reviewed by: Nathan Kinder
|
commit 8808f1a9e85aeb44942312f494a1a8b7a0e5ded1
Author: Thierry bordaz (tbordaz) <[email protected]>
Date: Fri Dec 20 18:46:23 2013 +0100
Ticket 47652 - replica add fails: MT.list return a list not an entry
Reviewed by: Nathan Kinder
diff --git a/src/lib389/lib389/brooker.py b/src/lib389/lib389/brooker.py
index 26405a572..3d430094b 100644
--- a/src/lib389/lib389/brooker.py
+++ b/src/lib389/lib389/brooker.py
@@ -681,7 +681,8 @@ class Replica(object):
# create replica entry in mapping-tree
nsuffix = normalizeDN(suffix)
- mtent = self.conn.mappingtree.list(suffix=suffix)
+ mtents = self.conn.mappingtree.list(suffix=suffix)
+ mtent = mtents[0]
dn_replica = ','.join(("cn=replica", mtent.dn))
try:
entry = self.conn.getEntry(dn_replica, ldap.SCOPE_BASE)
| 0 |
1b26ed9a1ed9a8b2b0e91e4cbad37ec1ad2f604c
|
389ds/389-ds-base
|
Issue 6193 - Test failure: test_tls_command_returns_error_text
Bug Description:
openssl changed error message in
https://github.com/openssl/openssl/commit/fedab100a4b8f4c3b81de632f29c159fb46ac3f2
Fix Description:
Adjust assert to use regex for different messages.
Fixes: https://github.com/389ds/389-ds-base/issues/6193
Reviewed by: @progier389 (Thanks!)
|
commit 1b26ed9a1ed9a8b2b0e91e4cbad37ec1ad2f604c
Author: Viktor Ashirov <[email protected]>
Date: Fri May 31 14:00:53 2024 +0200
Issue 6193 - Test failure: test_tls_command_returns_error_text
Bug Description:
openssl changed error message in
https://github.com/openssl/openssl/commit/fedab100a4b8f4c3b81de632f29c159fb46ac3f2
Fix Description:
Adjust assert to use regex for different messages.
Fixes: https://github.com/389ds/389-ds-base/issues/6193
Reviewed by: @progier389 (Thanks!)
diff --git a/dirsrvtests/tests/suites/clu/dsctl_tls_test.py b/dirsrvtests/tests/suites/clu/dsctl_tls_test.py
index 22360fa91..9ce5b4e2a 100644
--- a/dirsrvtests/tests/suites/clu/dsctl_tls_test.py
+++ b/dirsrvtests/tests/suites/clu/dsctl_tls_test.py
@@ -10,6 +10,7 @@ import logging
import pytest
import ssl
import os
+import re
from lib389.topologies import topology_st as topo
from lib389.nss_ssl import NssSsl
@@ -79,9 +80,10 @@ def test_tls_command_returns_error_text(topo):
except ValueError as e:
assert '255' not in str(e)
if 'OpenSSL 3' in ssl.OPENSSL_VERSION:
- assert 'Could not read private key from' in str(e)
+ error_message = r"Could not (read|find) private key from"
else:
- assert 'unable to load private key' in str(e)
+ error_message = r"unable to load private key"
+ assert re.search(error_message, str(e))
if __name__ == '__main__':
| 0 |
ec4ba307c7800ed96a623baa042f207ff0c78b11
|
389ds/389-ds-base
|
Resolves: bug 339041
Bug Description: migration : encryption key entries missing when source is 6.21
Reviewed by: self
Fix Description: Just always create those entries when creating the backend. The server does this at startup, but I guess for this case that occurs too late in the startup process.
Platforms tested: RHEL5 x86_64
Flag Day: no
Doc impact: no
QA impact: should be covered by regular nightly and manual testing
New Tests integrated into TET: none
|
commit ec4ba307c7800ed96a623baa042f207ff0c78b11
Author: Rich Megginson <[email protected]>
Date: Fri Oct 19 03:17:56 2007 +0000
Resolves: bug 339041
Bug Description: migration : encryption key entries missing when source is 6.21
Reviewed by: self
Fix Description: Just always create those entries when creating the backend. The server does this at startup, but I guess for this case that occurs too late in the startup process.
Platforms tested: RHEL5 x86_64
Flag Day: no
Doc impact: no
QA impact: should be covered by regular nightly and manual testing
New Tests integrated into TET: none
diff --git a/ldap/ldif/template-suffix-db.ldif.in b/ldap/ldif/template-suffix-db.ldif.in
index f30ab8cec..1b9782667 100644
--- a/ldap/ldif/template-suffix-db.ldif.in
+++ b/ldap/ldif/template-suffix-db.ldif.in
@@ -7,6 +7,16 @@ objectclass: nsBackendInstance
nsslapd-suffix: %ds_suffix%
cn: %ds_bename%
+dn: cn=encrypted attribute keys,cn=%ds_bename%,cn=ldbm database,cn=plugins,cn=config
+objectClass: top
+objectClass: extensibleObject
+cn: encrypted attributes keys
+
+dn: cn=encrypted attributes,cn=%ds_bename%,cn=ldbm database,cn=plugins,cn=config
+objectClass: top
+objectClass: extensibleObject
+cn: encrypted attributes
+
dn: cn="%ds_suffix%",cn=mapping tree,cn=config
objectclass: top
objectclass: extensibleObject
| 0 |
3d92679cf97518aedcf6534ac5967edf8d2c9d28
|
389ds/389-ds-base
|
Ticket bz1358565 - clear and unsalted password types are vulnerable to timing attack
Description: Fixing a compiler warning introduced by commit
f0e03b5a51972a125fe78f448d1f68e288782d1e.
(cherry picked from commit c62ea0c98445d31fb55baebe9778fe860b3266ea)
|
commit 3d92679cf97518aedcf6534ac5967edf8d2c9d28
Author: Noriko Hosoi <[email protected]>
Date: Mon Aug 8 10:12:33 2016 -0700
Ticket bz1358565 - clear and unsalted password types are vulnerable to timing attack
Description: Fixing a compiler warning introduced by commit
f0e03b5a51972a125fe78f448d1f68e288782d1e.
(cherry picked from commit c62ea0c98445d31fb55baebe9778fe860b3266ea)
diff --git a/ldap/servers/plugins/pwdstorage/clear_pwd.c b/ldap/servers/plugins/pwdstorage/clear_pwd.c
index 84dac2a5a..b9b362d34 100644
--- a/ldap/servers/plugins/pwdstorage/clear_pwd.c
+++ b/ldap/servers/plugins/pwdstorage/clear_pwd.c
@@ -25,7 +25,37 @@
int
clear_pw_cmp( const char *userpwd, const char *dbpwd )
{
- return( strcmp( userpwd, dbpwd ));
+ int result = 0;
+ int len_user = strlen(userpwd);
+ int len_dbp = strlen(dbpwd);
+ if ( len_user != len_dbp ) {
+ result = 1;
+ }
+ /* We have to do this comparison ANYWAY else we have a length timing attack. */
+ if ( len_user >= len_dbp ) {
+ /*
+ * If they are the same length, result will be 0 here, and if we pass
+ * the check, we don't update result either. IE we pass.
+ * However, even if the first part of userpw matches dbpwd, but len !=, we
+ * have already failed anyawy. This prevents substring matching.
+ */
+ if (slapi_ct_memcmp(userpwd, dbpwd, len_dbp) != 0) {
+ result = 1;
+ }
+ } else {
+ /*
+ * If we stretched the userPassword, we'll allow a new timing attack, where
+ * if we see a delay on a short pw, we know we are stretching.
+ * when the delay goes away, it means we've found the length.
+ * Instead, because we don't want to use the short pw for comp, we just compare
+ * dbpwd to itself. We have already got result == 1 if we are here, so we are
+ * just trying to take up time!
+ */
+ if (slapi_ct_memcmp(dbpwd, dbpwd, len_dbp)) {
+ /* Do nothing, we have the if to fix a coverity check. */
+ }
+ }
+ return result;
}
char *
| 0 |
09f23e38d394b87f7034927256ff52d3430131a3
|
389ds/389-ds-base
|
Ticket 58358 - Update spec file with pre-release versioning
Description: Update the spec file for Fedora Packaging guidelines
|
commit 09f23e38d394b87f7034927256ff52d3430131a3
Author: Mark Reynolds <[email protected]>
Date: Wed Dec 2 12:08:41 2015 -0500
Ticket 58358 - Update spec file with pre-release versioning
Description: Update the spec file for Fedora Packaging guidelines
diff --git a/src/lib389/lib389.spec b/src/lib389/lib389.spec
index bbe350e3a..d3431d693 100644
--- a/src/lib389/lib389.spec
+++ b/src/lib389/lib389.spec
@@ -3,13 +3,13 @@
%define name lib389
%define version 1.0.1
-%define release 1
+%define prerel 1
Summary: A library for accessing, testing, and configuring the 389 Directory Server
Name: %{name}
Version: %{version}
-Release: %{release}%{?dist}
-Source0: http://port389.org/binaries/%{name}-%{version}.tar.bz2
+Release: %{prerel}%{?dist}
+Source0: http://port389.org/binaries/%{name}-%{version}-%{prerel}.tar.bz2
License: GPLv3+
Group: Development/Libraries
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
@@ -28,7 +28,7 @@ configuring the 389 Directory Server.
%prep
%setup -qc
-mv %{name}-%{version} python2
+mv %{name}-%{version}-%{prerel} python2
%build
pushd python2
| 0 |
1c790df928ec3b0644584b3a6ad9ae737e1678b0
|
389ds/389-ds-base
|
Ticket 49086 - SDN premangaling broken after SASL change
Bug Description: SDN premangaling is broken after pw_verify change.
This was because we move the verification code to pw_verify, that
requires a target_address *and* a target_spec to look up what backend
we are about to select.
Fix Description: After a pre_bind, reset the target spec.
https://pagure.io/389-ds-base/issue/49086
Author: wibrown
Review by: mreynolds (Thanks!)
|
commit 1c790df928ec3b0644584b3a6ad9ae737e1678b0
Author: William Brown <[email protected]>
Date: Fri Feb 17 16:28:53 2017 +1000
Ticket 49086 - SDN premangaling broken after SASL change
Bug Description: SDN premangaling is broken after pw_verify change.
This was because we move the verification code to pw_verify, that
requires a target_address *and* a target_spec to look up what backend
we are about to select.
Fix Description: After a pre_bind, reset the target spec.
https://pagure.io/389-ds-base/issue/49086
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index d8811d58b..b4bb36340 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -626,6 +626,7 @@ do_bind( Slapi_PBlock *pb )
rc = 0;
/* Check if a pre_bind plugin mapped the DN to another backend */
+ Slapi_DN *target_spec_sdn = operation_get_target_spec(pb->pb_op);
Slapi_DN *pb_sdn;
slapi_pblock_get(pb, SLAPI_BIND_TARGET_SDN, &pb_sdn);
if (!pb_sdn) {
@@ -644,6 +645,14 @@ do_bind( Slapi_PBlock *pb )
goto free_and_return;
}
+ /* pagure 49086, it's too hard to actually try and merge the two SDN values
+ * without corrupting them. As a result, we need to update the target spec instead.
+ *
+ * It's really important that when we start to rethink pblock, that we kill this with fire.
+ */
+ slapi_sdn_free(&target_spec_sdn);
+ operation_set_target_spec(pb->pb_op, pb_sdn);
+
/* We could be serving multiple database backends. Select the appropriate one */
/* pw_verify_be_dn will select the backend we need for us. */
| 0 |
42e2df3858a4e14706d57b5c907d1d3768f4d970
|
389ds/389-ds-base
|
Ticket 47981 - COS cache doesn't properly mark vattr cache as
invalid when there are multiple suffixes
Bug Description: When rebuilding the COS cache, we check each suffix for COS entries.
If the last suffix checked does not contain any COS entries, then the
virtual attribute cache is incorrectly not invalidated. This allows
for already cached entries to hold onto the old COS attributes/values.
Fix Description: Only set the vattr_cacheable flag if a suffix contains COS entries, not
if it does not - by default the flag is not set.
https://fedorahosted.org/389/ticket/47981
Reviewed by: nhosoi(Thanks!)
|
commit 42e2df3858a4e14706d57b5c907d1d3768f4d970
Author: Mark Reynolds <[email protected]>
Date: Wed Jan 7 08:59:06 2015 -0500
Ticket 47981 - COS cache doesn't properly mark vattr cache as
invalid when there are multiple suffixes
Bug Description: When rebuilding the COS cache, we check each suffix for COS entries.
If the last suffix checked does not contain any COS entries, then the
virtual attribute cache is incorrectly not invalidated. This allows
for already cached entries to hold onto the old COS attributes/values.
Fix Description: Only set the vattr_cacheable flag if a suffix contains COS entries, not
if it does not - by default the flag is not set.
https://fedorahosted.org/389/ticket/47981
Reviewed by: nhosoi(Thanks!)
diff --git a/dirsrvtests/tickets/ticket47981_test.py b/dirsrvtests/tickets/ticket47981_test.py
new file mode 100644
index 000000000..2a16ce603
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47981_test.py
@@ -0,0 +1,345 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+BRANCH = 'ou=people,' + DEFAULT_SUFFIX
+USER_DN = 'uid=user1,%s' % (BRANCH)
+BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
+BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com'
+BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
+ 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+SECOND_SUFFIX = 'o=netscaperoot'
+BE_NAME = 'netscaperoot'
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
[email protected](scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def addSubtreePwPolicy(inst):
+ #
+ # Add subtree policy to the people branch
+ #
+ try:
+ inst.add_s(Entry((BRANCH_CONTAINER, {
+ 'objectclass': 'top nsContainer'.split(),
+ 'cn': 'nsPwPolicyContainer'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add subtree container for ou=people: error ' + e.message['desc'])
+ assert False
+
+ # Add the password policy subentry
+ try:
+ inst.add_s(Entry((BRANCH_PWP, {
+ 'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'passwordMustChange': 'off',
+ 'passwordExp': 'off',
+ 'passwordHistory': 'off',
+ 'passwordMinAge': '0',
+ 'passwordChange': 'off',
+ 'passwordStorageScheme': 'ssha'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
+ assert False
+
+ # Add the COS template
+ try:
+ inst.add_s(Entry((BRANCH_COS_TMPL, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+ 'pwdpolicysubentry': BRANCH_PWP
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add COS template: error ' + e.message['desc'])
+ assert False
+
+ # Add the COS definition
+ try:
+ inst.add_s(Entry((BRANCH_COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+ 'costemplatedn': BRANCH_COS_TMPL,
+ 'cosAttribute': 'pwdpolicysubentry default operational-default'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add COS def: error ' + e.message['desc'])
+ assert False
+ time.sleep(0.5)
+
+
+def delSubtreePwPolicy(inst):
+ try:
+ inst.delete_s(BRANCH_COS_DEF)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS def: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_COS_TMPL)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS template: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_PWP)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS password policy: error ' + e.message['desc'])
+ assert False
+
+ try:
+ inst.delete_s(BRANCH_CONTAINER)
+ except ldap.LDAPError, e:
+ log.error('Failed to delete COS container: error ' + e.message['desc'])
+ assert False
+ time.sleep(0.5)
+
+
+def test_ticket47981(topology):
+ """
+ If there are multiple suffixes, and the last suffix checked does not contain any COS entries,
+ while other suffixes do, then the vattr cache is not invalidated as it should be. Then any
+ cached entries will still contain the old COS attributes/values.
+ """
+
+ log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users')
+
+ #
+ # Create a second backend that does not have any COS entries
+ #
+ log.info('Adding second suffix that will not contain any COS entries...\n')
+
+ topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
+ topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
+ try:
+ topology.standalone.add_s(Entry((SECOND_SUFFIX, {
+ 'objectclass': 'top organization'.split(),
+ 'o': BE_NAME})))
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError, e:
+ log.error('Failed to create suffix entry: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Add People branch, it might already exist
+ #
+ log.info('Add our test entries to the default suffix, and proceed with the test...')
+
+ try:
+ topology.standalone.add_s(Entry((BRANCH, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'level4'
+ })))
+ except ldap.ALREADY_EXISTS:
+ pass
+ except ldap.LDAPError, e:
+ log.error('Failed to add ou=people: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Add a user to the branch
+ #
+ try:
+ topology.standalone.add_s(Entry((USER_DN, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'uid': 'user1'
+ })))
+ except ldap.LDAPError, e:
+ log.error('Failed to add user1: error ' + e.message['desc'])
+ assert False
+
+ #
+ # Enable password policy and add the subtree policy
+ #
+ try:
+ topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+ except ldap.LDAPError, e:
+ log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
+ assert False
+
+ addSubtreePwPolicy(topology.standalone)
+
+ #
+ # Now check the user has its expected passwordPolicy subentry
+ #
+ try:
+ entries = topology.standalone.search_s(USER_DN,
+ ldap.SCOPE_BASE,
+ '(objectclass=top)',
+ ['pwdpolicysubentry', 'dn'])
+ if not entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User does not have expected pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ #
+ # Delete the password policy and make sure it is removed from the same user
+ #
+ delSubtreePwPolicy(topology.standalone)
+ try:
+ entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ if entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User unexpectedly does have the pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ #
+ # Add the subtree policvy back and see if the user now has it
+ #
+ addSubtreePwPolicy(topology.standalone)
+ try:
+ entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+ if not entries[0].hasAttr('pwdpolicysubentry'):
+ log.fatal('User does not have expected pwdpolicysubentry!')
+ assert False
+ except ldap.LDAPError, e:
+ log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc']))
+ assert False
+
+ # If we got here the test passed
+ log.info('Test PASSED')
+
+
+def test_ticket47981_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47981(topo)
+
+if __name__ == '__main__':
+ run_isolated()
\ No newline at end of file
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 0d9a61eed..7d8e87717 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -260,7 +260,7 @@ static int cos_cache_add_tmpl(cosTemplates **pTemplates, cosAttrValue *dn, cosAt
/* cosDefinitions manipulation */
static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_cacheable);
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable);
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs);
static int cos_cache_add_defn(cosDefinitions **pDefs, cosAttrValue **dn, int cosType, cosAttrValue **tree, cosAttrValue **tmpDn, cosAttrValue **spec, cosAttrValue **pAttrs, cosAttrValue **pOverrides, cosAttrValue **pOperational, cosAttrValue **pCosMerge, cosAttrValue **pCosOpDefault);
static int cos_cache_entry_is_cos_related( Slapi_Entry *e);
@@ -619,9 +619,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
LDAPDebug( LDAP_DEBUG_TRACE, "--> cos_cache_build_definition_list\n",0,0,0);
/*
- the class of service definitions may be anywhere in the DIT,
- so our first task is to find them.
- */
+ * The class of service definitions may be anywhere in the DIT,
+ * so our first task is to find them.
+ */
attrs[0] = "namingcontexts";
attrs[1] = 0;
@@ -629,9 +629,9 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
LDAPDebug( LDAP_DEBUG_PLUGIN, "cos: Building class of service cache after status change.\n",0,0,0);
/*
- * XXXrbyrne: this looks really ineficient--should be using
+ * XXXrbyrne: this looks really inefficient--should be using
* slapi_get_next_suffix(), rather than searching for namingcontexts.
- */
+ */
pSuffixSearch = slapi_search_internal("",LDAP_SCOPE_BASE,"(objectclass=*)",NULL,attrs,0);
if(pSuffixSearch)
@@ -671,19 +671,21 @@ static int cos_cache_build_definition_list(cosDefinitions **pDefs, int *vattr_ca
{
/* here's a suffix, lets search it... */
if(suffixVals[valIndex]->bv_val)
- if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs, vattr_cacheable))
+ {
+ if(!cos_cache_add_dn_defs(suffixVals[valIndex]->bv_val ,pDefs))
+ {
+ *vattr_cacheable = -1;
cos_def_available = 1;
-
+ break;
+ }
+ }
valIndex++;
}
-
-
ber_bvecfree( suffixVals );
suffixVals = NULL;
}
}
}
-
} while(!slapi_entry_next_attr(pSuffixList[suffixIndex], suffixAttr, &suffixAttr));
}
suffixIndex++;
@@ -709,7 +711,6 @@ next:
slapi_pblock_destroy(pSuffixSearch);
}
-
LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_build_definition_list\n",0,0,0);
return ret;
}
@@ -750,10 +751,6 @@ cos_dn_defs_cb (Slapi_Entry* e, void *callback_data)
char *norm_dn = NULL;
info=(struct dn_defs_info *)callback_data;
-
- /* assume cacheable */
- info->vattr_cacheable = -1;
-
cos_cache_add_attrval(&pDn, slapi_entry_get_dn(e));
if(slapi_entry_first_attr(e, &dnAttr)) {
goto bail;
@@ -1076,7 +1073,7 @@ bail:
#define DN_DEF_FILTER "(&(|(objectclass=cosSuperDefinition)(objectclass=cosDefinition))(objectclass=ldapsubentry))"
-static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_cacheable)
+static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs)
{
Slapi_PBlock *pDnSearch = 0;
struct dn_defs_info info = {NULL, 0, 0};
@@ -1084,7 +1081,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
if (pDnSearch) {
info.ret=-1; /* assume no good defs */
info.pDefs=pDefs;
- info.vattr_cacheable = 0; /* assume not cacheable */
slapi_search_internal_set_pb(pDnSearch, dn, LDAP_SCOPE_SUBTREE,
DN_DEF_FILTER,NULL,0,
NULL,NULL,cos_get_plugin_identity(),0);
@@ -1096,8 +1092,6 @@ static int cos_cache_add_dn_defs(char *dn, cosDefinitions **pDefs, int *vattr_ca
slapi_pblock_destroy (pDnSearch);
}
- *vattr_cacheable = info.vattr_cacheable;
-
return info.ret;
}
| 0 |
beb23fe4b5cc15a692a2282b27a49deedb502eda
|
389ds/389-ds-base
|
Bug 604453 - SASL Stress and Server crash: Program quits with the assertion failure in PR_Poll
https://bugzilla.redhat.com/show_bug.cgi?id=604453
Resolves: bug 604453
Description: SASL Stress and Server crash: Program quits with the assertion failure in PR_Poll
Branch: master
Reviewed by: nhosoi (Thanks!)
Fix Description: We usually do not have to pop the IO layer from the SASL
connection. This is usually handled by PR_Close() on the PRFD, which should
only happen when the connection is not being referenced by the main PR_Poll()
call. The only time we need to explicitly pop the SASL IO layer is if the
client re-negotiates the SASL bind with a different ssf e.g. going from
clear to protected or vice versa.
Platforms tested: RHEL5 x86_64
(cherry picked from commit ac6ce0947d9b2d5ec3649948bca861f0e318d708)
|
commit beb23fe4b5cc15a692a2282b27a49deedb502eda
Author: Rich Megginson <[email protected]>
Date: Fri Jun 18 11:51:48 2010 -0600
Bug 604453 - SASL Stress and Server crash: Program quits with the assertion failure in PR_Poll
https://bugzilla.redhat.com/show_bug.cgi?id=604453
Resolves: bug 604453
Description: SASL Stress and Server crash: Program quits with the assertion failure in PR_Poll
Branch: master
Reviewed by: nhosoi (Thanks!)
Fix Description: We usually do not have to pop the IO layer from the SASL
connection. This is usually handled by PR_Close() on the PRFD, which should
only happen when the connection is not being referenced by the main PR_Poll()
call. The only time we need to explicitly pop the SASL IO layer is if the
client re-negotiates the SASL bind with a different ssf e.g. going from
clear to protected or vice versa.
Platforms tested: RHEL5 x86_64
(cherry picked from commit ac6ce0947d9b2d5ec3649948bca861f0e318d708)
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index bab334531..85aef8e08 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -197,8 +197,7 @@ connection_cleanup(Connection *conn)
conn->c_extension= NULL;
conn->c_ssl_ssf = 0;
conn->c_unix_local = 0;
- /* remove any SASL I/O from the connection */
- sasl_io_cleanup(conn);
+ /* destroy any sasl context */
sasl_dispose((sasl_conn_t**)&conn->c_sasl_conn);
/* PAGED_RESULTS */
if (conn->c_search_result_set) {
| 0 |
70ba6e385482f160ce6e3e47258ad5a4712977bf
|
389ds/389-ds-base
|
Issue 49997 - Add a new CI test case
Bug Description: If the suffix provided in the command line does not exist or it's
not replicated, we have an error message that it's regarding the RUV
Fix Description: Added a test case that will validate if a wrong suffix is passed then
a proper error message is displayed or not.
Relates: https://pagure.io/389-ds-base/issue/49997
Review by: vashirov (Thanks!)
|
commit 70ba6e385482f160ce6e3e47258ad5a4712977bf
Author: Akshay Adhikari <[email protected]>
Date: Tue Jun 25 18:33:28 2019 +0530
Issue 49997 - Add a new CI test case
Bug Description: If the suffix provided in the command line does not exist or it's
not replicated, we have an error message that it's regarding the RUV
Fix Description: Added a test case that will validate if a wrong suffix is passed then
a proper error message is displayed or not.
Relates: https://pagure.io/389-ds-base/issue/49997
Review by: vashirov (Thanks!)
diff --git a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
index 5773586aa..11f713e2b 100644
--- a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
+++ b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py
@@ -403,6 +403,35 @@ def test_inconsistencies(topo_tls_ldapi):
user_m1.delete()
+def test_suffix_exists(topo_tls_ldapi):
+ """Check if wrong suffix is provided, server is giving Error: Failed
+ to validate suffix.
+
+ :id: ce75debc-c07f-4e72-8787-8f99cbfaf1e2
+ :setup: Two master replication
+ :steps:
+ 1. Run ds-replcheck with wrong suffix (Non Existing)
+ :expectedresults:
+ 1. It should be unsuccessful
+ """
+ m1 = topo_tls_ldapi.ms["master1"]
+ m2 = topo_tls_ldapi.ms["master2"]
+ ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck')
+
+ if ds_is_newer("1.4.1.2"):
+ tool_cmd = [ds_replcheck_path, 'online', '-b', 'dc=test,dc=com', '-D', DN_DM, '-w', PW_DM,
+ '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport),
+ '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)]
+ else:
+ tool_cmd = [ds_replcheck_path, '-b', 'dc=test,dc=com', '-D', DN_DM, '-w', PW_DM,
+ '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport),
+ '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)]
+
+ result1 = subprocess.Popen(tool_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
+ result = result1.communicate()
+ assert "Failed to validate suffix" in result[0]
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
| 0 |
459f73835de1be34bd9c9f1636ade92fd884d9ca
|
389ds/389-ds-base
|
Issue: 50112 - Port ACI test suit from TET to python3(modify)
Port ACI test suit from TET to python3(modify)
https://pagure.io/389-ds-base/issue/50112
Reviewed by: William Brown
|
commit 459f73835de1be34bd9c9f1636ade92fd884d9ca
Author: Anuj Borah <[email protected]>
Date: Wed Jan 30 13:19:08 2019 +0530
Issue: 50112 - Port ACI test suit from TET to python3(modify)
Port ACI test suit from TET to python3(modify)
https://pagure.io/389-ds-base/issue/50112
Reviewed by: William Brown
diff --git a/dirsrvtests/tests/suites/acl/modify_test.py b/dirsrvtests/tests/suites/acl/modify_test.py
new file mode 100644
index 000000000..8f9054b1d
--- /dev/null
+++ b/dirsrvtests/tests/suites/acl/modify_test.py
@@ -0,0 +1,574 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ----
+
+
+import pytest, os, ldap
+from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389.idm.user import UserAccount
+from lib389.idm.account import Anonymous
+from lib389.idm.group import Group, UniqueGroup
+from lib389.idm.organizationalunit import OrganizationalUnit
+from lib389.idm.group import Groups
+from lib389.topologies import topology_st as topo
+from lib389.idm.domain import Domain
+
+
+CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX)
+CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX)
+USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD)
+USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD)
+KIRSTENVAUGHAN = "cn=Kirsten Vaughan, ou=Human Resources, {}".format(DEFAULT_SUFFIX)
+HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX)
+
+
[email protected](scope="function")
+def cleanup_tree(request, topo):
+
+ def fin():
+ for i in [USER_DELADD, USER_WITH_ACI_DELADD, KIRSTENVAUGHAN, CONTAINER_1_DELADD, CONTAINER_2_DELADD, HUMAN_OU_GLOBAL]:
+ try:
+ UserAccount(topo.standalone, i).delete()
+ except:
+ pass
+
+ request.addfinalizer(fin)
+
+
[email protected](scope="function")
+def aci_of_user(request, topo):
+ aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci')
+
+ def finofaci():
+ domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+ domain.set('aci', None)
+ for i in aci_list:
+ domain.add("aci", i)
+
+ request.addfinalizer(finofaci)
+
+
+def test_allow_write_access_to_targetattr_with_a_single_attribute(
+ topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 1 Allow write access to targetattr with a single attribute
+ :id:620d7b82-7abf-11e8-a4db-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ ACI_BODY = '(targetattr = "title")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)'
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'Product Development'})
+
+ properties = {
+ 'uid': 'Jeff Vedder',
+ 'cn': 'Jeff Vedder',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'JeffVedder',
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ # Allow write access to targetattr with a single attribute
+ conn = Anonymous(topo.standalone).bind()
+ ua = UserAccount(conn, USER_DELADD)
+ ua.add("title", "Architect")
+ assert ua.get_attr_val('title')
+ ua.remove("title", "Architect")
+
+
+def test_allow_write_access_to_targetattr_with_multiple_attibutes(
+ topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 2 Allow write access to targetattr with multiple attibutes
+ :id:6b9f05c6-7abf-11e8-9ba1-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ ACI_BODY = '(targetattr = "telephonenumber || roomnumber")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)'
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'Product Development'})
+
+ properties = {
+ 'uid': 'Jeff Vedder',
+ 'cn': 'Jeff Vedder',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'JeffVedder',
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ # Allow write access to targetattr with multiple attibutes
+ conn = Anonymous(topo.standalone).bind()
+ ua = UserAccount(conn, USER_DELADD)
+ ua.add("telephonenumber", "+1 408 555 1212")
+ assert ua.get_attr_val('telephonenumber')
+ ua.add("roomnumber", "101")
+ assert ua.get_attr_val('roomnumber')
+
+
+def test_allow_write_access_to_userdn_all(topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 3 Allow write access to userdn 'all'
+ :id:70c58818-7abf-11e8-afa1-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///all") ;)'
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ for i in ['Product Development', 'Accounting']:
+ ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX))
+ ou.create(properties={'ou': i})
+
+ for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']:
+ properties = {
+ 'uid': i,
+ 'cn': i,
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + i,
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ # Allow write access to userdn 'all'
+ conn = Anonymous(topo.standalone).bind()
+ with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+ UserAccount(conn, USER_DELADD).add("title", "Architect")
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ UserAccount(conn, USER_DELADD).add("title", "Architect")
+ assert UserAccount(conn, USER_DELADD).get_attr_val('title')
+
+
+def test_allow_write_access_to_userdn_with_wildcards_in_dn(
+ topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 4 Allow write access to userdn with wildcards in DN
+ :id:766c2312-7abf-11e8-b57d-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write)(userdn = "ldap:///cn=*, ou=Product Development,{}") ;)'.format(DEFAULT_SUFFIX)
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'Product Development'})
+
+ properties = {
+ 'uid': 'Jeff Vedder',
+ 'cn': 'Jeff Vedder',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'JeffVedder',
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM)
+ # Allow write access to userdn with wildcards in DN
+ ua = UserAccount(conn, USER_DELADD)
+ ua.add("title", "Architect")
+ assert ua.get_attr_val('title')
+
+
+def test_allow_write_access_to_userdn_with_multiple_dns(topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 5 Allow write access to userdn with multiple DNs
+ :id:7aae760a-7abf-11e8-bc3a-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write)(userdn = "ldap:///{} || ldap:///{}") ;)'.format(USER_DELADD, USER_WITH_ACI_DELADD)
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ for i in ['Product Development', 'Accounting', 'Human Resources']:
+ ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX))
+ ou.create(properties={'ou': i})
+
+ for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting', 'Kirsten Vaughan, ou=Human Resources']:
+ properties = {
+ 'uid': i,
+ 'cn': i,
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + i,
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM)
+ # Allow write access to userdn with multiple DNs
+ ua = UserAccount(conn, KIRSTENVAUGHAN)
+ ua.add("title", "Architect")
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ # Allow write access to userdn with multiple DNs
+ ua = UserAccount(conn, USER_DELADD)
+ ua.add("title", "Architect")
+ assert ua.get_attr_val('title')
+
+
+def test_allow_write_access_to_target_with_wildcards(topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 6 Allow write access to target with wildcards
+ :id:825fe884-7abf-11e8-8541-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ ACI_BODY = '(target = ldap:///{})(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX)
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ for i in ['Product Development', 'Accounting', 'Human Resources']:
+ ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX))
+ ou.create(properties={'ou': i})
+
+ for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting', 'Kirsten Vaughan, ou=Human Resources']:
+ properties = {
+ 'uid': i,
+ 'cn': i,
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + i,
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM)
+ # Allow write access to target with wildcards
+ ua = UserAccount(conn, KIRSTENVAUGHAN)
+ ua.add("title", "Architect")
+ assert ua.get_attr_val('title')
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ # Allow write access to target with wildcards
+ ua = UserAccount(conn, USER_DELADD)
+ ua.add("title", "Architect")
+ assert ua.get_attr_val('title')
+
+
+def test_allow_write_access_to_userdnattr(topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 7 Allow write access to userdnattr
+ :id:86b418f6-7abf-11e8-ae28-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ ACI_BODY = '(target = ldap:///{})(targetattr=*)(version 3.0; acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ for i in ['Product Development', 'Accounting']:
+ ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX))
+ ou.create(properties={'ou': i})
+
+ for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']:
+ properties = {
+ 'uid': i,
+ 'cn': i,
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + i,
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ UserAccount(topo.standalone, USER_WITH_ACI_DELADD).add('manager', USER_WITH_ACI_DELADD)
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ # Allow write access to userdnattr
+ ua = UserAccount(conn, USER_DELADD)
+ ua.add('uid', 'scoobie')
+ assert ua.get_attr_val('uid')
+ ua.add('uid', 'jvedder')
+ assert ua.get_attr_val('uid')
+
+
+def test_allow_selfwrite_access_to_anyone(topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 8 Allow selfwrite access to anyone
+ :id:8b3becf0-7abf-11e8-ac34-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX)
+ group = groups.create(properties={"cn": "group1",
+ "description": "testgroup"})
+
+ ACI_BODY = '(target = ldap:///cn=group1,ou=Groups,{})(targetattr = "member")(version 3.0; acl "ACI NAME"; allow (selfwrite) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX)
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'Product Development'})
+
+ properties = {
+ 'uid': 'Jeff Vedder',
+ 'cn': 'Jeff Vedder',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'JeffVedder',
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM)
+ # Allow selfwrite access to anyone
+ groups = Groups(conn, DEFAULT_SUFFIX)
+ groups.list()[0].add_member(USER_DELADD)
+ group.delete()
+
+
+def test_uniquemember_should_also_be_the_owner(topo, aci_of_user):
+ """
+ Modify Test 10 groupdnattr = \"ldap:///$BASEDN?owner\" if owner is a group, group's
+ uniquemember should also be the owner
+ :id:9456b2d4-7abf-11e8-829d-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ for i in ['ACLGroupTest']:
+ ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX))
+ ou.create(properties={'ou': i})
+
+ ou = OrganizationalUnit(topo.standalone, "ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'ACLDevelopment'})
+ ou.set('aci','(targetattr="*")(version 3.0; acl "groupdnattr acl"; '
+ 'allow (all)groupdnattr = "ldap:///{}?owner";)'.format(DEFAULT_SUFFIX))
+
+ grp = UniqueGroup(topo.standalone, "uid=anuj,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX))
+ user_props = (
+ {'sn': 'Borah',
+ 'cn': 'Anuj',
+ 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'groupofUniquenames'],
+ 'userpassword': PW_DM,
+ 'givenname': 'Anuj',
+ 'ou': ['ACLDevelopment', 'People'],
+ 'roomnumber': '123',
+ 'uniquemember': 'cn=mandatory member'
+ }
+ )
+ grp.create(properties=user_props)
+
+ grp = UniqueGroup(topo.standalone, "uid=2ishani,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX))
+ user_props = (
+ {'sn': 'Borah',
+ 'cn': '2ishani',
+ 'objectclass': ['top', 'person','organizationalPerson', 'inetOrgPerson', 'groupofUniquenames'],
+ 'userpassword': PW_DM,
+ 'givenname': '2ishani',
+ 'ou': ['ACLDevelopment', 'People'],
+ 'roomnumber': '1234',
+ 'uniquemember': 'cn=mandatory member', "owner": "cn=group4, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)
+ }
+ )
+ grp.create(properties=user_props)
+
+ grp = UniqueGroup(topo.standalone, 'cn=group1,ou=ACLGroupTest,'+DEFAULT_SUFFIX)
+ grp.create(properties={'cn': 'group1',
+ 'ou': 'groups'})
+ grp.set('uniquemember', ["cn=group2, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX),
+ "cn=group3, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)])
+
+ grp = UniqueGroup(topo.standalone, 'cn=group3,ou=ACLGroupTest,' + DEFAULT_SUFFIX)
+ grp.create(properties={'cn': 'group3',
+ 'ou': 'groups'})
+ grp.set('uniquemember', ["cn=group4, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)])
+
+ grp = UniqueGroup(topo.standalone, 'cn=group4,ou=ACLGroupTest,' + DEFAULT_SUFFIX)
+ grp.create(properties={
+ 'cn': 'group4',
+ 'ou': 'groups'})
+ grp.set('uniquemember', ["uid=anuj, ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)])
+
+ #uniquemember should also be the owner
+ conn = UserAccount(topo.standalone, "uid=anuj,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)).bind(PW_DM)
+ ua = UserAccount(conn, "uid=2ishani, ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX))
+ ua.add('roomnumber', '9999')
+ assert ua.get_attr_val('roomnumber')
+
+ for DN in ["cn=group4,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX),
+ "cn=group3,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX),
+ "cn=group1,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX),
+ "uid=2ishani,ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX),
+ "uid=anuj,ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), "ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX),
+ "ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]:
+ UserAccount(topo.standalone, DN).delete()
+
+
+def test_aci_with_both_allow_and_deny(topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 12 aci with both allow and deny
+ :id:9dcfe902-7abf-11e8-86dc-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; deny (read, search)userdn = "ldap:///{}"; allow (all) userdn = "ldap:///{}" ;)'.format(USER_WITH_ACI_DELADD, USER_DELADD)
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ for i in ['Product Development', 'Accounting']:
+ ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX))
+ ou.create(properties={'ou': i})
+
+ for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']:
+ properties = {
+ 'uid': i,
+ 'cn': i,
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + i,
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM)
+ # aci with both allow and deny, testing allow
+ assert UserAccount(conn, USER_WITH_ACI_DELADD).get_attr_val('uid')
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ # aci with both allow and deny, testing deny
+ with pytest.raises(IndexError):
+ UserAccount(conn, USER_WITH_ACI_DELADD).get_attr_val('uid')
+
+
+def test_allow_owner_to_modify_entry(topo, aci_of_user, cleanup_tree):
+ """
+ Modify Test 14 allow userdnattr = owner to modify entry
+ :id:aa302090-7abf-11e8-811a-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ grp = UniqueGroup(topo.standalone, 'cn=intranet,' + DEFAULT_SUFFIX)
+ grp.create(properties={
+ 'cn': 'intranet',
+ 'ou': 'groups'})
+ grp.set('owner', USER_WITH_ACI_DELADD)
+
+ ACI_BODY = '(target ="ldap:///cn=intranet, {}") (targetattr ="*")(targetfilter ="(objectclass=groupOfUniqueNames)") (version 3.0;acl "$tet_thistest";allow(read, write, delete, search, compare, add) (userdnattr = "owner");)'.format(DEFAULT_SUFFIX)
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY)
+
+ for i in ['Product Development', 'Accounting']:
+ ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX))
+ ou.create(properties={'ou': i})
+ for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']:
+ properties = {
+ 'uid': i,
+ 'cn': i,
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + i,
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ # allow userdnattr = owner to modify entry
+ ua = UserAccount(conn, 'cn=intranet,dc=example,dc=com')
+ ua.set('uniquemember', "cn=Andy Walker, ou=Accounting,dc=example,dc=com")
+ assert ua.get_attr_val('uniquemember')
+
+
+if __name__ == "__main__":
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/acl/modrdn_test.py b/dirsrvtests/tests/suites/acl/modrdn_test.py
new file mode 100644
index 000000000..c395e0baa
--- /dev/null
+++ b/dirsrvtests/tests/suites/acl/modrdn_test.py
@@ -0,0 +1,298 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ----
+
+import pytest, os, ldap
+from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389.idm.user import UserAccount
+from lib389.idm.account import Anonymous
+from lib389.idm.group import Group, UniqueGroup
+from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits
+from lib389.topologies import topology_st as topo
+from lib389.idm.domain import Domain
+
+
+CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX)
+CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX)
+USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD)
+USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD)
+DYNAMIC_MODRDN = "cn=Test DYNAMIC_MODRDN Group 70, {}".format(DEFAULT_SUFFIX)
+SAM_DAMMY_MODRDN = "cn=Sam Carter1,ou=Accounting,{}".format(DEFAULT_SUFFIX)
+TRAC340_MODRDN = "cn=TRAC340_MODRDN,{}".format(DEFAULT_SUFFIX)
+NEWENTRY9_MODRDN = "cn=NEWENTRY9_MODRDN,{}".format("ou=People,{}".format(DEFAULT_SUFFIX))
+OU0_OU_MODRDN = "ou=OU0,{}".format(DEFAULT_SUFFIX)
+OU2_OU_MODRDN = "ou=OU2,{}".format(DEFAULT_SUFFIX)
+
+
[email protected](scope="function")
+def aci_of_user(request, topo):
+ aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci')
+
+ def finofaci():
+ domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+ domain.set('aci', None)
+ for i in aci_list:
+ domain.add("aci", i)
+
+ request.addfinalizer(finofaci)
+
+
[email protected](scope="function")
+def _add_user(request, topo):
+ ou = OrganizationalUnit(topo.standalone, 'ou=Product Development,{}'.format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'Product Development'})
+
+ ou = OrganizationalUnit(topo.standalone, 'ou=Accounting,{}'.format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'Accounting'})
+
+ groups = Group(topo.standalone, DYNAMIC_MODRDN)
+ group_properties = {"cn": "Test DYNAMIC_MODRDN Group 70",
+ "objectclass": ["top", 'groupofURLs'],
+ 'memberURL': 'ldap:///{}??base?(cn=*)'.format(USER_WITH_ACI_DELADD)}
+ groups.create(properties=group_properties)
+
+ properties = {
+ 'uid': 'Jeff Vedder',
+ 'cn': 'Jeff Vedder',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'JeffVedder',
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, 'cn=Jeff Vedder,ou=Product Development,{}'.format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ properties = {
+ 'uid': 'Sam Carter',
+ 'cn': 'Sam Carter',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'SamCarter',
+ 'userPassword': PW_DM
+ }
+ user = UserAccount(topo.standalone, 'cn=Sam Carter,ou=Accounting,{}'.format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+
+ def fin():
+ for DN in [USER_DELADD,USER_WITH_ACI_DELADD,DYNAMIC_MODRDN,CONTAINER_2_DELADD,CONTAINER_1_DELADD]:
+ UserAccount(topo.standalone, DN).delete()
+
+ request.addfinalizer(fin)
+
+
+def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user):
+ """
+ Modrdn Test 1 Allow write privilege to anyone
+ :id: 4406f12e-7932-11e8-9dea-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",
+ '(target ="ldap:///{}")(targetattr=*)(version 3.0;acl "$tet_thistest";allow '
+ '(write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX))
+ conn = Anonymous(topo.standalone).bind()
+ # Allow write privilege to anyone
+ useraccount = UserAccount(conn, USER_WITH_ACI_DELADD)
+ useraccount.rename("cn=Jeff Vedder")
+ assert 'cn=Jeff Vedder,ou=Accounting,dc=example,dc=com' == useraccount.dn
+ useraccount = UserAccount(conn, "cn=Jeff Vedder,ou=Accounting,dc=example,dc=com")
+ useraccount.rename("cn=Sam Carter")
+ assert 'cn=Sam Carter,ou=Accounting,dc=example,dc=com' == useraccount.dn
+
+
+def test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_url(
+ topo, _add_user, aci_of_user
+):
+ """
+ Modrdn Test 2 Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL
+ :id: 4c0f8c00-7932-11e8-8398-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr=*)(version 3.0; acl "$tet_thistest"; allow(all)(groupdn = "ldap:///{}"); )'.format(DEFAULT_SUFFIX, DYNAMIC_MODRDN))
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ # Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL
+ useraccount = UserAccount(conn, USER_DELADD)
+ useraccount.rename("cn=Jeffbo Vedder")
+ assert 'cn=Jeffbo Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn
+ useraccount = UserAccount(conn, "cn=Jeffbo Vedder,{}".format(CONTAINER_1_DELADD))
+ useraccount.rename("cn=Jeff Vedder")
+ assert 'cn=Jeff Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn
+
+
+def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user):
+ """
+ Test for write access to naming atributes (1)
+ Test that check for add writes to the new naming attr
+ :id: 532fc630-7932-11e8-8924-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ """
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX))
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ #Test for write access to naming atributes
+ useraccount = UserAccount(conn, USER_WITH_ACI_DELADD)
+ with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+ useraccount.rename("uid=Jeffbo Vedder")
+
+
+def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user):
+ """
+ Test for write access to naming atributes (2)
+ :id: 5a2077d2-7932-11e8-9e7b-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Add ACI
+ 3. User should follow ACI role
+ 4. Now try to modrdn it to cn, won't work if request deleteoldrdn.
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ 4. Operation should not succeed
+ """
+ Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX))
+ properties = {
+ 'uid': 'Sam Carter1',
+ 'cn': 'Sam Carter1',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'SamCarter1'
+ }
+ user = UserAccount(topo.standalone, 'cn=Sam Carter1,ou=Accounting,{}'.format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+ user.set("userPassword", "password")
+ conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM)
+ # Test for write access to naming atributes
+ useraccount = UserAccount(conn, SAM_DAMMY_MODRDN)
+ with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+ useraccount.rename("uid=Jeffbo Vedder")
+ UserAccount(topo.standalone, SAM_DAMMY_MODRDN).delete()
+
+
[email protected]
+def test_access_aci_list_contains_any_deny_rule(topo, _add_user, aci_of_user):
+ """
+ Testing bug #950351: RHDS denies MODRDN access if ACI list contains any DENY rule
+ Bug description: If you create a deny ACI for some or more attributes there is incorrect behaviour
+ as you cannot rename the entry anymore
+ :id: 62cbbb8a-7932-11e8-96a7-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Adding a new ou ou=People to $BASEDN
+ 3. Adding a user NEWENTRY9_MODRDN to ou=People,$BASEDN
+ 4. Adding an allow rule for NEWENTRY9_MODRDN and for others an aci deny rule
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ 4. Operation should succeed
+ """
+ properties = {
+ 'uid': 'NEWENTRY9_MODRDN',
+ 'cn': 'NEWENTRY9_MODRDN_People',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'NEWENTRY9_MODRDN'
+ }
+ user = UserAccount(topo.standalone, 'cn=NEWENTRY9_MODRDN,ou=People,{}'.format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+ user.set("userPassword", "password")
+ user.set("telephoneNumber", "989898191")
+ user.set("mail", "[email protected]")
+ user.set("givenName", "givenName")
+ user.set("uid", "NEWENTRY9_MODRDN")
+ OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('People').add("aci", ['(targetattr = "*") '
+ '(version 3.0;acl "admin";allow (all)(userdn = "ldap:///{}");)'.format(NEWENTRY9_MODRDN),
+ '(targetattr = "mail") (version 3.0;acl "deny_mail";deny (write)(userdn = "ldap:///anyone");)',
+ '(targetattr = "uid") (version 3.0;acl "allow uid";allow (write)(userdn = "ldap:///{}");)'.format(NEWENTRY9_MODRDN)])
+ UserAccount(topo.standalone, NEWENTRY9_MODRDN).replace("userpassword", "Anuj")
+ useraccount = UserAccount(topo.standalone, NEWENTRY9_MODRDN)
+ useraccount.rename("uid=newrdnchnged")
+ assert 'uid=newrdnchnged,ou=People,dc=example,dc=com' == useraccount.dn
+
+
+def test_renaming_target_entry(topo, _add_user, aci_of_user):
+ """
+ Test for renaming target entry
+ :id: 6be1d33a-7932-11e8-9115-8c16451d917b
+ :setup: server
+ :steps:
+ 1. Add test entry
+ 2. Create a test user entry
+ 3.Create a new ou entry with an aci
+ 4. Make sure uid=$MYUID has the access
+ 5. Rename ou=OU0 to ou=OU1
+ 6. Create another ou=OU2
+ 7. Move ou=OU1 under ou=OU2
+ 8. Make sure uid=$MYUID still has the access
+ :expectedresults:
+ 1. Entry should be added
+ 2. Operation should succeed
+ 3. Operation should succeed
+ 4. Operation should succeed
+ 5. Operation should succeed
+ 6. Operation should succeed
+ 7. Operation should succeed
+ 8. Operation should succeed
+ """
+ properties = {
+ 'uid': 'TRAC340_MODRDN',
+ 'cn': 'TRAC340_MODRDN',
+ 'sn': 'user',
+ 'uidNumber': '1000',
+ 'gidNumber': '2000',
+ 'homeDirectory': '/home/' + 'TRAC340_MODRDN'
+ }
+ user = UserAccount(topo.standalone, 'cn=TRAC340_MODRDN,{}'.format(DEFAULT_SUFFIX))
+ user.create(properties=properties)
+ user.set("userPassword", "password")
+ ou = OrganizationalUnit(topo.standalone, 'ou=OU0,{}'.format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'OU0'})
+ ou.set('aci', '(targetattr=*)(version 3.0; acl "$MYUID";allow(read, search, compare) userdn = "ldap:///{}";)'.format(TRAC340_MODRDN))
+ conn = UserAccount(topo.standalone, TRAC340_MODRDN).bind(PW_DM)
+ assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU0')
+ # Test for renaming target entry
+ OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('OU0').rename("ou=OU1")
+ assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU1')
+ ou = OrganizationalUnit(topo.standalone, 'ou=OU2,{}'.format(DEFAULT_SUFFIX))
+ ou.create(properties={'ou': 'OU2'})
+ # Test for renaming target entry
+ OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('OU1').rename("ou=OU1", newsuperior=OU2_OU_MODRDN)
+ assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU1')
+
+
+if __name__ == "__main__":
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s -v %s" % CURRENT_FILE)
| 0 |
230ace2aa657e931b4c6dfa742a28d072a8f5db7
|
389ds/389-ds-base
|
Ticket #48109 - substring index with nssubstrbegin: 1 is not being used with filters like (attr=x*)
Description: In case, index entry has this style of substr width definition:
nsMatchingRule: nsSubstr{Begin,Middle,End}=<NUM>
it should be converted to
nssubstr{Begin,Middle,End}: <NUM>
and skip the following nsMatchingRule evaluation. There was a bug in the
logic to skip. The feature itself was not effected, but this bogus error
was logged in the error log:
[..] from ldbm instance init: line 0: unknown or invalid matching rule
"nssubstrbegin=3" in index configuration (ignored)
Plus, the test script ticket48109_test.py is adjusted to the new format.
https://fedorahosted.org/389/ticket/48109
Reviewed by [email protected] (Thank you, Mark!!)
|
commit 230ace2aa657e931b4c6dfa742a28d072a8f5db7
Author: Noriko Hosoi <[email protected]>
Date: Wed Jun 15 14:59:56 2016 -0700
Ticket #48109 - substring index with nssubstrbegin: 1 is not being used with filters like (attr=x*)
Description: In case, index entry has this style of substr width definition:
nsMatchingRule: nsSubstr{Begin,Middle,End}=<NUM>
it should be converted to
nssubstr{Begin,Middle,End}: <NUM>
and skip the following nsMatchingRule evaluation. There was a bug in the
logic to skip. The feature itself was not effected, but this bogus error
was logged in the error log:
[..] from ldbm instance init: line 0: unknown or invalid matching rule
"nssubstrbegin=3" in index configuration (ignored)
Plus, the test script ticket48109_test.py is adjusted to the new format.
https://fedorahosted.org/389/ticket/48109
Reviewed by [email protected] (Thank you, Mark!!)
diff --git a/dirsrvtests/tests/tickets/ticket48109_test.py b/dirsrvtests/tests/tickets/ticket48109_test.py
index e4091e0fd..1d7a334d2 100644
--- a/dirsrvtests/tests/tickets/ticket48109_test.py
+++ b/dirsrvtests/tests/tickets/ticket48109_test.py
@@ -26,6 +26,13 @@ installation1_prefix = None
UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
class TopologyStandalone(object):
def __init__(self, standalone):
standalone.open()
@@ -52,13 +59,18 @@ def topology(request):
standalone.create()
standalone.open()
+ # Delete each instance in the end
+ def fin():
+ standalone.delete()
+ request.addfinalizer(fin)
+
# Clear out the tmp dir
standalone.clearTmpDir(__file__)
return TopologyStandalone(standalone)
-def test_ticket48109_0(topology):
+def test_ticket48109(topology):
'''
Set SubStr lengths to cn=uid,cn=index,...
objectClass: extensibleObject
@@ -147,8 +159,6 @@ def test_ticket48109_0(topology):
log.error('Failed to delete substr lengths: error ' + e.message['desc'])
assert False
-
-def test_ticket48109_1(topology):
'''
Set SubStr lengths to cn=uid,cn=index,...
nsIndexType: sub
@@ -234,8 +244,6 @@ def test_ticket48109_1(topology):
log.error('Failed to delete substr lengths: error ' + e.message['desc'])
assert False
-
-def test_ticket48109_2(topology):
'''
Set SubStr conflict formats/lengths to cn=uid,cn=index,...
objectClass: extensibleObject
@@ -369,26 +377,11 @@ def test_ticket48109_2(topology):
except ldap.LDAPError as e:
log.error('Failed to delete substr lengths: error ' + e.message['desc'])
assert False
-
- log.info('Test complete')
-
-
-def test_ticket48109_final(topology):
- topology.standalone.delete()
log.info('Testcase PASSED')
-def run_isolated():
- global installation1_prefix
- installation1_prefix = None
-
- topo = topology(True)
- test_ticket48109_0(topo)
- test_ticket48109_1(topo)
- test_ticket48109_2(topo)
- test_ticket48109_final(topo)
-
-
if __name__ == '__main__':
- run_isolated()
-
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index 092b6b5d3..7e44cfe77 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -790,23 +790,23 @@ attr_index_config(
* nsMatchingRule: nsSubstrMiddle=2
* nsMatchingRule: nsSubstrEnd=2
*/
- if (!a->ai_substr_lens || !a->ai_substr_lens[INDEX_SUBSTRBEGIN]) {
- if (PL_strcasestr(attrValue->bv_val, INDEX_ATTR_SUBSTRBEGIN)) {
+ if (PL_strcasestr(attrValue->bv_val, INDEX_ATTR_SUBSTRBEGIN)) {
+ if (!a->ai_substr_lens || !a->ai_substr_lens[INDEX_SUBSTRBEGIN]) {
_set_attr_substrlen(INDEX_SUBSTRBEGIN, attrValue->bv_val, &substrlens);
- do_continue = 1; /* done with j - next j */
}
+ do_continue = 1; /* done with j - next j */
}
- if (!a->ai_substr_lens || !a->ai_substr_lens[INDEX_SUBSTRMIDDLE]) {
- if (PL_strcasestr(attrValue->bv_val, INDEX_ATTR_SUBSTRMIDDLE)) {
+ if (PL_strcasestr(attrValue->bv_val, INDEX_ATTR_SUBSTRMIDDLE)) {
+ if (!a->ai_substr_lens || !a->ai_substr_lens[INDEX_SUBSTRMIDDLE]) {
_set_attr_substrlen(INDEX_SUBSTRMIDDLE, attrValue->bv_val, &substrlens);
- do_continue = 1; /* done with j - next j */
}
+ do_continue = 1; /* done with j - next j */
}
- if (!a->ai_substr_lens || !a->ai_substr_lens[INDEX_SUBSTREND]) {
- if (PL_strcasestr(attrValue->bv_val, INDEX_ATTR_SUBSTREND)) {
+ if (PL_strcasestr(attrValue->bv_val, INDEX_ATTR_SUBSTREND)) {
+ if (!a->ai_substr_lens || !a->ai_substr_lens[INDEX_SUBSTREND]) {
_set_attr_substrlen(INDEX_SUBSTREND, attrValue->bv_val, &substrlens);
- do_continue = 1; /* done with j - next j */
}
+ do_continue = 1; /* done with j - next j */
}
/* check if this is a simple ordering specification
for an attribute that has no ordering matching rule */
| 0 |
c99d4c0741aca2fb0f42dd34e147f65011537cb4
|
389ds/389-ds-base
|
Coverity Fixes (Part 4)
11753 - Resource leak (aclparse.c)
11754 - Resource leak (dna.c)
11755 - Resource leak (dna.c)
11756 - Resource leak (linked_attrs.c)
11757 - Resource leak (pam_ptconfig.c)
11758 - Resource leak (repl5_replica_config.c)
11759 - Resource leak (windows_inc_protocol.c)
11760 - Resource leak (syntaxes/value.c)
11761 - Resource leak (dblayer.c)
11764 - Resource leak (dblayer.c)
11766 - Resource leak (dblayer.c)
11769 - Resource leak (entry.c)
11770 - Resource leak (entry.c)
11771 - Resource leak (entrywsi.c)
11772 - Resource leak (entrywsi.c)
11773 - Resource leak (schema.c)
11774 - Resource leak (snmp_collator.c)
11775 - Resource leak (ldclt/data.c)
11776 - Resource leak (tools/mmldif.c)
11777 - Resource leak (snmp/main.c)
11778 - Resource leak (lib/libutil/dbconf.c)
11779 - Resource leak (lib/libaccess/register.cpp)
11781 - Resource leak (lib/libadmin/error.c)
11872 - Resource leak (agtmmap.c)
https://bugzilla.redhat.com/show_bug.cgi?id=970221
Reviewed by: nhosoi(Thanks!)
|
commit c99d4c0741aca2fb0f42dd34e147f65011537cb4
Author: Mark Reynolds <[email protected]>
Date: Wed Jun 5 16:19:57 2013 -0400
Coverity Fixes (Part 4)
11753 - Resource leak (aclparse.c)
11754 - Resource leak (dna.c)
11755 - Resource leak (dna.c)
11756 - Resource leak (linked_attrs.c)
11757 - Resource leak (pam_ptconfig.c)
11758 - Resource leak (repl5_replica_config.c)
11759 - Resource leak (windows_inc_protocol.c)
11760 - Resource leak (syntaxes/value.c)
11761 - Resource leak (dblayer.c)
11764 - Resource leak (dblayer.c)
11766 - Resource leak (dblayer.c)
11769 - Resource leak (entry.c)
11770 - Resource leak (entry.c)
11771 - Resource leak (entrywsi.c)
11772 - Resource leak (entrywsi.c)
11773 - Resource leak (schema.c)
11774 - Resource leak (snmp_collator.c)
11775 - Resource leak (ldclt/data.c)
11776 - Resource leak (tools/mmldif.c)
11777 - Resource leak (snmp/main.c)
11778 - Resource leak (lib/libutil/dbconf.c)
11779 - Resource leak (lib/libaccess/register.cpp)
11781 - Resource leak (lib/libadmin/error.c)
11872 - Resource leak (agtmmap.c)
https://bugzilla.redhat.com/show_bug.cgi?id=970221
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/servers/plugins/acl/acl.h b/ldap/servers/plugins/acl/acl.h
index 6dbb68f32..d69db30c7 100644
--- a/ldap/servers/plugins/acl/acl.h
+++ b/ldap/servers/plugins/acl/acl.h
@@ -870,6 +870,7 @@ int acllist_moddn_aci_needsLock ( Slapi_DN *oldsdn, char *newdn );
void acllist_print_tree ( Avlnode *root, int *depth, char *start, char *side);
AciContainer *acllist_get_aciContainer_new ( );
void acllist_done_aciContainer ( AciContainer *);
+void free_targetattrfilters( Targetattrfilter ***attrFilterArray);
aclUserGroup* aclg_find_userGroup (const char *n_dn);
void aclg_regen_ugroup_signature( aclUserGroup *ugroup);
diff --git a/ldap/servers/plugins/acl/acllist.c b/ldap/servers/plugins/acl/acllist.c
index e8198af37..623a739a4 100644
--- a/ldap/servers/plugins/acl/acllist.c
+++ b/ldap/servers/plugins/acl/acllist.c
@@ -94,7 +94,6 @@ static int __acllist_add_aci ( aci_t *aci );
static int __acllist_aciContainer_node_cmp ( caddr_t d1, caddr_t d2 );
static int __acllist_aciContainer_node_dup ( caddr_t d1, caddr_t d2 );
static void __acllist_free_aciContainer ( AciContainer **container);
-static void free_targetattrfilters( Targetattrfilter ***input_attrFilterArray);
void my_print( Avlnode *root );
@@ -565,8 +564,9 @@ acllist_free_aci(aci_t *item)
slapi_ch_free ( (void **) &item );
}
-static void free_targetattrfilters( Targetattrfilter ***attrFilterArray) {
-
+void
+free_targetattrfilters( Targetattrfilter ***attrFilterArray)
+{
if (*attrFilterArray) {
int i = 0;
Targetattrfilter *attrfilter;
@@ -592,7 +592,6 @@ static void free_targetattrfilters( Targetattrfilter ***attrFilterArray) {
/* Now free the array */
slapi_ch_free ( (void **) attrFilterArray );
}
-
}
/* SEARCH */
diff --git a/ldap/servers/plugins/acl/aclparse.c b/ldap/servers/plugins/acl/aclparse.c
index 5941b2aec..bf027645f 100644
--- a/ldap/servers/plugins/acl/aclparse.c
+++ b/ldap/servers/plugins/acl/aclparse.c
@@ -1970,14 +1970,13 @@ static int __acl__init_targetattrfilters( aci_t *aci, char *input_str) {
* We need to put each component into a targetattrfilter component of
* the array.
*
-*/
-
+ */
static int process_filter_list( Targetattrfilter ***input_attrFilterArray,
char * input_str) {
char *str, *end_attr;
Targetattrfilter *attrfilter = NULL;
- int numattr=0;
+ int numattr=0, rc = 0;
Targetattrfilter **attrFilterArray = NULL;
str = input_str;
@@ -2009,22 +2008,20 @@ static int process_filter_list( Targetattrfilter ***input_attrFilterArray,
memset (attrfilter, 0, sizeof(Targetattrfilter));
if (strstr( str,":") != NULL) {
-
if ( __acl_init_targetattrfilter( attrfilter, str ) != 0 ) {
slapi_ch_free((void**)&attrfilter);
- return(ACL_SYNTAX_ERR);
+ rc = ACL_SYNTAX_ERR;
+ break;
}
} else {
slapi_ch_free((void**)&attrfilter);
- return(ACL_SYNTAX_ERR);
+ rc = ACL_SYNTAX_ERR;
+ break;
}
-
/*
- * Add the attrfilte to the targetAttrFilter list
- */
-
-
+ * Add the attrfilter to the targetAttrFilter list
+ */
attrFilterArray = (Targetattrfilter **) slapi_ch_realloc (
(void *) attrFilterArray,
((numattr+1)*sizeof(Targetattrfilter *)) );
@@ -2033,7 +2030,6 @@ static int process_filter_list( Targetattrfilter ***input_attrFilterArray,
/* Move on to the next attribute in the list */
str = end_attr;
-
}/* while */
/* NULL terminate the list */
@@ -2042,10 +2038,13 @@ static int process_filter_list( Targetattrfilter ***input_attrFilterArray,
(void *) attrFilterArray,
((numattr+1)*sizeof(Targetattrfilter *)) );
attrFilterArray[numattr] = NULL;
+ if(rc){
+ free_targetattrfilters(&attrFilterArray);
+ } else {
+ *input_attrFilterArray = attrFilterArray;
+ }
- *input_attrFilterArray = attrFilterArray;
- return 0;
-
+ return rc;
}
/*
diff --git a/ldap/servers/plugins/dna/dna.c b/ldap/servers/plugins/dna/dna.c
index e11dd995b..48d539f7d 100644
--- a/ldap/servers/plugins/dna/dna.c
+++ b/ldap/servers/plugins/dna/dna.c
@@ -1515,11 +1515,13 @@ dna_get_shared_servers(struct configEntry *config_entry, PRCList **servers)
* to lowest. */
struct dnaServer *sitem;
PRCList* item = PR_LIST_HEAD(*servers);
+ int inserted = 0;
while (item != *servers) {
sitem = (struct dnaServer *)item;
if (server->remaining > sitem->remaining) {
PR_INSERT_BEFORE(&(server->list), item);
+ inserted = 1;
break;
}
@@ -1528,9 +1530,13 @@ dna_get_shared_servers(struct configEntry *config_entry, PRCList **servers)
if (*servers == item) {
/* add to tail */
PR_INSERT_BEFORE(&(server->list), item);
+ inserted = 1;
break;
}
}
+ if(!inserted){
+ dna_free_shared_server(&server);
+ }
}
}
}
@@ -3340,6 +3346,7 @@ dna_pre_op(Slapi_PBlock * pb, int modtype)
bail:
if (resulting_e)
slapi_entry_free(resulting_e);
+ slapi_mods_free(&smods);
if (ret) {
slapi_log_error(SLAPI_LOG_PLUGIN, DNA_PLUGIN_SUBSYSTEM,
diff --git a/ldap/servers/plugins/linkedattrs/linked_attrs.c b/ldap/servers/plugins/linkedattrs/linked_attrs.c
index 4bea10f17..7d8370d65 100644
--- a/ldap/servers/plugins/linkedattrs/linked_attrs.c
+++ b/ldap/servers/plugins/linkedattrs/linked_attrs.c
@@ -755,6 +755,7 @@ linked_attrs_insert_config_index(struct configEntry *entry)
struct configEntry *config_entry = NULL;
struct configIndex *index_entry = NULL;
PRCList *list = PR_LIST_HEAD(g_managed_config_index);
+ int inserted = 0;
index_entry = (struct configIndex *)slapi_ch_calloc(1, sizeof(struct configIndex));
index_entry->config = entry;
@@ -769,6 +770,7 @@ linked_attrs_insert_config_index(struct configEntry *entry)
slapi_log_error(SLAPI_LOG_CONFIG, LINK_PLUGIN_SUBSYSTEM,
"store [%s] before [%s] \n", entry->dn,
config_entry->dn);
+ inserted = 1;
break;
}
@@ -779,6 +781,7 @@ linked_attrs_insert_config_index(struct configEntry *entry)
PR_INSERT_BEFORE(&(index_entry->list), list);
slapi_log_error(SLAPI_LOG_CONFIG, LINK_PLUGIN_SUBSYSTEM,
"store [%s] at tail\n", entry->dn);
+ inserted = 1;
break;
}
}
@@ -787,6 +790,10 @@ linked_attrs_insert_config_index(struct configEntry *entry)
slapi_log_error(SLAPI_LOG_CONFIG, LINK_PLUGIN_SUBSYSTEM,
"store [%s] at head \n", entry->dn);
PR_INSERT_LINK(&(index_entry->list), g_managed_config_index);
+ inserted = 1;
+ }
+ if(!inserted){
+ slapi_ch_free((void **)&index_entry);
}
}
diff --git a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
index fb2dc2bec..c40b87efb 100644
--- a/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
+++ b/ldap/servers/plugins/pam_passthru/pam_ptconfig.c
@@ -602,6 +602,7 @@ pam_passthru_apply_config (Slapi_Entry* e)
PRCList *list;
Slapi_Attr *a = NULL;
char *filter_str = NULL;
+ int inserted = 0;
pam_ident_attr = slapi_entry_attr_get_charptr(e, PAMPT_PAM_IDENT_ATTR);
map_method = slapi_entry_attr_get_charptr(e, PAMPT_MAP_METHOD_ATTR);
@@ -688,6 +689,7 @@ pam_passthru_apply_config (Slapi_Entry* e)
PR_INSERT_BEFORE(&(entry->list), list);
slapi_log_error(SLAPI_LOG_CONFIG, PAM_PASSTHRU_PLUGIN_SUBSYSTEM,
"store [%s] at tail\n", entry->dn);
+ inserted = 1;
break;
}
}
@@ -696,9 +698,13 @@ pam_passthru_apply_config (Slapi_Entry* e)
PR_INSERT_LINK(&(entry->list), pam_passthru_global_config);
slapi_log_error(SLAPI_LOG_CONFIG, PAM_PASSTHRU_PLUGIN_SUBSYSTEM,
"store [%s] at head \n", entry->dn);
+ inserted = 1;
}
bail:
+ if(!inserted){
+ pam_passthru_free_config_entry(&entry);
+ }
slapi_ch_free_string(&new_service);
slapi_ch_free_string(&map_method);
slapi_ch_free_string(&pam_ident_attr);
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index cc0a93f1f..7d83c99fa 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1847,7 +1847,7 @@ check_replicas_are_done_cleaning(cleanruv_data *data )
{
Object *agmt_obj;
Repl_Agmt *agmt;
- char *csnstr = NULL;
+ char csnstr[CSN_STRSIZE];
char *filter = NULL;
int not_all_cleaned = 1;
int interval = 10;
@@ -1890,7 +1890,6 @@ check_replicas_are_done_cleaning(cleanruv_data *data )
interval = 14400;
}
}
- slapi_ch_free_string(&csnstr);
slapi_ch_free_string(&filter);
}
diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c
index cf979cc0d..e503f30b3 100644
--- a/ldap/servers/plugins/replication/windows_inc_protocol.c
+++ b/ldap/servers/plugins/replication/windows_inc_protocol.c
@@ -1024,6 +1024,7 @@ windows_inc_run(Private_Repl_Protocol *prp)
windows_conn_cancel_linger(prp->conn);
/* ... and disconnect, if currently connected */
windows_conn_disconnect(prp->conn);
+ ruv_destroy ( &ruv );
LDAPDebug0Args( LDAP_DEBUG_TRACE, "<= windows_inc_run\n" );
}
diff --git a/ldap/servers/plugins/syntaxes/value.c b/ldap/servers/plugins/syntaxes/value.c
index efef9a853..0335a89e4 100644
--- a/ldap/servers/plugins/syntaxes/value.c
+++ b/ldap/servers/plugins/syntaxes/value.c
@@ -282,10 +282,13 @@ value_cmp(
value_normalize_ext( v1->bv_val, syntax,
1 /* trim leading blanks */, &alt );
if (alt) {
+ int inserted = 0;
+
if (free_v1) {
slapi_ch_free_string(&v1->bv_val);
v1->bv_val = alt;
v1->bv_len = strlen(alt);
+ inserted = 1;
} else {
if (strlen(alt) < buffer_space) {
v1->bv_len = strlen(alt);
@@ -297,8 +300,12 @@ value_cmp(
v1 = (struct berval *)slapi_ch_malloc(sizeof(struct berval));
v1->bv_val = alt;
v1->bv_len = strlen(alt);
+ inserted = 1;
}
}
+ if(!inserted){
+ slapi_ch_free_string(&alt);
+ }
}
if (!free_v1) {
buffer_space -= v1->bv_len + 1;
@@ -320,10 +327,13 @@ value_cmp(
value_normalize_ext( v2->bv_val, syntax,
1 /* trim leading blanks */, &alt );
if (alt) {
+ int inserted = 0;
+
if (free_v2) {
slapi_ch_free_string(&v2->bv_val);
v2->bv_val = alt;
v2->bv_len = strlen(alt);
+ inserted = 1;
} else {
if (strlen(alt) < buffer_space) {
v2->bv_len = strlen(alt);
@@ -335,8 +345,12 @@ value_cmp(
v2 = (struct berval *)slapi_ch_malloc(sizeof(struct berval));
v2->bv_val = alt;
v2->bv_len = strlen(alt);
+ inserted = 1;
}
}
+ if(!inserted){
+ slapi_ch_free_string(&alt);
+ }
}
if (!free_v2) {
buffer_space -= v2->bv_len + 1;
diff --git a/ldap/servers/slapd/agtmmap.c b/ldap/servers/slapd/agtmmap.c
index f18138a5f..392254720 100644
--- a/ldap/servers/slapd/agtmmap.c
+++ b/ldap/servers/slapd/agtmmap.c
@@ -196,6 +196,7 @@ agt_mopen_stats (char * statsfile, int mode, int *hdl)
}
if(fstat (fd, &fileinfo) != 0){
+ close(fd);
rc = errno;
goto bail;
}
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 2f05668a5..e93f79a18 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -1980,7 +1980,7 @@ dblayer_get_id2entry_size(ldbm_instance *inst)
char *id2entry_file = NULL;
PRFileInfo64 info;
int rc;
- char inst_dir[MAXPATHLEN], *inst_dirp;
+ char inst_dir[MAXPATHLEN], *inst_dirp = NULL;
if (NULL == inst) {
return 0;
@@ -1989,6 +1989,9 @@ dblayer_get_id2entry_size(ldbm_instance *inst)
inst_dirp = dblayer_get_full_inst_dir(li, inst, inst_dir, MAXPATHLEN);
id2entry_file = slapi_ch_smprintf("%s/%s", inst_dirp,
ID2ENTRY LDBM_FILENAME_SUFFIX);
+ if(inst_dirp != inst_dir){
+ slapi_ch_free_string(&inst_dirp);
+ }
rc = PR_GetFileInfo64(id2entry_file, &info);
slapi_ch_free_string(&id2entry_file);
if (rc) {
@@ -3160,6 +3163,9 @@ dblayer_open_file(backend *be, char* indexname, int open_flag,
}
abs_file_name = slapi_ch_smprintf("%s%c%s",
inst_dirp, get_sep(inst_dirp), file_name);
+ if (inst_dirp != inst_dir){
+ slapi_ch_free_string(&inst_dirp);
+ }
DB_OPEN(pENV->dblayer_openflags,
dbp, NULL/* txnid */, abs_file_name, subname, DB_BTREE,
open_flags, priv->dblayer_file_mode, return_value);
@@ -3175,8 +3181,6 @@ dblayer_open_file(backend *be, char* indexname, int open_flag,
goto out;
slapi_ch_free_string(&abs_file_name);
- if (inst_dirp != inst_dir)
- slapi_ch_free_string(&inst_dirp);
}
DB_OPEN(pENV->dblayer_openflags,
dbp, NULL, /* txnid */ rel_path, subname, DB_BTREE,
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
index 06815fb85..ea86fcc5a 100644
--- a/ldap/servers/slapd/entry.c
+++ b/ldap/servers/slapd/entry.c
@@ -574,6 +574,7 @@ str2entry_fast( const char *rawdn, const Slapi_RDN *srdn, char *s, int flags, in
}
done:
+ csnset_free(&valuecsnset);
csn_free(&attributedeletioncsn);
csn_free(&maxcsn);
LDAPDebug( LDAP_DEBUG_TRACE, "<= str2entry_fast 0x%x\n",
@@ -766,6 +767,7 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
( 0 != ( flags & SLAPI_STR2ENTRY_REMOVEDUPVALS ));
Slapi_Value *value = 0;
CSN *attributedeletioncsn= NULL;
+ CSNSet *valuecsnset= NULL;
CSN *maxcsn= NULL;
char *normdn = NULL;
int strict = 0;
@@ -786,7 +788,6 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
}
while ( (s = ldif_getline( &next )) != NULL )
{
- CSNSet *valuecsnset= NULL;
int value_state= VALUE_NOTFOUND;
int attr_state= VALUE_NOTFOUND;
int freeval = 0;
@@ -842,6 +843,7 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
"str2entry_dupcheck: Invalid DN: %s\n", rawdn);
slapi_entry_free( e );
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
+ csnset_free(&valuecsnset);
csn_free(&attributedeletioncsn);
csn_free(&maxcsn);
return NULL;
@@ -864,6 +866,7 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
"str2entry_dupcheck: Invalid DN: %s\n", rawdn);
slapi_entry_free( e );
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
+ csnset_free(&valuecsnset);
csn_free(&attributedeletioncsn);
csn_free(&maxcsn);
return NULL;
@@ -977,6 +980,7 @@ str2entry_dupcheck( const char *rawdn, char *s, int flags, int read_stateinfo )
if (freeval) slapi_ch_free_string(&bvvalue.bv_val);
csn_free(&attributedeletioncsn);
csn_free(&maxcsn);
+ csnset_free(&valuecsnset);
return NULL;
}
for ( i = 0; i < nattrs; i++ )
@@ -1319,6 +1323,7 @@ free_and_return:
}
slapi_ch_free((void **) &dyn_attrs );
if (value) slapi_value_free(&value);
+ csnset_free(&valuecsnset);
csn_free(&attributedeletioncsn);
csn_free(&maxcsn);
diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
index 411fb6fce..00de19338 100644
--- a/ldap/servers/slapd/entrywsi.c
+++ b/ldap/servers/slapd/entrywsi.c
@@ -447,18 +447,21 @@ entry_add_present_values_wsi(Slapi_Entry *e, const char *type, struct berval **b
}
return retVal;
}
+
static int
entry_add_present_values_wsi_single_valued(Slapi_Entry *e, const char *type, struct berval **bervals, const CSN *csn, int urp, long flags)
{
int retVal= LDAP_SUCCESS;
Slapi_Value **valuestoadd = NULL;
+
valuearray_init_bervalarray(bervals,&valuestoadd); /* JCM SLOW FUNCTION */
if(!valuearray_isempty(valuestoadd))
{
Slapi_Attr *a= NULL;
long a_flags_orig;
int attr_state= entry_attr_find_wsi(e, type, &a);
- a_flags_orig = a->a_flags;
+
+ a_flags_orig = a->a_flags;
a->a_flags |= flags;
/* Check if the type of the to-be-added values has DN syntax or not. */
if (slapi_attr_is_dn_syntax_attr(a)) {
@@ -476,7 +479,6 @@ entry_add_present_values_wsi_single_valued(Slapi_Entry *e, const char *type, str
valuearray_update_csn (valuestoadd,CSN_TYPE_VALUE_UPDATED,csn);
valueset_add_valuearray_ext(&a->a_present_values, valuestoadd, SLAPI_VALUE_FLAG_PASSIN);
slapi_ch_free ( (void **)&valuestoadd );
-
/*
* Now delete non-RDN values from a->a_present_values; and
* restore possible RDN values from a->a_deleted_values
@@ -489,16 +491,16 @@ entry_add_present_values_wsi_single_valued(Slapi_Entry *e, const char *type, str
Slapi_Value **deletedvalues= NULL;
switch(attr_state)
{
- case ATTRIBUTE_PRESENT:
- /* The attribute is already on the present list */
- break;
- case ATTRIBUTE_DELETED:
- /* Move the deleted attribute onto the present list */
- entry_deleted_attribute_to_present_attribute(e, a);
- break;
- case ATTRIBUTE_NOTFOUND:
- /* No-op - attribute was initialized & added to entry above */
- break;
+ case ATTRIBUTE_PRESENT:
+ /* The attribute is already on the present list */
+ break;
+ case ATTRIBUTE_DELETED:
+ /* Move the deleted attribute onto the present list */
+ entry_deleted_attribute_to_present_attribute(e, a);
+ break;
+ case ATTRIBUTE_NOTFOUND:
+ /* No-op - attribute was initialized & added to entry above */
+ break;
}
/* Check if any of the values to be added are on the deleted list */
valueset_remove_valuearray(&a->a_deleted_values,
@@ -522,24 +524,28 @@ entry_add_present_values_wsi_single_valued(Slapi_Entry *e, const char *type, str
}
valuearray_update_csn(valuestoadd,CSN_TYPE_VALUE_UPDATED,csn);
retVal= attr_add_valuearray(a, valuestoadd, slapi_entry_get_dn_const(e));
- valuearray_free(&valuestoadd);
}
a->a_flags = a_flags_orig;
}
+ valuearray_free(&valuestoadd);
+
return(retVal);
}
+
static int
entry_add_present_values_wsi_multi_valued(Slapi_Entry *e, const char *type, struct berval **bervals, const CSN *csn, int urp, long flags)
{
int retVal= LDAP_SUCCESS;
Slapi_Value **valuestoadd = NULL;
+
valuearray_init_bervalarray(bervals,&valuestoadd); /* JCM SLOW FUNCTION */
if(!valuearray_isempty(valuestoadd))
{
- Slapi_Attr *a= NULL;
+ Slapi_Attr *a = NULL;
long a_flags_orig;
- int attr_state= entry_attr_find_wsi(e, type, &a);
- a_flags_orig = a->a_flags;
+ int attr_state = entry_attr_find_wsi(e, type, &a);
+
+ a_flags_orig = a->a_flags;
a->a_flags |= flags;
/* Check if the type of the to-be-added values has DN syntax or not. */
if (slapi_attr_is_dn_syntax_attr(a)) {
@@ -577,18 +583,19 @@ entry_add_present_values_wsi_multi_valued(Slapi_Entry *e, const char *type, stru
else
{
Slapi_Value **deletedvalues= NULL;
+
switch(attr_state)
{
- case ATTRIBUTE_PRESENT:
- /* The attribute is already on the present list */
- break;
- case ATTRIBUTE_DELETED:
- /* Move the deleted attribute onto the present list */
- entry_deleted_attribute_to_present_attribute(e, a);
- break;
- case ATTRIBUTE_NOTFOUND:
- /* No-op - attribute was initialized & added to entry above */
- break;
+ case ATTRIBUTE_PRESENT:
+ /* The attribute is already on the present list */
+ break;
+ case ATTRIBUTE_DELETED:
+ /* Move the deleted attribute onto the present list */
+ entry_deleted_attribute_to_present_attribute(e, a);
+ break;
+ case ATTRIBUTE_NOTFOUND:
+ /* No-op - attribute was initialized & added to entry above */
+ break;
}
/* Check if any of the values to be added are on the deleted list */
valueset_remove_valuearray(&a->a_deleted_values,
@@ -612,10 +619,11 @@ entry_add_present_values_wsi_multi_valued(Slapi_Entry *e, const char *type, stru
}
valuearray_update_csn(valuestoadd,CSN_TYPE_VALUE_UPDATED,csn);
retVal= attr_add_valuearray(a, valuestoadd, slapi_entry_get_dn_const(e));
- valuearray_free(&valuestoadd);
}
a->a_flags = a_flags_orig;
}
+ valuearray_free(&valuestoadd);
+
return(retVal);
}
diff --git a/ldap/servers/slapd/schema.c b/ldap/servers/slapd/schema.c
index fa7e5aadd..49626132d 100644
--- a/ldap/servers/slapd/schema.c
+++ b/ldap/servers/slapd/schema.c
@@ -3238,6 +3238,7 @@ read_oc_ldif ( const char *input, struct objclass **oc, char *errorbuf,
pnew_oc->oc_kind = kind;
*oc = pnew_oc;
+
return read_oc_ldif_return( LDAP_SUCCESS, pOcOid, psbOcName, pOcSup, pOcDesc );
}
@@ -4750,7 +4751,9 @@ load_schema_dse(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *ignored,
if ( LDAP_SUCCESS != (*returncode = parse_oc_str(s, &oc, returntext,
SLAPI_DSE_RETURNTEXT_SIZE, flags,
primary_file /* force user defined? */,
- schema_ds4x_compat))) {
+ schema_ds4x_compat)))
+ {
+ oc_free( &oc );
break;
}
if (flags & DSE_SCHEMA_NO_LOAD)
diff --git a/ldap/servers/slapd/snmp_collator.c b/ldap/servers/slapd/snmp_collator.c
index aeac7ae57..b3d072ac4 100644
--- a/ldap/servers/slapd/snmp_collator.c
+++ b/ldap/servers/slapd/snmp_collator.c
@@ -738,8 +738,10 @@ snmp_update_cache_stats()
/* set the cache hits/cache entries info */
be = slapi_get_first_backend(&cookie);
- if (!be)
+ if (!be){
+ slapi_ch_free ((void **) &cookie);
return;
+ }
be_next = slapi_get_next_backend(cookie);
diff --git a/ldap/servers/slapd/tools/ldclt/data.c b/ldap/servers/slapd/tools/ldclt/data.c
index e0eac55e5..f6dd4ef47 100644
--- a/ldap/servers/slapd/tools/ldclt/data.c
+++ b/ldap/servers/slapd/tools/ldclt/data.c
@@ -335,6 +335,8 @@ exit:
#ifdef _WIN32
if (findPath) free (findPath);
#endif
+ close(fd);
+
return rc;
}
diff --git a/ldap/servers/slapd/tools/mmldif.c b/ldap/servers/slapd/tools/mmldif.c
index 6d6233887..fb97129f5 100644
--- a/ldap/servers/slapd/tools/mmldif.c
+++ b/ldap/servers/slapd/tools/mmldif.c
@@ -735,6 +735,7 @@ readrec(edfFILE * edf1, attrib1_t ** attrib)
attrib1_t * newlist = NULL;
attrib1_t * a;
int ignore_rec = FALSE;
+ int free_it = 0;
*attrib = NULL;
if (edf1->end) {
@@ -759,6 +760,9 @@ readrec(edfFILE * edf1, attrib1_t ** attrib)
/* that's for the case where the file */
/* has a trailing blank line */
freefreelist(freelist);
+ if(free_it){
+ freefreelist(att);
+ }
return IDDS_MM_EOF;
}
break; /* return */
@@ -790,9 +794,11 @@ readrec(edfFILE * edf1, attrib1_t ** attrib)
continue;
if (!freelist) {
att = (attrib1_t *)malloc(sizeof(attrib1_t));
+ free_it = 1;
} else {
att = freelist;
freelist = freelist->next;
+ free_it = 0;
}
att->namelen = vptr-line;
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
index 42dd76b0a..9a60f7089 100644
--- a/ldap/servers/snmp/main.c
+++ b/ldap/servers/snmp/main.c
@@ -311,6 +311,9 @@ load_config(char *conf_path)
/* load agentx-master setting */
p = p + 13;
if ((p = strtok(p, " \t\n")) != NULL) {
+ if (agentx_master){
+ free(agentx_master);
+ }
if ((agentx_master = (char *) malloc(strlen(p) + 1)) != NULL)
strcpy(agentx_master, p);
}
diff --git a/lib/ldaputil/dbconf.c b/lib/ldaputil/dbconf.c
index 746afc9ad..e6de3b77d 100644
--- a/lib/ldaputil/dbconf.c
+++ b/lib/ldaputil/dbconf.c
@@ -263,7 +263,10 @@ static int dbconf_parse_propval (char *buf, char *ptr,
/* Success - we have prop & val */
propval = (DBPropVal_t *)malloc(sizeof(DBPropVal_t));
- if (!propval) return LDAPU_ERR_OUT_OF_MEMORY;
+ if (!propval){
+ if (encval) free(val);
+ return LDAPU_ERR_OUT_OF_MEMORY;
+ }
memset((void *)propval, 0, sizeof(DBPropVal_t));
propval->prop = strdup(prop);
propval->val = val ? strdup(val) : 0;
@@ -273,7 +276,7 @@ static int dbconf_parse_propval (char *buf, char *ptr,
return LDAPU_ERR_OUT_OF_MEMORY;
}
- if (encval) free(val); /* val was allocated by dbconf_decodeval */
+ if(encval) free(val); /* val was allocated by dbconf_decodeval */
insert_dbinfo_propval(db_info, propval);
return LDAPU_SUCCESS;
diff --git a/lib/libaccess/register.cpp b/lib/libaccess/register.cpp
index 3f2acc5eb..766436e63 100644
--- a/lib/libaccess/register.cpp
+++ b/lib/libaccess/register.cpp
@@ -791,6 +791,7 @@ ACL_AttrGetterRegister(NSErr_t *errp, const char *attr, ACLAttrGetterFn_t fn,
if (*hep == 0) { /* New entry */
PR_INIT_CLIST(&getter->list);
if (NULL == PR_HashTableAdd(ACLAttrGetterHash, attr, (void *)getter)) {
+ FREE(getter);
ACL_CritExit();
return -1;
}
diff --git a/lib/libadmin/error.c b/lib/libadmin/error.c
index c0a1f8031..4ba98036c 100644
--- a/lib/libadmin/error.c
+++ b/lib/libadmin/error.c
@@ -126,10 +126,13 @@ NSAPI_PUBLIC void output_alert(int type, char *info, char *details, int wait)
fprintf(stdout, "%s:%s\\n%s", error_headers[type], info, wrapped);
if(type==FILE_ERROR || type==SYSTEM_ERROR) {
err = get_error();
- if(err != 0)
+ if(err != 0){
+ char *err_str = verbose_error();
fprintf(stdout,
"\\n\\nThe system returned error number %d, "
- "which is %s.", err, verbose_error());
+ "which is %s.", err, err_str);
+ FREE(err_str);
+ }
}
fprintf(stdout, "\");");
| 0 |
04635e402bbef8ef60dfd0e721efd5cea8bf61ac
|
389ds/389-ds-base
|
Ticket 49249 - cos_cache is erroneously logging schema checking failure
Bug Description:
cos is generating virtual attributes in several steps.
One of the first step is to check that the generated attribute will
conform the schema.
Then additional checks (override/merge and cos scope) are performed.
If the entry does not conform the schema, it skips the additional checks.
In such case it logs a message stating that the virtual attribute does not
apply.
During slapi-log-err refactoring (https://pagure.io/389-ds-base/issue/48978)
the logging level, in case of schema violation, was move from SLAPI_LOG_PLUGIN
to SLAPI_LOG_ERR.
This change is incorrect because the potential failure to schema check is
normal and does not imply the cos would apply to the entry (for example if
the entry was not in the scope, the cos would also be skipped).
Fix Description:
Move back the logging level from SLAPI_LOG_ERR to SLAPI_LOG_PLUGIN
https://pagure.io/389-ds-base/issue/49249
Reviewed by: Mark Reynolds
Platforms tested: F23
Flag Day: no
Doc impact: no
|
commit 04635e402bbef8ef60dfd0e721efd5cea8bf61ac
Author: Thierry Bordaz <[email protected]>
Date: Thu May 11 09:21:38 2017 +0200
Ticket 49249 - cos_cache is erroneously logging schema checking failure
Bug Description:
cos is generating virtual attributes in several steps.
One of the first step is to check that the generated attribute will
conform the schema.
Then additional checks (override/merge and cos scope) are performed.
If the entry does not conform the schema, it skips the additional checks.
In such case it logs a message stating that the virtual attribute does not
apply.
During slapi-log-err refactoring (https://pagure.io/389-ds-base/issue/48978)
the logging level, in case of schema violation, was move from SLAPI_LOG_PLUGIN
to SLAPI_LOG_ERR.
This change is incorrect because the potential failure to schema check is
normal and does not imply the cos would apply to the entry (for example if
the entry was not in the scope, the cos would also be skipped).
Fix Description:
Move back the logging level from SLAPI_LOG_ERR to SLAPI_LOG_PLUGIN
https://pagure.io/389-ds-base/issue/49249
Reviewed by: Mark Reynolds
Platforms tested: F23
Flag Day: no
Doc impact: no
diff --git a/dirsrvtests/tests/tickets/ticket49249_test.py b/dirsrvtests/tests/tickets/ticket49249_test.py
new file mode 100644
index 000000000..1dfd07eee
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49249_test.py
@@ -0,0 +1,140 @@
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st as topo
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+COS_BRANCH = 'ou=cos_scope,' + DEFAULT_SUFFIX
+COS_DEF = 'cn=cos_definition,' + COS_BRANCH
+COS_TEMPLATE = 'cn=cos_template,' + COS_BRANCH
+INVALID_USER_WITH_COS = 'cn=cos_user_no_mail,' + COS_BRANCH
+VALID_USER_WITH_COS = 'cn=cos_user_with_mail,' + COS_BRANCH
+
+NO_COS_BRANCH = 'ou=no_cos_scope,' + DEFAULT_SUFFIX
+INVALID_USER_WITHOUT_COS = 'cn=no_cos_user_no_mail,' + NO_COS_BRANCH
+VALID_USER_WITHOUT_COS = 'cn=no_cos_user_with_mail,' + NO_COS_BRANCH
+
+def test_ticket49249(topo):
+ """Write your testcase here...
+
+ Also, if you need any testcase initialization,
+ please, write additional fixture for that(include finalizer).
+ """
+ # Add the branches
+ try:
+ topo.standalone.add_s(Entry((COS_BRANCH, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'cos_scope'
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add cos_scope: error ' + e.message['desc'])
+ assert False
+
+ try:
+ topo.standalone.add_s(Entry((NO_COS_BRANCH, {
+ 'objectclass': 'top extensibleObject'.split(),
+ 'ou': 'no_cos_scope'
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add no_cos_scope: error ' + e.message['desc'])
+ assert False
+
+ try:
+ topo.standalone.add_s(Entry((COS_TEMPLATE, {
+ 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+ 'cn': 'cos_template',
+ 'cosPriority': '1',
+ 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com',
+ 'mailAlternateAddress': 'hello@world'
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add cos_template: error ' + e.message['desc'])
+ assert False
+
+ try:
+ topo.standalone.add_s(Entry((COS_DEF, {
+ 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+ 'cn': 'cos_definition',
+ 'costemplatedn': COS_TEMPLATE,
+ 'cosAttribute': 'mailAlternateAddress default'
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add cos_definition: error ' + e.message['desc'])
+ assert False
+
+ try:
+ # This entry is not allowed to have mailAlternateAddress
+ topo.standalone.add_s(Entry((INVALID_USER_WITH_COS, {
+ 'objectclass': 'top person'.split(),
+ 'cn': 'cos_user_no_mail',
+ 'sn': 'cos_user_no_mail'
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add cos_user_no_mail: error ' + e.message['desc'])
+ assert False
+
+ try:
+ # This entry is allowed to have mailAlternateAddress
+ topo.standalone.add_s(Entry((VALID_USER_WITH_COS, {
+ 'objectclass': 'top mailGroup'.split(),
+ 'cn': 'cos_user_with_mail'
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add cos_user_no_mail: error ' + e.message['desc'])
+ assert False
+
+ try:
+ # This entry is not allowed to have mailAlternateAddress
+ topo.standalone.add_s(Entry((INVALID_USER_WITHOUT_COS, {
+ 'objectclass': 'top person'.split(),
+ 'cn': 'no_cos_user_no_mail',
+ 'sn': 'no_cos_user_no_mail'
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add no_cos_user_no_mail: error ' + e.message['desc'])
+ assert False
+
+ try:
+ # This entry is allowed to have mailAlternateAddress
+ topo.standalone.add_s(Entry((VALID_USER_WITHOUT_COS, {
+ 'objectclass': 'top mailGroup'.split(),
+ 'cn': 'no_cos_user_with_mail'
+ })))
+ except ldap.LDAPError as e:
+ log.error('Failed to add no_cos_user_with_mail: error ' + e.message['desc'])
+ assert False
+
+ try:
+ entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(mailAlternateAddress=*)')
+ assert len(entries) == 1
+ assert entries[0].hasValue('mailAlternateAddress', 'hello@world')
+ except ldap.LDAPError as e:
+ log.fatal('Unable to retrieve cos_user_with_mail (only entry with mailAlternateAddress) : error %s' % (USER1_DN, e.message['desc']))
+ assert False
+
+ assert not topo.standalone.ds_error_log.match(".*cos attribute mailAlternateAddress failed schema.*")
+
+ if DEBUGGING:
+ # Add debugging steps(if any)...
+ pass
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 00cae9cef..25bee61bb 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -2380,7 +2380,7 @@ static int cos_cache_query_attr(cos_cache *ptheCache, vattr_context *context,
if(!cos_cache_schema_check(pCache, attr_index, pObjclasses))
{
- slapi_log_err(SLAPI_LOG_ERR, COS_PLUGIN_SUBSYSTEM, "cos_cache_query_attr - cos attribute %s failed schema check on dn: %s\n",type,pDn);
+ slapi_log_err(SLAPI_LOG_PLUGIN, COS_PLUGIN_SUBSYSTEM, "cos_cache_query_attr - cos attribute %s failed schema check on dn: %s\n",type,pDn);
goto bail;
}
}
| 0 |
50c377b902c35d3167080c0277b8669e377be0d0
|
389ds/389-ds-base
|
Resolves: 435730
Summary: Allow fractional replication between masters.
|
commit 50c377b902c35d3167080c0277b8669e377be0d0
Author: Nathan Kinder <[email protected]>
Date: Mon Mar 3 18:35:11 2008 +0000
Resolves: 435730
Summary: Allow fractional replication between masters.
diff --git a/ldap/servers/plugins/replication/repl5_protocol_util.c b/ldap/servers/plugins/replication/repl5_protocol_util.c
index dafd77742..4a4114d71 100644
--- a/ldap/servers/plugins/replication/repl5_protocol_util.c
+++ b/ldap/servers/plugins/replication/repl5_protocol_util.c
@@ -378,27 +378,6 @@ acquire_replica(Private_Repl_Protocol *prp, char *prot_oid, RUV **ruv)
default:
return_value = ACQUIRE_FATAL_ERROR;
}
- /* Now check for fractional compatibility with the replica
- * We need to do the check now because prior to acquiring the
- * replica we do not have sufficient access rights to read the replica id
- */
- /* Check if this is a fractional agreement, we need to
- * verify that the consumer is read-only */
- if ((return_value == ACQUIRE_SUCCESS) &&
- agmt_is_fractional(prp->agmt)) {
- crc = conn_replica_is_readonly(conn);
- if (CONN_IS_NOT_READONLY == crc) {
- /* This is a fatal error */
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name,
- "%s: Unable to acquire replica: "
- "the agreement is fractional but the replica is not read-only. Fractional agreements must specify a read-only replica "
- "Replication is aborting.\n",
- agmt_get_long_name(prp->agmt));
- prp->last_acquire_response_code = NSDS50_REPL_INTERNAL_ERROR;
- return_value = ACQUIRE_FATAL_ERROR;
- goto error;
- }
- }
}
else
{
| 0 |
4a045f0c371ae588c39cdfe1cbdb4ee048b0aeac
|
389ds/389-ds-base
|
Resolves: bug 454030
Description: Need to address 64-bit compiler warnings - part 1
Fix Description: missed one line from my previous commit
|
commit 4a045f0c371ae588c39cdfe1cbdb4ee048b0aeac
Author: Rich Megginson <[email protected]>
Date: Thu Oct 9 14:57:32 2008 +0000
Resolves: bug 454030
Description: Need to address 64-bit compiler warnings - part 1
Fix Description: missed one line from my previous commit
diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
index 2cf91f3ad..31c762241 100644
--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
@@ -269,7 +269,7 @@ int add_op_attrs(Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *ep,
int import_subcount_mother_init(import_subcount_stuff *mothers, ID parent_id,
size_t count)
{
- PR_ASSERT(NULL == PL_HashTableLookup(mothers->hashtable,(void*)parent_id));
+ PR_ASSERT(NULL == PL_HashTableLookup(mothers->hashtable,(void*)((uintptr_t)parent_id)));
PL_HashTableAdd(mothers->hashtable,(void*)((uintptr_t)parent_id),(void*)count);
return 0;
}
| 0 |
ce1ac026add75269d75adc215edac5e01a650e22
|
389ds/389-ds-base
|
609255 - fix coverity Defect Type: Memory - illegal accesses issues
https://bugzilla.redhat.com/show_bug.cgi?id=609255
12237 UNINIT Triaged Unassigned Bug Minor Fix Required
vlv_trim_candidates_byvalue() ds/ldap/servers/slapd/back-ldbm/vlv.c
Commit:
There is almost no chance to pass uninitialized typedown_value to
ber_bvecfree unless vlv_request_control value is NULL. Anyway, we
init typedown_value to NULL.
|
commit ce1ac026add75269d75adc215edac5e01a650e22
Author: Noriko Hosoi <[email protected]>
Date: Wed Jun 30 11:19:10 2010 -0700
609255 - fix coverity Defect Type: Memory - illegal accesses issues
https://bugzilla.redhat.com/show_bug.cgi?id=609255
12237 UNINIT Triaged Unassigned Bug Minor Fix Required
vlv_trim_candidates_byvalue() ds/ldap/servers/slapd/back-ldbm/vlv.c
Commit:
There is almost no chance to pass uninitialized typedown_value to
ber_bvecfree unless vlv_request_control value is NULL. Anyway, we
init typedown_value to NULL.
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
index 2f2cb48e9..fbbd6044b 100644
--- a/ldap/servers/slapd/back-ldbm/vlv.c
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
@@ -1489,7 +1489,7 @@ vlv_trim_candidates_byvalue(backend *be, const IDList *candidates, const sort_sp
PRUint32 current= 0;
ID id = NOID;
int found= 0;
- struct berval **typedown_value;
+ struct berval **typedown_value = NULL;
/* For non-matchrule indexing */
value_compare_fn_type compare_fn= NULL;
| 0 |
d58a568921eb8edff21c8203530fef505bab2b18
|
389ds/389-ds-base
|
Ticket 47790 - Integer config attributes accept invalid
values at server startup
Bug Description: Manually editing the dse.ldif allows invalid values to be set
for some configuration attributes
Fix Description: Check integer config values to make sure they are numbers.
https://fedorahosted.org/389/ticket/47790
Jenkins: passed
Reviewed by: nhosoi(Thanks!)
|
commit d58a568921eb8edff21c8203530fef505bab2b18
Author: Mark Reynolds <[email protected]>
Date: Tue Jul 15 14:07:56 2014 -0400
Ticket 47790 - Integer config attributes accept invalid
values at server startup
Bug Description: Manually editing the dse.ldif allows invalid values to be set
for some configuration attributes
Fix Description: Check integer config values to make sure they are numbers.
https://fedorahosted.org/389/ticket/47790
Jenkins: passed
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
index 0e0e75fac..1b8a70bdb 100644
--- a/ldap/schema/01core389.ldif
+++ b/ldap/schema/01core389.ldif
@@ -285,7 +285,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2290 NAME 'nsslapd-disk-monitoring-thres
attributeTypes: ( 2.16.840.1.113730.3.1.2291 NAME 'nsslapd-disk-monitoring-grace-period' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2292 NAME 'nsslapd-disk-monitoring-logging-critical' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2293 NAME 'nsslapd-ndn-cache-enabled' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
-attributeTypes: ( 2.16.840.1.113730.3.1.2294 NAME 'nsslapd-ndn-cache-max-size' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2294 NAME 'nsslapd-ndn-cache-max-size' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2295 NAME 'nsslapd-allowed-sasl-mechanisms' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2296 NAME 'nsslapd-ignore-virtual-attrs' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2297 NAME 'nsslapd-search-return-original-type-switch' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index e890aed36..b437112a7 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1746,7 +1746,6 @@ config_set_disk_threshold( const char *attrname, char *value, char *errorbuf, in
errno = 0;
threshold = strtoll(value, &endp, 10);
-
if ( *endp != '\0' || threshold <= 4096 || errno == ERANGE ) {
PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
"%s: \"%s\" is invalid, threshold must be greater than 4096 and less then %lld",
@@ -1788,8 +1787,7 @@ config_set_disk_grace_period( const char *attrname, char *value, char *errorbuf,
}
period = strtol(value, &endp, 10);
-
- if ( *endp != '\0' || period < 1 ) {
+ if ( *endp != '\0' || period < 1 || errno == ERANGE ) {
PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: \"%s\" is invalid, grace period must be at least 1 minute",
attrname, value);
retVal = LDAP_OPERATIONS_ERROR;
@@ -1821,9 +1819,17 @@ config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf,
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
int retVal = LDAP_SUCCESS;
+ char *endp;
long size;
- size = atol(value);
+ size = strtol(value, &endp, 10);
+ if ( *endp != '\0' || errno == ERANGE){
+ retVal = LDAP_OPERATIONS_ERROR;
+ PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "(%s) value (%s) "
+ "is invalid\n",attrname, value);
+ return retVal;
+ }
+
if(size < 0){
size = 0; /* same as -1 */
}
@@ -1846,13 +1852,21 @@ config_set_sasl_maxbufsize(const char *attrname, char *value, char *errorbuf, in
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
int retVal = LDAP_SUCCESS;
- int default_size = atoi(DEFAULT_SASL_MAXBUFSIZE);
- int size;
+ long default_size = atol(DEFAULT_SASL_MAXBUFSIZE);
+ long size;
+ char *endp;
+
+ size = strtol(value, &endp, 10);
+ if ( *endp != '\0' || errno == ERANGE){
+ retVal = LDAP_OPERATIONS_ERROR;
+ PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "(%s) value (%s) "
+ "is invalid\n",attrname, value);
+ return retVal;
+ }
- size = atoi(value);
if(size < default_size){
- PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "nsslapd-sasl-max-buffer-size is too low (%d), "
- "setting to default value (%d).\n",size, default_size);
+ PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "nsslapd-sasl-max-buffer-size is too low (%ld), "
+ "setting to default value (%ld).\n",size, default_size);
size = default_size;
}
if(apply){
@@ -1888,7 +1902,6 @@ config_set_port( const char *attrname, char *port, char *errorbuf, int apply ) {
errno = 0;
nPort = strtol(port, &endp, 10);
-
if ( *endp != '\0' || errno == ERANGE || nPort > LDAP_PORT_MAX || nPort < 0 ) {
retVal = LDAP_OPERATIONS_ERROR;
PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
@@ -1898,8 +1911,7 @@ config_set_port( const char *attrname, char *port, char *errorbuf, int apply ) {
}
if ( nPort == 0 ) {
- LDAPDebug( LDAP_DEBUG_ANY,
- "Information: Non-Secure Port Disabled\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ANY, "Information: Non-Secure Port Disabled\n", 0, 0, 0 );
}
if ( apply ) {
@@ -1926,7 +1938,6 @@ config_set_secureport( const char *attrname, char *port, char *errorbuf, int app
errno = 0;
nPort = strtol(port, &endp, 10);
-
if (*endp != '\0' || errno == ERANGE || nPort > LDAP_PORT_MAX || nPort <= 0 ) {
retVal = LDAP_OPERATIONS_ERROR;
PR_snprintf ( errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
@@ -2573,7 +2584,7 @@ config_set_pw_minlength( const char *attrname, char *value, char *errorbuf, int
if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
return LDAP_OPERATIONS_ERROR;
}
-
+
errno = 0;
minLength = strtol(value, &endp, 10);
@@ -2960,7 +2971,7 @@ config_set_pw_inhistory( const char *attrname, char *value, char *errorbuf, int
if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
return LDAP_OPERATIONS_ERROR;
}
-
+
errno = 0;
history = strtol(value, &endp, 10);
@@ -3157,6 +3168,7 @@ config_set_pw_gracelimit( const char *attrname, char *value, char *errorbuf, int
if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
return LDAP_OPERATIONS_ERROR;
}
+
errno = 0;
gracelimit = strtol(value, &endp, 10);
@@ -4107,7 +4119,7 @@ config_set_timelimit( const char *attrname, char *value, char *errorbuf, int app
if ( config_value_is_null( attrname, value, errorbuf, 1 )) {
return LDAP_OPERATIONS_ERROR;
}
-
+
errno = 0;
nVal = strtol(value, &endp, 10);
@@ -5747,18 +5759,29 @@ config_set_maxbersize( const char *attrname, char *value, char *errorbuf, int ap
{
int retVal = LDAP_SUCCESS;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ long size;
+ char *endp;
if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
return LDAP_OPERATIONS_ERROR;
}
+ errno = 0;
+ size = strtol(value, &endp, 10);
+ if ( *endp != '\0' || errno == ERANGE){
+ retVal = LDAP_OPERATIONS_ERROR;
+ PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "(%s) value (%s) "
+ "is invalid\n",attrname, value);
+ return retVal;
+ }
+
if ( !apply ) {
return retVal;
}
CFG_LOCK_WRITE(slapdFrontendConfig);
- slapdFrontendConfig->maxbersize = atoi(value);
+ slapdFrontendConfig->maxbersize = size;
CFG_UNLOCK_WRITE(slapdFrontendConfig);
return retVal;
@@ -5789,6 +5812,7 @@ config_set_maxsasliosize( const char *attrname, char *value, char *errorbuf, int
return LDAP_OPERATIONS_ERROR;
}
+ errno = 0;
maxsasliosize = strtol(value, &endptr, 10);
/* Check for non-numeric garbage in the value */
@@ -5842,6 +5866,7 @@ config_set_localssf( const char *attrname, char *value, char *errorbuf, int appl
return LDAP_OPERATIONS_ERROR;
}
+ errno = 0;
localssf = (int) strtol(value, &endptr, 10);
/* Check for non-numeric garbage in the value */
@@ -5884,6 +5909,7 @@ config_set_minssf( const char *attrname, char *value, char *errorbuf, int apply
return LDAP_OPERATIONS_ERROR;
}
+ errno = 0;
minssf = (int) strtol(value, &endptr, 10);
/* Check for non-numeric garbage in the value */
@@ -5970,20 +5996,31 @@ config_set_max_filter_nest_level( const char *attrname, char *value,
{
int retVal = LDAP_SUCCESS;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ char *endp;
+ long level;
if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
return LDAP_OPERATIONS_ERROR;
}
+ errno = 0;
+ level = strtol(value, &endp, 10);
+ if ( *endp != '\0' || errno == ERANGE){
+ retVal = LDAP_OPERATIONS_ERROR;
+ PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "(%s) value (%s) "
+ "is invalid\n",attrname, value);
+ return retVal;
+ }
+
if ( !apply ) {
return retVal;
}
#ifdef ATOMIC_GETSET_FILTER_NEST_LEVEL
- PR_AtomicSet(&slapdFrontendConfig->max_filter_nest_level, atoi(value));
+ PR_AtomicSet(&slapdFrontendConfig->max_filter_nest_level, level);
#else
CFG_LOCK_WRITE(slapdFrontendConfig);
- slapdFrontendConfig->max_filter_nest_level = atoi(value);
+ slapdFrontendConfig->max_filter_nest_level = level;
CFG_UNLOCK_WRITE(slapdFrontendConfig);
#endif
return retVal;
@@ -6677,14 +6714,24 @@ config_set_outbound_ldap_io_timeout( const char *attrname, char *value,
char *errorbuf, int apply )
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ long timeout;
+ char *endp;
if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
return LDAP_OPERATIONS_ERROR;
}
+ errno = 0;
+ timeout = strtol(value, &endp, 10);
+ if ( *endp != '\0' || errno == ERANGE){
+ PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "(%s) value (%s) "
+ "is invalid\n",attrname, value);
+ return LDAP_OPERATIONS_ERROR;
+ }
+
if ( apply ) {
CFG_LOCK_WRITE(slapdFrontendConfig);
- slapdFrontendConfig->outbound_ldap_io_timeout = atoi( value );
+ slapdFrontendConfig->outbound_ldap_io_timeout = timeout;
CFG_UNLOCK_WRITE(slapdFrontendConfig);
}
return LDAP_SUCCESS;
@@ -7253,7 +7300,7 @@ config_set_connection_buffer( const char *attrname, char *value,
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
- return LDAP_OPERATIONS_ERROR;
+ return LDAP_OPERATIONS_ERROR;
}
if ((strcasecmp(value, "0") != 0) && (strcasecmp(value, "1") != 0) &&
@@ -7265,7 +7312,7 @@ config_set_connection_buffer( const char *attrname, char *value,
}
if ( !apply ) {
- return retVal;
+ return retVal;
}
PR_AtomicSet(&slapdFrontendConfig->connection_buffer, atoi(value));
@@ -7277,13 +7324,23 @@ config_set_listen_backlog_size( const char *attrname, char *value,
char *errorbuf, int apply )
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ long size;
+ char *endp;
if ( config_value_is_null( attrname, value, errorbuf, 0 )) {
return LDAP_OPERATIONS_ERROR;
}
+ errno = 0;
+ size = strtol(value, &endp, 10);
+ if ( *endp != '\0' || errno == ERANGE){
+ PR_snprintf(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "(%s) value (%s) "
+ "is invalid\n",attrname, value);
+ return LDAP_OPERATIONS_ERROR;
+ }
+
if ( apply ) {
- PR_AtomicSet(&slapdFrontendConfig->listen_backlog_size, atoi(value));
+ PR_AtomicSet(&slapdFrontendConfig->listen_backlog_size, size);
}
return LDAP_SUCCESS;
}
| 0 |
9c6e9bb12327a2d50e651221614d34984b605427
|
389ds/389-ds-base
|
Ticket 47627 - changelog iteration should ignore cleaned rids when getting the minCSN
Description: If a change is not found in the change log the server will look for a min csn
to start the replay. This minCSN should not come from a cleaned RUV element.
https://fedorahosted.org/389/ticket/47627
Reviewed by: rmeggins & lkrispenz(Thanks!!)
|
commit 9c6e9bb12327a2d50e651221614d34984b605427
Author: Mark Reynolds <[email protected]>
Date: Wed Dec 11 11:25:44 2013 -0500
Ticket 47627 - changelog iteration should ignore cleaned rids when getting the minCSN
Description: If a change is not found in the change log the server will look for a min csn
to start the replay. This minCSN should not come from a cleaned RUV element.
https://fedorahosted.org/389/ticket/47627
Reviewed by: rmeggins & lkrispenz(Thanks!!)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 35553b9c2..0fecf8c0f 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -5252,7 +5252,7 @@ static int _cl5PositionCursorForReplay (ReplicaId consumerRID, const RUV *consum
{
/* use the supplier min csn for the buffer start csn - we know
this csn is in our changelog */
- if ((RUV_SUCCESS == ruv_get_min_csn(supplierRuv, &startCSN)) &&
+ if ((RUV_SUCCESS == ruv_get_min_csn_ext(supplierRuv, &startCSN, 1 /* ignore cleaned rids */)) &&
startCSN)
{ /* must now free startCSN */
if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
diff --git a/ldap/servers/plugins/replication/repl5_ruv.c b/ldap/servers/plugins/replication/repl5_ruv.c
index d6a7448ce..500fd7a26 100644
--- a/ldap/servers/plugins/replication/repl5_ruv.c
+++ b/ldap/servers/plugins/replication/repl5_ruv.c
@@ -998,9 +998,9 @@ ruv_covers_csn_cleanallruv(const RUV *ruv, const CSN *csn)
* or max{maxcsns of all ruv elements} if get_the_max != 0.
*/
static int
-ruv_get_min_or_max_csn(const RUV *ruv, CSN **csn, int get_the_max, ReplicaId rid)
+ruv_get_min_or_max_csn(const RUV *ruv, CSN **csn, int get_the_max, ReplicaId rid, int ignore_cleaned_rid)
{
- int return_value;
+ int return_value = RUV_SUCCESS;
if (ruv == NULL || csn == NULL)
{
@@ -1012,6 +1012,7 @@ ruv_get_min_or_max_csn(const RUV *ruv, CSN **csn, int get_the_max, ReplicaId rid
CSN *found = NULL;
RUVElement *replica;
int cookie;
+
slapi_rwlock_rdlock (ruv->lock);
for (replica = dl_get_first (ruv->elements, &cookie); replica;
replica = dl_get_next (ruv->elements, &cookie))
@@ -1028,6 +1029,10 @@ ruv_get_min_or_max_csn(const RUV *ruv, CSN **csn, int get_the_max, ReplicaId rid
{
continue;
}
+ if(ignore_cleaned_rid && is_cleaned_rid(replica->rid)){
+ continue;
+ }
+
if(rid){ /* we are only interested in this rid's maxcsn */
if(replica->rid == rid){
found = replica->csn;
@@ -1041,36 +1046,55 @@ ruv_get_min_or_max_csn(const RUV *ruv, CSN **csn, int get_the_max, ReplicaId rid
found = replica->csn;
}
}
- }
+ }
+
if (found == NULL)
{
- *csn = NULL;
+ *csn = NULL;
}
else
{
*csn = csn_dup (found);
}
slapi_rwlock_unlock (ruv->lock);
- return_value = RUV_SUCCESS;
}
return return_value;
}
int
-ruv_get_rid_max_csn(const RUV *ruv, CSN **csn, ReplicaId rid){
- return ruv_get_min_or_max_csn(ruv, csn, 1 /* get the max */, rid);
+ruv_get_rid_max_csn(const RUV *ruv, CSN **csn, ReplicaId rid)
+{
+ return ruv_get_rid_max_csn_ext(ruv, csn, rid, 0);
+}
+
+int
+ruv_get_rid_max_csn_ext(const RUV *ruv, CSN **csn, ReplicaId rid, int ignore_cleaned_rid)
+{
+ return ruv_get_min_or_max_csn(ruv, csn, 1 /* get the max */, rid, ignore_cleaned_rid);
}
int
ruv_get_max_csn(const RUV *ruv, CSN **csn)
{
- return ruv_get_min_or_max_csn(ruv, csn, 1 /* get the max */, 0 /* rid */);
+ return ruv_get_max_csn_ext(ruv, csn, 0);
+}
+
+int
+ruv_get_max_csn_ext(const RUV *ruv, CSN **csn, int ignore_cleaned_rid)
+{
+ return ruv_get_min_or_max_csn(ruv, csn, 1 /* get the max */, 0 /* rid */, ignore_cleaned_rid);
}
int
ruv_get_min_csn(const RUV *ruv, CSN **csn)
{
- return ruv_get_min_or_max_csn(ruv, csn, 0 /* get the min */, 0 /* rid */);
+ return ruv_get_min_csn_ext(ruv, csn, 0);
+}
+
+int
+ruv_get_min_csn_ext(const RUV *ruv, CSN **csn, int ignore_cleaned_rid)
+{
+ return ruv_get_min_or_max_csn(ruv, csn, 0 /* get the min */, 0 /* rid */, ignore_cleaned_rid);
}
int
diff --git a/ldap/servers/plugins/replication/repl5_ruv.h b/ldap/servers/plugins/replication/repl5_ruv.h
index a5a0c19f5..ae7d25c41 100644
--- a/ldap/servers/plugins/replication/repl5_ruv.h
+++ b/ldap/servers/plugins/replication/repl5_ruv.h
@@ -123,8 +123,11 @@ PRBool ruv_covers_csn(const RUV *ruv, const CSN *csn);
PRBool ruv_covers_csn_strict(const RUV *ruv, const CSN *csn);
PRBool ruv_covers_csn_cleanallruv(const RUV *ruv, const CSN *csn);
int ruv_get_min_csn(const RUV *ruv, CSN **csn);
+int ruv_get_min_csn_ext(const RUV *ruv, CSN **csn, int ignore_cleaned_rid);
int ruv_get_max_csn(const RUV *ruv, CSN **csn);
+int ruv_get_max_csn_ext(const RUV *ruv, CSN **csn, int ignore_cleaned_rid);
int ruv_get_rid_max_csn(const RUV *ruv, CSN **csn, ReplicaId rid);
+int ruv_get_rid_max_csn_ext(const RUV *ruv, CSN **csn, ReplicaId rid, int ignore_cleaned_rid);
int ruv_enumerate_elements (const RUV *ruv, FNEnumRUV fn, void *arg);
Slapi_Value **ruv_last_modified_to_valuearray(RUV *ruv);
Slapi_Value **ruv_to_valuearray(RUV *ruv);
| 0 |
260adce00e292e7e120e16c21bae924ca9393169
|
389ds/389-ds-base
|
Ticket 48358 - Prepare lib389 for Fedora Packaging
Description: Created spec file, and updated setup.py/setup.cfg. Also
restructured the code layout to all be under /lib389
https://fedorahosted.org/389/ticket/48358
Reviewed by: spichugi(Thanks!)
|
commit 260adce00e292e7e120e16c21bae924ca9393169
Author: Mark Reynolds <[email protected]>
Date: Wed Dec 2 10:51:51 2015 -0500
Ticket 48358 - Prepare lib389 for Fedora Packaging
Description: Created spec file, and updated setup.py/setup.cfg. Also
restructured the code layout to all be under /lib389
https://fedorahosted.org/389/ticket/48358
Reviewed by: spichugi(Thanks!)
diff --git a/src/lib389/VERSION b/src/lib389/VERSION
index 5bc4571bb..658aed929 100644
--- a/src/lib389/VERSION
+++ b/src/lib389/VERSION
@@ -1,2 +1,2 @@
-1.0.0
+1.0.1
diff --git a/src/lib389/lib389.spec b/src/lib389/lib389.spec
new file mode 100644
index 000000000..85bbbfa09
--- /dev/null
+++ b/src/lib389/lib389.spec
@@ -0,0 +1,65 @@
+%{!?__python2: %global __python2 %__python}
+%{!?python2_sitelib: %global python2_sitelib %(%{__python2} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
+
+%define name lib389
+%define version 1.0.1
+%define release 1
+
+Summary: A library for accessing, testing, and configuring the 389 Directory Server
+Name: %{name}
+Version: %{version}
+Release: %{release}%{?dist}
+Source0: %{name}-%{version}.tar.bz2
+License: GPLv3+
+Group: Development/Libraries
+#BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+Prefix: %{_prefix}
+BuildArch: noarch
+Vendor: Red Hat Inc. <[email protected]>
+Url: http://port389.org/wiki/Upstream_test_framework
+Requires: python-ldap pytest python-krbV
+
+# Currently python-ldap is not python3 compatible, so lib389 only works with
+# python 2.7
+
+%description
+This repository contains tools and libraries for accessing, testing, and
+configuring the 389 Directory Server.
+
+%prep
+%setup -qc
+mv %{name}-%{version} python2
+
+%build
+pushd python2
+# Remove CFLAGS=... for noarch packages (unneeded)
+CFLAGS="$RPM_OPT_FLAGS" %{__python2} setup.py build
+popd
+
+%install
+rm -rf $RPM_BUILD_ROOT
+pushd python2
+%{__python2} setup.py install -O1 --skip-build --root $RPM_BUILD_ROOT
+popd
+
+%check
+pushd python2
+%{__python2} setup.py test
+popd
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+%doc
+%{python2_sitelib}/*
+
+%changelog
+* Tue Dec 1 2015 Mark Reynolds <[email protected]> - 1.0.1-1
+- Initial Fedora Package
+
+
+
+
diff --git a/src/lib389/clitools/__init__.py b/src/lib389/lib389/clitools/__init__.py
similarity index 100%
rename from src/lib389/clitools/__init__.py
rename to src/lib389/lib389/clitools/__init__.py
diff --git a/src/lib389/clitools/ds_list_instances.py b/src/lib389/lib389/clitools/ds_list_instances.py
similarity index 100%
rename from src/lib389/clitools/ds_list_instances.py
rename to src/lib389/lib389/clitools/ds_list_instances.py
diff --git a/src/lib389/clitools/ds_monitor_backend.py b/src/lib389/lib389/clitools/ds_monitor_backend.py
similarity index 100%
rename from src/lib389/clitools/ds_monitor_backend.py
rename to src/lib389/lib389/clitools/ds_monitor_backend.py
diff --git a/src/lib389/clitools/ds_monitor_server.py b/src/lib389/lib389/clitools/ds_monitor_server.py
similarity index 100%
rename from src/lib389/clitools/ds_monitor_server.py
rename to src/lib389/lib389/clitools/ds_monitor_server.py
diff --git a/src/lib389/clitools/ds_schema_attributetype_list.py b/src/lib389/lib389/clitools/ds_schema_attributetype_list.py
similarity index 100%
rename from src/lib389/clitools/ds_schema_attributetype_list.py
rename to src/lib389/lib389/clitools/ds_schema_attributetype_list.py
diff --git a/src/lib389/clitools/ds_schema_attributetype_query.py b/src/lib389/lib389/clitools/ds_schema_attributetype_query.py
similarity index 100%
rename from src/lib389/clitools/ds_schema_attributetype_query.py
rename to src/lib389/lib389/clitools/ds_schema_attributetype_query.py
diff --git a/src/lib389/clitools/ds_start.py b/src/lib389/lib389/clitools/ds_start.py
similarity index 100%
rename from src/lib389/clitools/ds_start.py
rename to src/lib389/lib389/clitools/ds_start.py
diff --git a/src/lib389/clitools/ds_stop.py b/src/lib389/lib389/clitools/ds_stop.py
similarity index 100%
rename from src/lib389/clitools/ds_stop.py
rename to src/lib389/lib389/clitools/ds_stop.py
diff --git a/src/lib389/clitools/krb_create_keytab.py b/src/lib389/lib389/clitools/krb_create_keytab.py
similarity index 100%
rename from src/lib389/clitools/krb_create_keytab.py
rename to src/lib389/lib389/clitools/krb_create_keytab.py
diff --git a/src/lib389/clitools/krb_create_principal.py b/src/lib389/lib389/clitools/krb_create_principal.py
similarity index 100%
rename from src/lib389/clitools/krb_create_principal.py
rename to src/lib389/lib389/clitools/krb_create_principal.py
diff --git a/src/lib389/clitools/krb_create_realm.py b/src/lib389/lib389/clitools/krb_create_realm.py
similarity index 100%
rename from src/lib389/clitools/krb_create_realm.py
rename to src/lib389/lib389/clitools/krb_create_realm.py
diff --git a/src/lib389/clitools/krb_destroy_realm.py b/src/lib389/lib389/clitools/krb_destroy_realm.py
similarity index 100%
rename from src/lib389/clitools/krb_destroy_realm.py
rename to src/lib389/lib389/clitools/krb_destroy_realm.py
diff --git a/src/lib389/tests/__init__.py b/src/lib389/lib389/tests/__init__.py
similarity index 100%
rename from src/lib389/tests/__init__.py
rename to src/lib389/lib389/tests/__init__.py
diff --git a/src/lib389/tests/aci_parse_test.py b/src/lib389/lib389/tests/aci_parse_test.py
similarity index 100%
rename from src/lib389/tests/aci_parse_test.py
rename to src/lib389/lib389/tests/aci_parse_test.py
diff --git a/src/lib389/tests/agreement_test.py b/src/lib389/lib389/tests/agreement_test.py
similarity index 100%
rename from src/lib389/tests/agreement_test.py
rename to src/lib389/lib389/tests/agreement_test.py
diff --git a/src/lib389/tests/backend_test.py b/src/lib389/lib389/tests/backend_test.py
similarity index 100%
rename from src/lib389/tests/backend_test.py
rename to src/lib389/lib389/tests/backend_test.py
diff --git a/src/lib389/tests/config.py b/src/lib389/lib389/tests/config.py
similarity index 100%
rename from src/lib389/tests/config.py
rename to src/lib389/lib389/tests/config.py
diff --git a/src/lib389/tests/config_test.py b/src/lib389/lib389/tests/config_test.py
similarity index 100%
rename from src/lib389/tests/config_test.py
rename to src/lib389/lib389/tests/config_test.py
diff --git a/src/lib389/tests/conftest.py b/src/lib389/lib389/tests/conftest.py
similarity index 100%
rename from src/lib389/tests/conftest.py
rename to src/lib389/lib389/tests/conftest.py
diff --git a/src/lib389/tests/dereference_test.py b/src/lib389/lib389/tests/dereference_test.py
similarity index 100%
rename from src/lib389/tests/dereference_test.py
rename to src/lib389/lib389/tests/dereference_test.py
diff --git a/src/lib389/tests/dirsrv_test.py b/src/lib389/lib389/tests/dirsrv_test.py
similarity index 100%
rename from src/lib389/tests/dirsrv_test.py
rename to src/lib389/lib389/tests/dirsrv_test.py
diff --git a/src/lib389/tests/dsadmin_basic_test.py b/src/lib389/lib389/tests/dsadmin_basic_test.py
similarity index 100%
rename from src/lib389/tests/dsadmin_basic_test.py
rename to src/lib389/lib389/tests/dsadmin_basic_test.py
diff --git a/src/lib389/tests/dsadmin_create_remove_test.py b/src/lib389/lib389/tests/dsadmin_create_remove_test.py
similarity index 100%
rename from src/lib389/tests/dsadmin_create_remove_test.py
rename to src/lib389/lib389/tests/dsadmin_create_remove_test.py
diff --git a/src/lib389/tests/dsadmin_test.py b/src/lib389/lib389/tests/dsadmin_test.py
similarity index 100%
rename from src/lib389/tests/dsadmin_test.py
rename to src/lib389/lib389/tests/dsadmin_test.py
diff --git a/src/lib389/tests/effective_rights_test.py b/src/lib389/lib389/tests/effective_rights_test.py
similarity index 100%
rename from src/lib389/tests/effective_rights_test.py
rename to src/lib389/lib389/tests/effective_rights_test.py
diff --git a/src/lib389/tests/entry_test.py b/src/lib389/lib389/tests/entry_test.py
similarity index 100%
rename from src/lib389/tests/entry_test.py
rename to src/lib389/lib389/tests/entry_test.py
diff --git a/src/lib389/tests/krb5_create_test.py b/src/lib389/lib389/tests/krb5_create_test.py
similarity index 100%
rename from src/lib389/tests/krb5_create_test.py
rename to src/lib389/lib389/tests/krb5_create_test.py
diff --git a/src/lib389/tests/mappingTree_test.py b/src/lib389/lib389/tests/mappingTree_test.py
similarity index 100%
rename from src/lib389/tests/mappingTree_test.py
rename to src/lib389/lib389/tests/mappingTree_test.py
diff --git a/src/lib389/tests/replica_test.py b/src/lib389/lib389/tests/replica_test.py
similarity index 100%
rename from src/lib389/tests/replica_test.py
rename to src/lib389/lib389/tests/replica_test.py
diff --git a/src/lib389/tests/schema_test.py b/src/lib389/lib389/tests/schema_test.py
similarity index 100%
rename from src/lib389/tests/schema_test.py
rename to src/lib389/lib389/tests/schema_test.py
diff --git a/src/lib389/tests/suffix_test.py b/src/lib389/lib389/tests/suffix_test.py
similarity index 100%
rename from src/lib389/tests/suffix_test.py
rename to src/lib389/lib389/tests/suffix_test.py
diff --git a/src/lib389/tests/test_module_proxy.py b/src/lib389/lib389/tests/test_module_proxy.py
similarity index 100%
rename from src/lib389/tests/test_module_proxy.py
rename to src/lib389/lib389/tests/test_module_proxy.py
diff --git a/src/lib389/tests/utils_test.py b/src/lib389/lib389/tests/utils_test.py
similarity index 100%
rename from src/lib389/tests/utils_test.py
rename to src/lib389/lib389/tests/utils_test.py
diff --git a/src/lib389/setup.cfg b/src/lib389/setup.cfg
index 33fb3daea..62bdb21c5 100644
--- a/src/lib389/setup.cfg
+++ b/src/lib389/setup.cfg
@@ -1,3 +1,3 @@
[bdist_rpm]
-requires=python-ldap pytest
+requires=python-ldap pytest python-krbV
diff --git a/src/lib389/setup.py b/src/lib389/setup.py
index 2e3ed398d..ebae599e7 100644
--- a/src/lib389/setup.py
+++ b/src/lib389/setup.py
@@ -26,28 +26,27 @@ with open(path.join(here, 'README'), 'r') as f:
setup(
name='lib389',
+ license='GPLv3+',
version=version,
- description='A library for accessing 389 directory server',
+ description='A library for accessing and configuring the 389 Directory ' +
+ 'Server',
long_description=long_description,
-
url='http://port389.org/wiki/Upstream_test_framework',
author='Red Hat Inc.',
author_email='[email protected]',
- classifiers=[
- 'Development Status :: 3 - Alpha',
- 'Intended Audience :: Developers',
- 'Operating System :: POSIX :: Linux',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.6',
- 'Programming Language :: Python :: 2.7',
- 'Topic :: Software Development :: Libraries',
- 'Topic :: Software Development :: Quality Assurance',
- 'Topic :: Software Development :: Testing'],
-
- keywords='389 directory server test',
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'Intended Audience :: Developers',
+ 'Operating System :: POSIX :: Linux',
+ 'Programming Language :: Python :: 2 :: Only',
+ 'Programming Language :: Python :: 2.7',
+ 'Topic :: Software Development :: Libraries',
+ 'Topic :: Software Development :: Quality Assurance',
+ 'Topic :: Software Development :: Testing'],
+
+ keywords='389 directory server test configure',
packages=find_packages(exclude=['tests*']),
-
install_requires=['python-ldap', 'pytest', 'python-krbV'],
)
| 0 |
683bc575af84d8cbe11ad56efdfa5d99db3cebc1
|
389ds/389-ds-base
|
Ticket 49994 - comment out dev paths
Description: Accidentally left dev paths for CLI tools in UI uncommented
https://pagure.io/389-ds-base/issue/49994
|
commit 683bc575af84d8cbe11ad56efdfa5d99db3cebc1
Author: Mark Reynolds <[email protected]>
Date: Fri Nov 23 11:08:12 2018 -0500
Ticket 49994 - comment out dev paths
Description: Accidentally left dev paths for CLI tools in UI uncommented
https://pagure.io/389-ds-base/issue/49994
diff --git a/src/cockpit/389-console/src/ds.js b/src/cockpit/389-console/src/ds.js
index e78113873..46233ff5a 100644
--- a/src/cockpit/389-console/src/ds.js
+++ b/src/cockpit/389-console/src/ds.js
@@ -29,13 +29,13 @@ var DSCTL = "dsctl";
var DSCREATE = "dscreate";
var ENV = "";
-
+/*
// Used for local development testing
var DSCONF = '/home/mareynol/source/ds389/389-ds-base/src/lib389/cli/dsconf';
var DSCTL = '/home/mareynol/source/ds389/389-ds-base/src/lib389/cli/dsctl';
var DSCREATE = '/home/mareynol/source/ds389/389-ds-base/src/lib389/cli/dscreate';
var ENV = 'PYTHONPATH=/home/mareynol/source/ds389/389-ds-base/src/lib389';
-
+*/
/*
* Console logging function for CLI commands
| 0 |
220bfa6843f5655ddb3bb6f44c0e6890c95080cc
|
389ds/389-ds-base
|
Bug 651571 - When attrcrypt is on, entrydn is stored in the backend db
https://bugzilla.redhat.com/show_bug.cgi?id=651571
Description: If an entry contains a to-be-encrypted attribute,
id2entry_add_ext handles a copy of the entry in which the attribute
is encrypted, then the copy is being stored in the backend db.
On the other hand, an entrydn attribute is supposed to remove from
the entry before storing the entry in the db. There was a bug
there: the removal was done on the original entry, but not on the
encrypted copy. This patch correctly removes the entrydn attribute
from the encrypted entry to be stored in the db.
|
commit 220bfa6843f5655ddb3bb6f44c0e6890c95080cc
Author: Noriko Hosoi <[email protected]>
Date: Tue Nov 9 12:02:30 2010 -0800
Bug 651571 - When attrcrypt is on, entrydn is stored in the backend db
https://bugzilla.redhat.com/show_bug.cgi?id=651571
Description: If an entry contains a to-be-encrypted attribute,
id2entry_add_ext handles a copy of the entry in which the attribute
is encrypted, then the copy is being stored in the backend db.
On the other hand, an entrydn attribute is supposed to remove from
the entry before storing the entry in the db. There was a bug
there: the removal was done on the original entry, but not on the
encrypted copy. This patch correctly removes the entrydn attribute
from the encrypted entry to be stored in the db.
diff --git a/ldap/servers/slapd/back-ldbm/id2entry.c b/ldap/servers/slapd/back-ldbm/id2entry.c
index 71ea940e7..57ada09ae 100644
--- a/ldap/servers/slapd/back-ldbm/id2entry.c
+++ b/ldap/servers/slapd/back-ldbm/id2entry.c
@@ -98,7 +98,7 @@ id2entry_add_ext( backend *be, struct backentry *e, back_txn *txn, int encrypt
Slapi_Attr *eattr = NULL;
struct backdn *oldbdn = NULL;
Slapi_DN *sdn =
- slapi_sdn_dup(slapi_entry_get_sdn_const(e->ep_entry));
+ slapi_sdn_dup(slapi_entry_get_sdn_const(entry_to_use));
struct backdn *bdn = backdn_init(sdn, e->ep_id, 0);
options |= SLAPI_DUMP_RDN_ENTRY;
@@ -117,15 +117,15 @@ id2entry_add_ext( backend *be, struct backentry *e, back_txn *txn, int encrypt
}
CACHE_RETURN(&inst->inst_dncache, &bdn);
- LDAPDebug( LDAP_DEBUG_TRACE,
+ LDAPDebug2Args( LDAP_DEBUG_TRACE,
"=> id2entry_add (dncache) ( %lu, \"%s\" )\n",
- (u_long)e->ep_id, slapi_entry_get_dn_const(e->ep_entry), 0 );
+ (u_long)e->ep_id, slapi_entry_get_dn_const(entry_to_use) );
/* If entrydn exists in the entry, we have to remove it before
* writing the entry to the database. */
- if (0 == slapi_entry_attr_find(e->ep_entry,
+ if (0 == slapi_entry_attr_find(entry_to_use,
LDBM_ENTRYDN_STR, &eattr)) {
/* entrydn exists in the entry. let's removed it. */
- slapi_entry_delete_values(e->ep_entry, LDBM_ENTRYDN_STR, NULL);
+ slapi_entry_delete_values(entry_to_use, LDBM_ENTRYDN_STR, NULL);
}
}
data.dptr = slapi_entry2str_with_options(entry_to_use, &len, options);
@@ -148,6 +148,8 @@ id2entry_add_ext( backend *be, struct backentry *e, back_txn *txn, int encrypt
if (0 == rc)
{
+ /* Putting the entry into the entry cache.
+ * We don't use the encrypted entry here. */
if (entryrdn_get_switch()) {
struct backentry *parententry = NULL;
ID parentid = slapi_entry_attr_get_ulong(e->ep_entry, "parentid");
@@ -207,7 +209,8 @@ id2entry_add_ext( backend *be, struct backentry *e, back_txn *txn, int encrypt
}
done:
- /* If we had an encrypted entry, we no longer need it */
+ /* If we had an encrypted entry, we no longer need it.
+ * Note: encrypted_entry is not in the entry cache. */
if (encrypted_entry) {
backentry_free(&encrypted_entry);
}
| 0 |
c8ac6fcae4f2577393322348c795f8947679b642
|
389ds/389-ds-base
|
Ticket 49476 - backend refactoring phase1, fix failing tests
this patch fixes a couple of failing tests
- passwordpolicy, failing sometimes, with and without backend patch,
adding a sleep makes it pass
- ticket48906 - check did look for backend config params in the (now)
wrong entry
- ticket48252 - incorrect parameters passed to db2index, which only
had effect with th enew backend code
- ticket49076 - bug in bdb_config code, fixe
|
commit c8ac6fcae4f2577393322348c795f8947679b642
Author: Ludwig Krispenz <[email protected]>
Date: Thu Sep 26 11:34:15 2019 +0200
Ticket 49476 - backend refactoring phase1, fix failing tests
this patch fixes a couple of failing tests
- passwordpolicy, failing sometimes, with and without backend patch,
adding a sleep makes it pass
- ticket48906 - check did look for backend config params in the (now)
wrong entry
- ticket48252 - incorrect parameters passed to db2index, which only
had effect with th enew backend code
- ticket49076 - bug in bdb_config code, fixe
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
index 3ba4f5295..2f05eb6e7 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
@@ -448,6 +448,7 @@ def test_when_maxage_and_warning_are_the_same(topology_st, global_policy_default
user.rebind(USER_PASSWD)
user.reset_password(USER_PASSWD)
+ time.sleep(2)
log.info("Binding with {} and requesting the password expiry warning time"
.format(USER_DN))
res_ctrls = get_password_warning(topology_st)
diff --git a/dirsrvtests/tests/tickets/ticket48252_test.py b/dirsrvtests/tests/tickets/ticket48252_test.py
index 15ceaf0d8..05419ba19 100644
--- a/dirsrvtests/tests/tickets/ticket48252_test.py
+++ b/dirsrvtests/tests/tickets/ticket48252_test.py
@@ -80,7 +80,7 @@ def test_ticket48252_run_0(topology_st):
assert in_index_file(topology_st, 0, 'cn') is False
log.info(" db2index - reindexing %s ..." % 'cn')
topology_st.standalone.stop()
- assert topology_st.standalone.db2index(bename=None, suffixes='cn')
+ assert topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['cn'])
topology_st.standalone.start()
assert in_index_file(topology_st, 0, 'cn') is False
log.info(" entry %s is not in the cn index file after reindexed." % del_rdn)
@@ -105,7 +105,7 @@ def test_ticket48252_run_1(topology_st):
log.info(" db2index - reindexing %s ..." % 'objectclass')
topology_st.standalone.stop()
- assert topology_st.standalone.db2index(bename=None, suffixes='objectclass')
+ assert topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['objectclass'])
topology_st.standalone.start()
entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
assert len(entry) == 1
diff --git a/dirsrvtests/tests/tickets/ticket48906_test.py b/dirsrvtests/tests/tickets/ticket48906_test.py
index 832996448..9a20c1d71 100644
--- a/dirsrvtests/tests/tickets/ticket48906_test.py
+++ b/dirsrvtests/tests/tickets/ticket48906_test.py
@@ -46,6 +46,7 @@ DBCACHE_ATTR_CONFIG = "nsslapd-dbcachesize"
DBCACHE_ATTR_GUARDIAN = "cachesize"
ldbm_config = "cn=config,%s" % (DN_LDBM)
+bdb_ldbm_config = "cn=bdb,cn=config,%s" % (DN_LDBM)
ldbm_monitor = "cn=database,cn=monitor,%s" % (DN_LDBM)
@@ -92,7 +93,7 @@ def test_ticket48906_setup(topology_st):
def _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None, required=False):
- entries = topology_st.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config')
+ entries = topology_st.standalone.search_s(bdb_ldbm_config, ldap.SCOPE_BASE, 'cn=bdb')
if required:
assert (entries[0].hasValue(attr))
elif entries[0].hasValue(attr):
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
index ecb946cc4..c33c41baf 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
@@ -2169,21 +2169,26 @@ int
bdb_public_config_set(struct ldbminfo *li, char *attrname, int apply_mod, int mod_op, int phase, char *value)
{
char err_buf[SLAPI_DSE_RETURNTEXT_SIZE];
- struct berval bval;
int rc = LDAP_SUCCESS;
- if (!value) {
+ if (!value && SLAPI_IS_MOD_ADD(mod_op)) {
slapi_log_err(SLAPI_LOG_ERR,
"bdb_public_internal_set", "Error: no value for config attr: %s\n",
attrname);
return -1;
}
- bval.bv_val = value;
- bval.bv_len = strlen(value);
- rc = bdb_config_set((void *)li, attrname, bdb_config_param, &bval,
- err_buf, phase, apply_mod,
- mod_op);
+ if (value) {
+ struct berval bval;
+ bval.bv_val = value;
+ bval.bv_len = strlen(value);
+
+ rc = bdb_config_set((void *)li, attrname, bdb_config_param, &bval,
+ err_buf, phase, apply_mod, mod_op);
+ } else {
+ rc = bdb_config_set((void *)li, attrname, bdb_config_param, NULL,
+ err_buf, phase, apply_mod, mod_op);
+ }
if (rc != LDAP_SUCCESS) {
slapi_log_err(SLAPI_LOG_ERR,
"bdb_public_config_set", "Error setting instance config attr %s to %s: %s\n",
| 0 |
acbde67b9fb7e087c506d95013d32a1fe7c43f9d
|
389ds/389-ds-base
|
Issue 5397 - Fix various memory leaks
Description:
Fixed memory leaks in:
- Upgrade code when we check if an expected plugin is present, we didn't
free the search results.
- Filter optimizer introduced sr_norm_filter_intent which dupped a filter
but never freed it.
- Replication connections would leak the replication manager's
credentials.
relates: https://github.com/389ds/389-ds-base/issues/5397
Reviewed by: progier & jchapman (Thanks!!)
|
commit acbde67b9fb7e087c506d95013d32a1fe7c43f9d
Author: Mark Reynolds <[email protected]>
Date: Fri Aug 5 14:07:18 2022 -0400
Issue 5397 - Fix various memory leaks
Description:
Fixed memory leaks in:
- Upgrade code when we check if an expected plugin is present, we didn't
free the search results.
- Filter optimizer introduced sr_norm_filter_intent which dupped a filter
but never freed it.
- Replication connections would leak the replication manager's
credentials.
relates: https://github.com/389ds/389-ds-base/issues/5397
Reviewed by: progier & jchapman (Thanks!!)
diff --git a/ldap/servers/plugins/replication/repl5_connection.c b/ldap/servers/plugins/replication/repl5_connection.c
index 0e3e87908..809ffeeac 100644
--- a/ldap/servers/plugins/replication/repl5_connection.c
+++ b/ldap/servers/plugins/replication/repl5_connection.c
@@ -249,6 +249,7 @@ conn_delete_internal(Repl_Connection *conn)
slapi_ch_free_string(&conn->last_ldap_errmsg);
slapi_ch_free((void **)&conn->hostname);
slapi_ch_free((void **)&conn->binddn);
+ slapi_ch_free((void **)&conn->creds);
slapi_ch_free((void **)&conn->plain);
}
diff --git a/ldap/servers/slapd/add.c b/ldap/servers/slapd/add.c
index aa940e895..e70fc603d 100644
--- a/ldap/servers/slapd/add.c
+++ b/ldap/servers/slapd/add.c
@@ -395,6 +395,7 @@ slapi_exists_or_add_internal(
if (search_result == LDAP_SUCCESS) {
slapi_pblock_get(search_pb, SLAPI_NENTRIES, &search_nentries);
}
+ slapi_free_search_results_internal(search_pb);
slapi_pblock_destroy(search_pb);
slapi_log_error(SLAPI_LOG_DEBUG, "slapi_exists_or_add_internal", "search_internal result -> %d, %d\n", search_result, search_nentries);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 19ee0a1eb..cd33d30fd 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -1945,6 +1945,7 @@ delete_search_result_set(Slapi_PBlock *pb, back_search_result_set **sr)
rc, filt_errs);
}
slapi_filter_free((*sr)->sr_norm_filter, 1);
+ slapi_filter_free((*sr)->sr_norm_filter_intent, 1);
memset(*sr, 0, sizeof(back_search_result_set));
slapi_ch_free((void **)sr);
return;
| 0 |
e754339c441ea30b5c555171e6723ea74375d523
|
389ds/389-ds-base
|
Coverity defect: Resource leak 13110
This commit 94b123780b21e503b78bceca9d60904206ef91fa
introduced the resource leak.
Trac Ticket #447 - Possible to add invalid attribute
to nsslapd-allowed-to-delete-attrs
Fix description: This patch calls slapi_ch_array_free for the
allocated charray "allowed".
|
commit e754339c441ea30b5c555171e6723ea74375d523
Author: Noriko Hosoi <[email protected]>
Date: Wed Nov 14 13:41:30 2012 -0800
Coverity defect: Resource leak 13110
This commit 94b123780b21e503b78bceca9d60904206ef91fa
introduced the resource leak.
Trac Ticket #447 - Possible to add invalid attribute
to nsslapd-allowed-to-delete-attrs
Fix description: This patch calls slapi_ch_array_free for the
allocated charray "allowed".
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index a7e504f43..b19c2d9c7 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -6508,6 +6508,7 @@ config_set_allowed_to_delete_attrs( const char *attrname, char *value,
slapi_ch_free_string(&vcopy);
vcopy = slapi_ch_strdup(value);
}
+ slapi_ch_array_free(allowed);
CFG_LOCK_WRITE(slapdFrontendConfig);
slapi_ch_free_string(&(slapdFrontendConfig->allowed_to_delete_attrs));
slapdFrontendConfig->allowed_to_delete_attrs = vcopy;
| 0 |
177f5c458d694375e5839fda203994db3dcfc813
|
389ds/389-ds-base
|
Issue 49761 - Fix more CI test issues
Description: Mark conflict resolution test cases as 'xfail' (ds49591).
Fix SASL test. Fix Python 3 bytes issues in ds-replcheck tool.
Roll back a part of the PBKDF2_SHA256 schema fix because upgrade fails
without this part of the code.
https://pagure.io/389-ds-base/issue/49761
Reviewed by: vashirov, mreynolds (Thanks!)
|
commit 177f5c458d694375e5839fda203994db3dcfc813
Author: Simon Pichugin <[email protected]>
Date: Thu Jun 7 21:17:52 2018 +0200
Issue 49761 - Fix more CI test issues
Description: Mark conflict resolution test cases as 'xfail' (ds49591).
Fix SASL test. Fix Python 3 bytes issues in ds-replcheck tool.
Roll back a part of the PBKDF2_SHA256 schema fix because upgrade fails
without this part of the code.
https://pagure.io/389-ds-base/issue/49761
Reviewed by: vashirov, mreynolds (Thanks!)
diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
index 06a2baae3..b7ddb1f95 100644
--- a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
+++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py
@@ -396,6 +396,8 @@ class TestTwoMasters:
10. It should pass
"""
+ pytest.xfail("Issue 49591 - work in progress")
+
M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]
test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None)
@@ -485,6 +487,8 @@ class TestTwoMasters:
8. It should pass
"""
+ pytest.xfail("Issue 49591 - work in progress")
+
M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]
repl = ReplicationManager(SUFFIX)
@@ -574,6 +578,8 @@ class TestTwoMasters:
14. It should pass
"""
+ pytest.xfail("Issue 49591 - work in progress")
+
M1 = topology_m2.ms["master1"]
M2 = topology_m2.ms["master2"]
repl = ReplicationManager(SUFFIX)
@@ -785,6 +791,8 @@ class TestThreeMasters:
8. It should pass
"""
+ pytest.xfail("Issue 49591 - work in progress")
+
M1 = topology_m3.ms["master1"]
M2 = topology_m3.ms["master2"]
M3 = topology_m3.ms["master3"]
diff --git a/dirsrvtests/tests/suites/sasl/regression_test.py b/dirsrvtests/tests/suites/sasl/regression_test.py
index 3a0ecb16b..4741008d5 100644
--- a/dirsrvtests/tests/suites/sasl/regression_test.py
+++ b/dirsrvtests/tests/suites/sasl/regression_test.py
@@ -54,11 +54,11 @@ def enable_ssl(server, ldapsport, mycert):
(ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
(ldap.MOD_REPLACE, 'nsslapd-secureport', ldapsport)])
- server.rsa.create(properties={'objectclass': "top nsEncryptionModule".split(),
- 'cn': 'RSA',
- 'nsSSLPersonalitySSL': mycert,
- 'nsSSLToken': 'internal (software)',
- 'nsSSLActivation': 'on'})
+ server.rsa.ensure_state(properties={'objectclass': "top nsEncryptionModule".split(),
+ 'cn': 'RSA',
+ 'nsSSLPersonalitySSL': mycert,
+ 'nsSSLToken': 'internal (software)',
+ 'nsSSLActivation': 'on'})
def check_pems(confdir, mycacert, myservercert, myserverkey, notexist):
@@ -126,6 +126,12 @@ def doAndPrintIt(cmdline):
def create_keys_certs(topology_m2):
log.info("\n######################### Creating SSL Keys and Certs ######################\n")
+ for inst in topology_m2:
+ log.info("##### Ensure that nsslapd-extract-pemfiles is 'off' on {}".format(inst.serverid))
+ inst.config.set('nsslapd-extract-pemfiles', 'off')
+ log.info("##### restart {}".format(inst.serverid))
+ inst.restart()
+
global m1confdir
m1confdir = topology_m2.ms["master1"].confdir
global m2confdir
diff --git a/ldap/admin/src/scripts/ds-replcheck b/ldap/admin/src/scripts/ds-replcheck
index 62f911034..f94a6cb27 100755
--- a/ldap/admin/src/scripts/ds-replcheck
+++ b/ldap/admin/src/scripts/ds-replcheck
@@ -110,6 +110,14 @@ def convert_entries(entries):
for entry in entries:
new_entry = Entry(entry)
new_entry.data = {k.lower(): v for k, v in list(new_entry.data.items())}
+
+ # Decode nscpentrywsi bytes values for future use
+ nscpentrywsi_list = []
+ if 'nscpentrywsi' in new_entry.data:
+ for val in map(bytes.decode, new_entry.data['nscpentrywsi']):
+ nscpentrywsi_list.append(val)
+ new_entry.data['nscpentrywsi'] = nscpentrywsi_list
+
if new_entry.dn.endswith("cn=mapping tree,cn=config"):
'''Skip replica entry (ldapsearch brings this in because the filter
we use triggers an internal operation to return the config entry - so
diff --git a/ldap/servers/slapd/config.c b/ldap/servers/slapd/config.c
index 1b6db8d22..9b3b5a64e 100644
--- a/ldap/servers/slapd/config.c
+++ b/ldap/servers/slapd/config.c
@@ -36,6 +36,15 @@ extern char *localuser;
char *rel2abspath(char *);
static char *bootstrap_plugins[] = {
+ "dn: cn=PBKDF2_SHA256,cn=Password Storage Schemes,cn=plugins,cn=config\n"
+ "objectclass: top\n"
+ "objectclass: nsSlapdPlugin\n"
+ "cn: PBKDF2_SHA256\n"
+ "nsslapd-pluginpath: libpwdstorage-plugin\n"
+ "nsslapd-plugininitfunc: pbkdf2_sha256_pwd_storage_scheme_init\n"
+ "nsslapd-plugintype: pwdstoragescheme\n"
+ "nsslapd-pluginenabled: on",
+
NULL
};
| 0 |
0a82814c0bfb34c3bf24d9650423261a70350665
|
389ds/389-ds-base
|
Ticket 49137 - Add sasl plain tests, lib389 support
Bug Description: We need to test that SASL plain works in DS.
Fix Description: To do this, we need to fix sasl in lib389, we
correct openConnection to use more than just gssapi, we fix
start tls to work over multiple connections.
https://pagure.io/389-ds-base/issue/49137
Author: wibrown
Review by: mreynolds (THanks!)
|
commit 0a82814c0bfb34c3bf24d9650423261a70350665
Author: William Brown <[email protected]>
Date: Tue Feb 21 10:29:00 2017 +1000
Ticket 49137 - Add sasl plain tests, lib389 support
Bug Description: We need to test that SASL plain works in DS.
Fix Description: To do this, we need to fix sasl in lib389, we
correct openConnection to use more than just gssapi, we fix
start tls to work over multiple connections.
https://pagure.io/389-ds-base/issue/49137
Author: wibrown
Review by: mreynolds (THanks!)
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index e189c83a3..14ea6ee9c 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -98,6 +98,8 @@ if MAJOR >= 3 or (MAJOR == 2 and MINOR >= 7):
RE_DBMONATTR = re.compile(r'^([a-zA-Z]+)-([1-9][0-9]*)$')
RE_DBMONATTRSUN = re.compile(r'^([a-zA-Z]+)-([a-zA-Z]+)$')
+TRACE_LEVEL = 0
+
# My logger
log = logging.getLogger(__name__)
@@ -273,56 +275,15 @@ class DirSrv(SimpleLDAPObject, object):
self.instdir = self.ds_paths.inst_dir
self.dbdir = self.ds_paths.db_dir
- def __localinit__(self):
- '''
- Establish a connection to the started instance. It binds with the
- binddn property, then it initializes various fields from DirSrv
- (via __initPart2)
-
- @param - self
-
- @return - None
-
- @raise ldap.LDAPError - if failure during initialization
- '''
- uri = self.toLDAPURL()
-
- SimpleLDAPObject.__init__(self, uri)
-
- # see if binddn is a dn or a uid that we need to lookup
- if self.binddn and not is_a_dn(self.binddn):
- self.simple_bind_s("", "") # anon
- ent = self.getEntry(CFGSUFFIX, ldap.SCOPE_SUBTREE,
- "(uid=%s)" % self.binddn,
- ['uid'])
- if ent:
- self.binddn = ent.dn
- else:
- raise ValueError("Error: could not find %s under %s" % (
- self.binddn, CFGSUFFIX))
-
- needtls = False
- while True:
- try:
- if needtls:
- self.start_tls_s()
- try:
- self.simple_bind_s(ensure_str(self.binddn), self.bindpw)
- except ldap.SERVER_DOWN as e:
- # TODO add server info in exception
- log.debug("Cannot connect to %r" % uri)
- raise e
- break
- except ldap.CONFIDENTIALITY_REQUIRED:
- needtls = True
- self.__initPart2()
-
def rebind(self):
"""Reconnect to the DS
@raise ldap.CONFIDENTIALITY_REQUIRED - missing TLS:
"""
- SimpleLDAPObject.__init__(self, self.toLDAPURL())
+ if hasattr(ldap, 'PYLDAP_VERSION') and MAJOR >= 3:
+ super(DirSrv, self).__init__(uri, bytes_mode=False, trace_level=TRACE_LEVEL)
+ else:
+ super(DirSrv, self).__init__(uri, trace_level=TRACE_LEVEL)
# self.start_tls_s()
self.simple_bind_s(ensure_str(self.binddn), self.bindpw)
@@ -1037,7 +998,7 @@ class DirSrv(SimpleLDAPObject, object):
self.state = DIRSRV_STATE_ALLOCATED
- def open(self, saslmethod=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD):
+ def open(self, saslmethod=None, sasltoken=None, certdir=None, starttls=False, connOnly=False, reqcert=ldap.OPT_X_TLS_HARD):
'''
It opens a ldap bound connection to dirsrv so that online
administrative tasks are possible. It binds with the binddn
@@ -1048,6 +1009,7 @@ class DirSrv(SimpleLDAPObject, object):
@param self
@param saslmethod - None, or GSSAPI
+ @param sasltoken - The ldap.sasl token type to bind with.
@param certdir - Certificate directory for TLS
@return None
@@ -1058,22 +1020,24 @@ class DirSrv(SimpleLDAPObject, object):
if self.verbose:
self.log.info('open(): Connecting to uri %s' % uri)
if hasattr(ldap, 'PYLDAP_VERSION') and MAJOR >= 3:
- super(DirSrv, self).__init__(uri, bytes_mode=False)
+ super(DirSrv, self).__init__(uri, bytes_mode=False, trace_level=TRACE_LEVEL)
else:
- super(DirSrv, self).__init__(uri)
+ super(DirSrv, self).__init__(uri, trace_level=TRACE_LEVEL)
- if certdir:
+ if certdir is not None:
"""
We have a certificate directory, so lets start up TLS negotiations
"""
- ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, certdir)
- log.debug("Using ca certificate %s" % certdir)
+ self.set_option(ldap.OPT_X_TLS_CACERTDIR, certdir)
+ log.debug("Using external ca certificate %s" % certdir)
+ else:
+ self.set_option(ldap.OPT_X_TLS_CACERTDIR, self.get_cert_dir())
+ log.debug("Using dirsrv ca certificate %s" % certdir)
if certdir or starttls:
try:
- # MUST be set on ldap. not the object, because pyldap is broken
- # and only works if you set this globally.
- ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
+ self.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, reqcert)
+ log.debug("Using certificate policy %s" % reqcert)
log.debug("ldap.OPT_X_TLS_REQUIRE_CERT = %s" % reqcert)
self.start_tls_s()
except ldap.LDAPError as e:
@@ -1095,6 +1059,9 @@ class DirSrv(SimpleLDAPObject, object):
log.debug("SASL/GSSAPI Bind Failed: %s" % str(e))
raise e
+ elif saslmethod and sasltoken is not None:
+ # Just pass the sasltoken in!
+ self.sasl_interactive_bind_s("", sasltoken)
elif saslmethod:
# Unknown or unsupported method
log.debug('Unsupported SASL method: %s' % saslmethod)
@@ -1146,7 +1113,7 @@ class DirSrv(SimpleLDAPObject, object):
# check that DirSrv was in DIRSRV_STATE_ONLINE state
if self.state == DIRSRV_STATE_ONLINE:
# Don't raise an error. Just move the state and return
- SimpleLDAPObject.unbind(self)
+ self.unbind_s()
self.state = DIRSRV_STATE_OFFLINE
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index 7d3afa1c9..93aad5027 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -267,8 +267,8 @@ class DSLdapObject(DSLogging):
# If the account can be bound to, this will attempt to do so. We don't check
# for exceptions, just pass them back!
- def bind(self, password=None):
- conn = self._instance.openConnection()
+ def bind(self, password=None, *args, **kwargs):
+ conn = self._instance.openConnection(*args, **kwargs)
conn.simple_bind_s(self.dn, password)
return conn
diff --git a/src/lib389/lib389/nss_ssl.py b/src/lib389/lib389/nss_ssl.py
index bf9876b06..36bc26ad3 100644
--- a/src/lib389/lib389/nss_ssl.py
+++ b/src/lib389/lib389/nss_ssl.py
@@ -23,8 +23,8 @@ CA_NAME = 'Self-Signed-CA'
CERT_NAME = 'Server-Cert'
PIN_TXT = 'pin.txt'
PWD_TXT = 'pwdfile.txt'
-ISSUER = 'CN=ca.unknown.example.com,O=testing,L=unknown,ST=Queensland,C=AU'
-SELF_ISSUER = 'CN={HOSTNAME},O=testing,L=unknown,ST=Queensland,C=AU'
+ISSUER = 'CN=ca.lib389.example.com,O=testing,L=lib389,ST=Queensland,C=AU'
+SELF_ISSUER = 'CN={HOSTNAME},O=testing,L=lib389,ST=Queensland,C=AU'
VALID = 2
@@ -125,6 +125,7 @@ class NssSsl(object):
result = check_output(cmd)
self.dirsrv.log.debug("nss output: %s" % result)
# Now extract the CAcert to a well know place.
+ # This allows us to point the cacert dir here and it "just works"
cmd = [
'/usr/bin/certutil',
'-L',
@@ -233,6 +234,9 @@ class NssSsl(object):
CERT_NAME,
'-s',
SELF_ISSUER.format(HOSTNAME=self.dirsrv.host),
+ # We MUST issue with SANs else ldap wont verify the name.
+ '-8',
+ self.dirsrv.host,
'-c',
CA_NAME,
'-g',
diff --git a/src/lib389/lib389/rootdse.py b/src/lib389/lib389/rootdse.py
index df98cbf27..8d3a6d197 100644
--- a/src/lib389/lib389/rootdse.py
+++ b/src/lib389/lib389/rootdse.py
@@ -27,6 +27,9 @@ class RootDSE(DSLdapObject):
def supports_sasl_ldapssotoken(self):
return self.present("supportedSASLMechanisms", "LDAPSSOTOKEN")
+ def supports_sasl_plain(self):
+ return self.present("supportedSASLMechanisms", "PLAIN")
+
def supports_exop_whoami(self):
return self.present("supportedExtension", "1.3.6.1.4.1.4203.1.11.3")
diff --git a/src/lib389/lib389/sasl.py b/src/lib389/lib389/sasl.py
index ee19c4095..e64e3c3b2 100644
--- a/src/lib389/lib389/sasl.py
+++ b/src/lib389/lib389/sasl.py
@@ -12,7 +12,7 @@ Lib389 python ldap sasl operations.
These should be upstreamed if possible.
"""
-from ldap.sasl import sasl, CB_PASS
+from ldap.sasl import sasl, CB_AUTHNAME, CB_PASS
class LdapSSOTokenSASL(sasl):
"""
@@ -29,6 +29,6 @@ class PlainSASL(sasl):
"""
def __init__(self, authz_id, passwd):
- auth_dict = { CB_USER:authz_id, CB_PASS:passwd }
+ auth_dict = { CB_AUTHNAME:authz_id, CB_PASS:passwd }
sasl.__init__(self, auth_dict, "PLAIN")
diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py
index 10cc7aa48..51de6897b 100644
--- a/src/lib389/lib389/tools.py
+++ b/src/lib389/lib389/tools.py
@@ -857,18 +857,18 @@ class DirSrvTools(object):
for line in hostfp.readlines():
if ipPattern is None:
words = line.split()
- assert(words[1] == expectedHost)
- return True
+ if words[1] == expectedHost:
+ return True
else:
if line.find(ipPattern) >= 0:
words = line.split()
# We just want to make sure it's in there somewhere
- assert(expectedHost in words)
- return True
+ if expectedHost in words:
+ return True
except AssertionError:
raise AssertionError(
"Error: %s should contain '%s' host for %s" %
- ('/etc/hosts/', expectedHost, ipPattern))
+ ('/etc/hosts', expectedHost, ipPattern))
raise AssertionError(
"Error: /etc/hosts does not contain '%s' as a host for %s"
% (expectedHost, ipPattern))
| 0 |
431aba869708c7d47ee40380cb05c9a1380d1bd5
|
389ds/389-ds-base
|
Issue 50610 - Fix return code when it's nothing to free
Description: Fix the return code when NULL == clcrypt_handle
supplied to clcrypt_destroy.
https://pagure.io/389-ds-base/issue/50610
Reviewed by: mreynolds (Thanks!)
|
commit 431aba869708c7d47ee40380cb05c9a1380d1bd5
Author: Simon Pichugin <[email protected]>
Date: Wed May 13 13:26:30 2020 +0200
Issue 50610 - Fix return code when it's nothing to free
Description: Fix the return code when NULL == clcrypt_handle
supplied to clcrypt_destroy.
https://pagure.io/389-ds-base/issue/50610
Reviewed by: mreynolds (Thanks!)
diff --git a/ldap/servers/plugins/replication/cl_crypt.c b/ldap/servers/plugins/replication/cl_crypt.c
index d57075b65..838553479 100644
--- a/ldap/servers/plugins/replication/cl_crypt.c
+++ b/ldap/servers/plugins/replication/cl_crypt.c
@@ -87,6 +87,8 @@ clcrypt_destroy(void *clcrypt_handle)
slapi_log_err(SLAPI_LOG_TRACE, repl_plugin_name,
"-> clcrypt_destroy\n");
if (NULL == clcrypt_handle) {
+ /* Nothing to free */
+ rc = 0;
goto bail;
}
crypt_destroy.state_priv = clcrypt_handle;
| 0 |
cbcdf0506896e13badb32e336a435b2187fdd770
|
389ds/389-ds-base
|
Ticket 51247 - Container Healthcheck failure
Bug Description: Due to human error, a change to begin_healthcheck
was overlooked that causes containers to always report an unhealthy
state.
Fix Description: Fix the use of begin_healthcheck
fixes: https://pagure.io/389-ds-base/issue/51247
fixes #51247
Author: William Brown <[email protected]>
Review by: ???
|
commit cbcdf0506896e13badb32e336a435b2187fdd770
Author: William Brown <[email protected]>
Date: Mon Aug 24 12:43:07 2020 +1000
Ticket 51247 - Container Healthcheck failure
Bug Description: Due to human error, a change to begin_healthcheck
was overlooked that causes containers to always report an unhealthy
state.
Fix Description: Fix the use of begin_healthcheck
fixes: https://pagure.io/389-ds-base/issue/51247
fixes #51247
Author: William Brown <[email protected]>
Review by: ???
diff --git a/src/lib389/cli/dscontainer b/src/lib389/cli/dscontainer
index a519eefb5..098705abd 100755
--- a/src/lib389/cli/dscontainer
+++ b/src/lib389/cli/dscontainer
@@ -330,6 +330,9 @@ binddn = cn=Directory Manager
healthy = False
max_failure_count = 20
for i in range(0, max_failure_count):
+ if ds_proc is None:
+ log.warning("ns-slapd pid has disappeared ...")
+ break
(check_again, healthy) = begin_healthcheck(ds_proc)
if check_again is False:
break
@@ -355,10 +358,9 @@ binddn = cn=Directory Manager
def begin_healthcheck(ds_proc):
- if ds_proc is None:
- log.warning("ns-slapd pid has disappeared ...")
- return (False, False)
- if ds_proc.poll() is not None:
+ # We skip the pid check if ds_proc is none because that means it's coming from the
+ # container healthcheck.
+ if ds_proc is not None and ds_proc.poll() is not None:
# Ruh-Roh
log.warning("ns-slapd pid has completed, you should check the error log ...")
return (False, False)
@@ -425,7 +427,7 @@ container host.
if args.runit:
begin_magic()
elif args.healthcheck:
- if begin_healthcheck() is True:
+ if begin_healthcheck(None) is (False, True):
sys.exit(0)
else:
sys.exit(1)
| 0 |
ad7885eae64a2085a89d516c1106b578142be502
|
389ds/389-ds-base
|
Ticket #47928 - Disable SSL v3, by default.
Description:
Changing the default SSL Version Min value from TLS 1.1 to TLS 1.0.
In dn: cn=encryption,cn=config,
0) Setting no SSL version attrs (using defaults); supported max is TLS1.2
==>
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
1) Setting old/new SSL version attrs; no conflict; supported max is TLS1.2
sslVersionMin: TLS1.0
sslVersionMax: TLS1.3
nsSSL3: off
nsTLS1: on
==>
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
2) Setting new SSL version attrs; supported max is TLS1.2
sslVersionMin: TLS1.0
sslVersionMax: TLS1.3
==>
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
3) Setting old/new SSL version attrs; conflict (new min is stricter); supported max is TLS1.2
nsSSL3: on
sslVersionMin: TLS1.0
==>
SSL alert: Found unsecure configuration: nsSSL3: on; We strongly recommend to dis
able nsSSL3 in cn=encryption,cn=config.
SSL alert: Configured range: min: TLS1.0, max: TLS1.2; but both nsSSL3 and nsTLS1
are on. Respect the supported range.
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
4) Setting old/new SSL version attrs; conflict (old min is stricter); supported max is TLS1.2
nsSSL3: off
sslVersionMin: SSL3
sslVersionMax: SSL3
==>
SSL alert: nsTLS1 is on, but the version range is lower than "TLS1.0"; Configuring
the version range as default min: TLS1.0, max: TLS1.2.
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
5) Setting old/new SSL version attrs; no conflict; setting SSL3
nsSSL3: on
nsTLS1: off
sslVersionMin: SSL3
sslVersionMax: SSL3
==>
SSL alert: Found unsecure configuration: nsSSL3: on; We strongly recommend to disable
nsSSL3 in cn=encryption,cn=config.
SSL alert: Too low configured range: min: SSL3, max: SSL3; We strongly recommend
to set sslVersionMin higher than TLS1.0.
SSL Initialization - Configured SSL version range: min: SSL3, max: SSL3
https://fedorahosted.org/389/ticket/47928
Reviewed by [email protected] (Thank you, Mark!!)
|
commit ad7885eae64a2085a89d516c1106b578142be502
Author: Noriko Hosoi <[email protected]>
Date: Thu Nov 13 12:14:48 2014 -0800
Ticket #47928 - Disable SSL v3, by default.
Description:
Changing the default SSL Version Min value from TLS 1.1 to TLS 1.0.
In dn: cn=encryption,cn=config,
0) Setting no SSL version attrs (using defaults); supported max is TLS1.2
==>
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
1) Setting old/new SSL version attrs; no conflict; supported max is TLS1.2
sslVersionMin: TLS1.0
sslVersionMax: TLS1.3
nsSSL3: off
nsTLS1: on
==>
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
2) Setting new SSL version attrs; supported max is TLS1.2
sslVersionMin: TLS1.0
sslVersionMax: TLS1.3
==>
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
3) Setting old/new SSL version attrs; conflict (new min is stricter); supported max is TLS1.2
nsSSL3: on
sslVersionMin: TLS1.0
==>
SSL alert: Found unsecure configuration: nsSSL3: on; We strongly recommend to dis
able nsSSL3 in cn=encryption,cn=config.
SSL alert: Configured range: min: TLS1.0, max: TLS1.2; but both nsSSL3 and nsTLS1
are on. Respect the supported range.
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
4) Setting old/new SSL version attrs; conflict (old min is stricter); supported max is TLS1.2
nsSSL3: off
sslVersionMin: SSL3
sslVersionMax: SSL3
==>
SSL alert: nsTLS1 is on, but the version range is lower than "TLS1.0"; Configuring
the version range as default min: TLS1.0, max: TLS1.2.
SSL Initialization - Configured SSL version range: min: TLS1.0, max: TLS1.2
5) Setting old/new SSL version attrs; no conflict; setting SSL3
nsSSL3: on
nsTLS1: off
sslVersionMin: SSL3
sslVersionMax: SSL3
==>
SSL alert: Found unsecure configuration: nsSSL3: on; We strongly recommend to disable
nsSSL3 in cn=encryption,cn=config.
SSL alert: Too low configured range: min: SSL3, max: SSL3; We strongly recommend
to set sslVersionMin higher than TLS1.0.
SSL Initialization - Configured SSL version range: min: SSL3, max: SSL3
https://fedorahosted.org/389/ticket/47928
Reviewed by [email protected] (Thank you, Mark!!)
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 87f45a181..d10fb3e0d 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -110,7 +110,7 @@ static const char *internal_entries[] =
"cn:encryption\n"
"nsSSLSessionTimeout:0\n"
"nsSSLClientAuth:allowed\n"
- "sslVersionMin:tls1.1\n",
+ "sslVersionMin:TLS1.0\n",
"dn:cn=monitor\n"
"objectclass:top\n"
diff --git a/ldap/servers/slapd/ssl.c b/ldap/servers/slapd/ssl.c
index 5d6919aee..6b51e0c40 100644
--- a/ldap/servers/slapd/ssl.c
+++ b/ldap/servers/slapd/ssl.c
@@ -87,13 +87,23 @@
/* TLS1.1 is defined in RFC4346. */
#define NSS_TLS11 1
#else
-/*
- * TLS1.0 is defined in RFC2246.
- * Close to SSL 3.0.
- */
#define NSS_TLS10 1
#endif
+/******************************************************************************
+ * Default SSL Version Rule
+ * Old SSL version attributes:
+ * nsSSL3: off -- nsSSL3 == SSL_LIBRARY_VERSION_3_0
+ * nsTLS1: on -- nsTLS1 == SSL_LIBRARY_VERSION_TLS_1_0 and greater
+ * Note: TLS1.0 is defined in RFC2246, which is close to SSL 3.0.
+ * New SSL version attributes:
+ * sslVersionMin: TLS1.0
+ * sslVersionMax: max ssl version supported by NSS
+ ******************************************************************************/
+
+#define DEFVERSION "TLS1.0"
+#define CURRENT_DEFAULT_SSL_VERSION SSL_LIBRARY_VERSION_TLS_1_0
+
extern char* slapd_SSL3ciphers;
extern symbol_t supported_ciphers[];
#if !defined(NSS_TLS10) /* NSS_TLS11 or newer */
@@ -253,12 +263,12 @@ static lookup_cipher _lookup_cipher[] = {
PRBool enableSSL2 = PR_FALSE;
/*
* nsSSL3: on -- disable SSLv3 by default.
- * Corresonding to SSL_LIBRARY_VERSION_3_0 and SSL_LIBRARY_VERSION_TLS_1_0
+ * Corresonding to SSL_LIBRARY_VERSION_3_0
*/
PRBool enableSSL3 = PR_FALSE;
/*
* nsTLS1: on -- enable TLS1 by default.
- * Corresonding to SSL_LIBRARY_VERSION_TLS_1_1 and greater.
+ * Corresonding to SSL_LIBRARY_VERSION_TLS_1_0 and greater.
*/
PRBool enableTLS1 = PR_TRUE;
@@ -927,14 +937,14 @@ restrict_SSLVersionRange(void)
slapd_SSL_warn("Found unsecure configuration: nsSSL3: on; "
"We strongly recommend to disable nsSSL3 in %s.", configDN);
if (enableTLS1) {
- if (slapdNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_0) {
+ if (slapdNSSVersions.min >= CURRENT_DEFAULT_SSL_VERSION) {
slapd_SSL_warn("Configured range: min: %s, max: %s; "
"but both nsSSL3 and nsTLS1 are on. "
"Respect the supported range.",
mymin, mymax);
enableSSL3 = PR_FALSE;
}
- if (slapdNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) {
+ if (slapdNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) {
slapd_SSL_warn("Configured range: min: %s, max: %s; "
"but both nsSSL3 and nsTLS1 are on. "
"Resetting the max to the supported max SSL version: %s.",
@@ -943,7 +953,7 @@ restrict_SSLVersionRange(void)
}
} else {
/* nsTLS1 is explicitly set to off. */
- if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_0) {
+ if (enabledNSSVersions.min >= CURRENT_DEFAULT_SSL_VERSION) {
slapd_SSL_warn("Supported range: min: %s, max: %s; "
"but nsSSL3 is on and nsTLS1 is off. "
"Respect the supported range.",
@@ -951,20 +961,20 @@ restrict_SSLVersionRange(void)
slapdNSSVersions.min = SSLVGreater(slapdNSSVersions.min, enabledNSSVersions.min);
enableSSL3 = PR_FALSE;
enableTLS1 = PR_TRUE;
- } else if (slapdNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_0) {
+ } else if (slapdNSSVersions.min >= CURRENT_DEFAULT_SSL_VERSION) {
slapd_SSL_warn("Configured range: min: %s, max: %s; "
"but nsSSL3 is on and nsTLS1 is off. "
"Respect the configured range.",
mymin, mymax);
enableSSL3 = PR_FALSE;
enableTLS1 = PR_TRUE;
- } else if (slapdNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) {
+ } else if (slapdNSSVersions.min < CURRENT_DEFAULT_SSL_VERSION) {
slapd_SSL_warn("Too low configured range: min: %s, max: %s; "
- "We strongly recommend to set sslVersionMax higher than %s.",
- mymin, mymax, emax);
+ "We strongly recommend to set sslVersionMin higher than %s.",
+ mymin, mymax, DEFVERSION);
} else {
/*
- * slapdNSSVersions.min <= SSL_LIBRARY_VERSION_TLS_1_0 &&
+ * slapdNSSVersions.min < SSL_LIBRARY_VERSION_TLS_1_0 &&
* slapdNSSVersions.max >= SSL_LIBRARY_VERSION_TLS_1_1
*/
slapd_SSL_warn("Configured range: min: %s, max: %s; "
@@ -976,7 +986,7 @@ restrict_SSLVersionRange(void)
}
} else {
if (enableTLS1) {
- if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) {
+ if (enabledNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) {
/* TLS1 is on, but TLS1 is not supported by NSS. */
slapd_SSL_warn("Supported range: min: %s, max: %s; "
"Setting the version range based upon the supported range.",
@@ -985,17 +995,17 @@ restrict_SSLVersionRange(void)
slapdNSSVersions.min = enabledNSSVersions.min;
enableSSL3 = PR_TRUE;
enableTLS1 = PR_FALSE;
- } else if ((slapdNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) ||
- (slapdNSSVersions.min < SSL_LIBRARY_VERSION_TLS_1_1)) {
+ } else if ((slapdNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) ||
+ (slapdNSSVersions.min < CURRENT_DEFAULT_SSL_VERSION)) {
slapdNSSVersions.max = enabledNSSVersions.max;
- slapdNSSVersions.min = SSLVGreater(SSL_LIBRARY_VERSION_TLS_1_1, enabledNSSVersions.min);
- slapd_SSL_warn("Default SSL Version settings; "
- "Configuring the version range as min: %s, max: %s; ",
- mymin, mymax);
+ slapdNSSVersions.min = SSLVGreater(CURRENT_DEFAULT_SSL_VERSION, enabledNSSVersions.min);
+ slapd_SSL_warn("nsTLS1 is on, but the version range is lower than \"%s\"; "
+ "Configuring the version range as default min: %s, max: %s.",
+ DEFVERSION, DEFVERSION, emax);
} else {
/*
- * slapdNSSVersions.min >= SSL_LIBRARY_VERSION_TLS_1_1 &&
- * slapdNSSVersions.max >= SSL_LIBRARY_VERSION_TLS_1_1
+ * slapdNSSVersions.min >= SSL_LIBRARY_VERSION_TLS_1_0 &&
+ * slapdNSSVersions.max >= SSL_LIBRARY_VERSION_TLS_1_0
*/
;
}
@@ -1004,14 +1014,14 @@ restrict_SSLVersionRange(void)
"Respect the configured range.",
emin, emax);
/* nsTLS1 is explicitly set to off. */
- if (slapdNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_0) {
+ if (slapdNSSVersions.min >= CURRENT_DEFAULT_SSL_VERSION) {
enableTLS1 = PR_TRUE;
- } else if (slapdNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_1) {
+ } else if (slapdNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) {
enableSSL3 = PR_TRUE;
} else {
/*
- * slapdNSSVersions.min <= SSL_LIBRARY_VERSION_TLS_1_0 &&
- * slapdNSSVersions.max >= SSL_LIBRARY_VERSION_TLS_1_1
+ * slapdNSSVersions.min < SSL_LIBRARY_VERSION_TLS_1_0 &&
+ * slapdNSSVersions.max >= SSL_LIBRARY_VERSION_TLS_1_0
*/
enableSSL3 = PR_TRUE;
enableTLS1 = PR_TRUE;
@@ -1434,17 +1444,17 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin)
sscanf(vp, "%4f", &tlsv);
if (tlsv < 1.1) { /* TLS1.0 */
if (ismin) {
- if (enabledNSSVersions.min > SSL_LIBRARY_VERSION_TLS_1_0) {
+ if (enabledNSSVersions.min > CURRENT_DEFAULT_SSL_VERSION) {
slapd_SSL_warn("Security Initialization: The value of sslVersionMin "
"\"%s\" is lower than the supported version; "
"the default value \"%s\" is used.",
val, emin);
(*rval) = enabledNSSVersions.min;
} else {
- (*rval) = SSL_LIBRARY_VERSION_TLS_1_0;
+ (*rval) = CURRENT_DEFAULT_SSL_VERSION;
}
} else {
- if (enabledNSSVersions.max < SSL_LIBRARY_VERSION_TLS_1_0) {
+ if (enabledNSSVersions.max < CURRENT_DEFAULT_SSL_VERSION) {
/* never happens */
slapd_SSL_warn("Security Initialization: The value of sslVersionMax "
"\"%s\" is higher than the supported version; "
@@ -1452,7 +1462,7 @@ set_NSS_version(char *val, PRUint16 *rval, int ismin)
val, emax);
(*rval) = enabledNSSVersions.max;
} else {
- (*rval) = SSL_LIBRARY_VERSION_TLS_1_0;
+ (*rval) = CURRENT_DEFAULT_SSL_VERSION;
}
}
} else if (tlsv < 1.2) { /* TLS1.1 */
@@ -1906,7 +1916,7 @@ slapd_ssl_init2(PRFileDesc **fd, int startTLS)
} else {
enableTLS1 = slapi_entry_attr_get_bool( e, "nsTLS1" );
}
- } else if (enabledNSSVersions.max > SSL_LIBRARY_VERSION_TLS_1_0) {
+ } else if (enabledNSSVersions.max >= CURRENT_DEFAULT_SSL_VERSION) {
enableTLS1 = PR_TRUE; /* If available, enable TLS1 */
}
slapi_ch_free_string( &val );
| 0 |
4c718586774d3a781eae54775c6f0d5d63ece45d
|
389ds/389-ds-base
|
Issue 6397 - Remove deprecated setting for HR time stamps in logs
Remove the code for checking is CLOCK exists and the
logging_hr_timestamps config settings
Also fixed some compiler warnings
relates: https://github.com/389ds/389-ds-base/issues/6397
Reviewed by: progier & spichugi (Thanks!!)
|
commit 4c718586774d3a781eae54775c6f0d5d63ece45d
Author: Mark Reynolds <[email protected]>
Date: Fri Nov 8 08:53:54 2024 -0500
Issue 6397 - Remove deprecated setting for HR time stamps in logs
Remove the code for checking is CLOCK exists and the
logging_hr_timestamps config settings
Also fixed some compiler warnings
relates: https://github.com/389ds/389-ds-base/issues/6397
Reviewed by: progier & spichugi (Thanks!!)
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 6fb038cc5..8670db312 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -37,10 +37,10 @@ pytestmark = pytest.mark.tier1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-PLUGIN_TIMESTAMP = 'nsslapd-logging-hr-timestamps-enabled'
PLUGIN_LOGGING = 'nsslapd-plugin-logging'
USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
+
def add_users(topology_st, users_num):
users = UserAccounts(topology_st, DEFAULT_SUFFIX)
log.info('Adding %d users' % users_num)
@@ -205,6 +205,7 @@ def set_audit_log_config_values(topology_st, request, enabled, logsize):
def set_audit_log_config_values_to_rotate(topology_st, request):
set_audit_log_config_values(topology_st, request, 'on', '1')
+
@pytest.fixture(scope="function")
def disable_access_log_buffering(topology_st, request):
log.info('Disable access log buffering')
@@ -217,6 +218,7 @@ def disable_access_log_buffering(topology_st, request):
return disable_access_log_buffering
+
def create_backend(inst, rdn, suffix):
# We only support dc= in this test.
assert suffix.startswith('dc=')
@@ -243,54 +245,9 @@ def create_backend(inst, rdn, suffix):
return be1
-def test_check_default(topology_st):
- """Check the default value of nsslapd-logging-hr-timestamps-enabled,
- it should be ON
-
- :id: 2d15002e-9ed3-4796-b0bb-bf04e4e59bd3
-
- :setup: Standalone instance
-
- :steps:
- 1. Fetch the value of nsslapd-logging-hr-timestamps-enabled attribute
- 2. Test that the attribute value should be "ON" by default
-
- :expectedresults:
- 1. Value should be fetched successfully
- 2. Value should be "ON" by default
- """
-
- # Get the default value of nsslapd-logging-hr-timestamps-enabled attribute
- default = topology_st.standalone.config.get_attr_val_utf8(PLUGIN_TIMESTAMP)
-
- # Now check it should be ON by default
- assert default == "on"
- log.debug(default)
-
-
-def test_plugin_set_invalid(topology_st):
- """Try to set some invalid values for nsslapd-logging-hr-timestamps-enabled
- attribute
-
- :id: c60a68d2-703a-42bf-a5c2-4040736d511a
-
- :setup: Standalone instance
-
- :steps:
- 1. Set some "JUNK" value of nsslapd-logging-hr-timestamps-enabled attribute
-
- :expectedresults:
- 1. There should be an operation error
- """
-
- log.info('test_plugin_set_invalid - Expect to fail with junk value')
- with pytest.raises(ldap.OPERATIONS_ERROR):
- topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK')
-
def test_log_plugin_on(topology_st, remove_users):
- """Check access logs for millisecond, when
- nsslapd-logging-hr-timestamps-enabled=ON
+ """Check access logs for millisecond
:id: 65ae4e2a-295f-4222-8d69-12124bc7a872
@@ -323,55 +280,6 @@ def test_log_plugin_on(topology_st, remove_users):
assert topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+')
-def test_log_plugin_off(topology_st, remove_users):
- """Milliseconds should be absent from access logs when
- nsslapd-logging-hr-timestamps-enabled=OFF
-
- :id: b3400e46-d940-4574-b399-e3f4b49bc4b5
-
- :setup: Standalone instance
-
- :steps:
- 1. Set nsslapd-logging-hr-timestamps-enabled=OFF
- 2. Restart the server
- 3. Delete old access logs
- 4. Do search operations to generate fresh access logs
- 5. Restart the server
- 6. Check access logs
-
- :expectedresults:
- 1. Attribute nsslapd-logging-hr-timestamps-enabled should be set to "OFF"
- 2. Server should restart
- 3. Access logs should be deleted
- 4. Search operation should PASS
- 5. Server should restart
- 6. There should not be any milliseconds added in the access logs
- """
-
- log.info('Bug 1273549 - Check access logs for missing millisecond, when attribute is OFF')
-
- log.info('test_log_plugin_off - set the configuration attribute to OFF')
- topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'OFF')
-
- log.info('Restart the server to flush the logs')
- topology_st.standalone.restart(timeout=10)
-
- log.info('test_log_plugin_off - delete the previous access logs')
- topology_st.standalone.deleteAccessLogs()
-
- # Now generate some fresh logs
- add_users(topology_st.standalone, 10)
- search_users(topology_st.standalone)
-
- log.info('Restart the server to flush the logs')
- topology_st.standalone.restart(timeout=10)
-
- log.info('check access log that microseconds are not present')
- access_log_lines = topology_st.standalone.ds_access_log.readlines()
- assert len(access_log_lines) > 0
- assert not topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+')
-
-
@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706")
def test_internal_log_server_level_0(topology_st, clean_access_logs, disable_access_log_buffering):
"""Tests server-initiated internal operations
@@ -391,7 +299,6 @@ def test_internal_log_server_level_0(topology_st, clean_access_logs, disable_acc
topo = topology_st.standalone
default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL)
-
log.info('Set nsslapd-plugin-logging to on')
topo.config.set(PLUGIN_LOGGING, 'ON')
diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
index d0b56f94c..823f69852 100644
--- a/dirsrvtests/tests/suites/healthcheck/health_config_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/health_config_test.py
@@ -59,6 +59,7 @@ def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searc
log.info('Use healthcheck without --json option')
args.json = json
health_check_run(instance, topology.logcap.log, args)
+
assert topology.logcap.contains(searched_code)
log.info('Healthcheck returned searched code: %s' % searched_code)
@@ -85,48 +86,6 @@ def setup_ldif(topology_st, request):
request.addfinalizer(fin)
[email protected](ds_is_older("1.4.1"), reason="Not implemented")
-def test_healthcheck_logging_format_should_be_revised(topology_st):
- """Check if HealthCheck returns DSCLE0001 code
-
- :id: 277d7980-123b-481b-acba-d90921b9f5ac
- :setup: Standalone instance
- :steps:
- 1. Create DS instance
- 2. Set nsslapd-logging-hr-timestamps-enabled to 'off'
- 3. Use HealthCheck without --json option
- 4. Use HealthCheck with --json option
- 5. Set nsslapd-logging-hr-timestamps-enabled to 'on'
- 6. Use HealthCheck without --json option
- 7. Use HealthCheck with --json option
- :expectedresults:
- 1. Success
- 2. Success
- 3. Healthcheck reports DSCLE0001 code and related details
- 4. Healthcheck reports DSCLE0001 code and related details
- 5. Success
- 6. Healthcheck reports no issue found
- 7. Healthcheck reports no issue found
- """
-
- RET_CODE = 'DSCLE0001'
-
- standalone = topology_st.standalone
-
- log.info('Set nsslapd-logging-hr-timestamps-enabled to off')
- standalone.config.set('nsslapd-logging-hr-timestamps-enabled', 'off')
- standalone.config.set("nsslapd-accesslog-logbuffering", "on")
-
- run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE)
- run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE)
-
- log.info('Set nsslapd-logging-hr-timestamps-enabled to off')
- standalone.config.set('nsslapd-logging-hr-timestamps-enabled', 'on')
-
- run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT)
- run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT)
-
-
@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented")
def test_healthcheck_RI_plugin_is_misconfigured(topology_st):
"""Check if HealthCheck returns DSRILE0001 code
@@ -157,6 +116,8 @@ def test_healthcheck_RI_plugin_is_misconfigured(topology_st):
standalone = topology_st.standalone
+ standalone.config.set("nsslapd-accesslog-logbuffering", "on")
+
plugin = ReferentialIntegrityPlugin(standalone)
plugin.disable()
plugin.enable()
@@ -408,7 +369,6 @@ def test_healthcheck_notes_unindexed_search(topology_st, setup_ldif):
db_cfg = DatabaseConfig(standalone)
db_cfg.set([('nsslapd-idlistscanlimit', '100')])
-
log.info('Stopping the server and running offline import...')
standalone.stop()
assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None,
@@ -477,6 +437,7 @@ def test_healthcheck_notes_unknown_attribute(topology_st, setup_ldif):
run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False)
run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True)
+
def test_healthcheck_unauth_binds(topology_st):
"""Check if HealthCheck returns DSCLE0003 code when unauthorized binds are
allowed
@@ -509,6 +470,7 @@ def test_healthcheck_unauth_binds(topology_st):
log.info('Reset nsslapd-allow-unauthenticated-binds to off')
inst.config.set("nsslapd-allow-unauthenticated-binds", "off")
+
def test_healthcheck_accesslog_buffering(topology_st):
"""Check if HealthCheck returns DSCLE0004 code when acccess log buffering
is disabled
@@ -541,6 +503,7 @@ def test_healthcheck_accesslog_buffering(topology_st):
log.info('Reset nsslapd-accesslog-logbuffering to on')
inst.config.set("nsslapd-accesslog-logbuffering", "on")
+
def test_healthcheck_securitylog_buffering(topology_st):
"""Check if HealthCheck returns DSCLE0005 code when security log buffering
is disabled
@@ -573,6 +536,7 @@ def test_healthcheck_securitylog_buffering(topology_st):
log.info('Reset nnsslapd-securitylog-logbuffering to on')
inst.config.set("nsslapd-securitylog-logbuffering", "on")
+
def test_healthcheck_auditlog_buffering(topology_st):
"""Check if HealthCheck returns DSCLE0006 code when audit log buffering
is disabled
diff --git a/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py b/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
index ebf6e48d4..bed85591f 100644
--- a/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
+++ b/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py
@@ -146,8 +146,7 @@ def test_healthcheck_list_checks(topology_st):
3. Success
"""
- output_list = ['config:hr_timestamp',
- 'config:passwordscheme',
+ output_list = ['config:passwordscheme',
'backends:userroot:cl_trimming',
'backends:userroot:mappingtree',
'backends:userroot:search',
@@ -194,7 +193,6 @@ def test_healthcheck_list_errors(topology_st):
'DSBLE0006 :: BDB is still used as a backend',
'DSCERTLE0001 :: Certificate about to expire',
'DSCERTLE0002 :: Certificate expired',
- 'DSCLE0001 :: Different log timestamp format',
'DSCLE0002 :: Weak passwordStorageScheme',
'DSCLE0003 :: Unauthorized Binds Allowed',
'DSCLE0004 :: Access Log buffering disabled',
@@ -240,8 +238,7 @@ def test_healthcheck_check_option(topology_st):
3. Success
"""
- output_list = ['config:hr_timestamp',
- 'config:passwordscheme',
+ output_list = ['config:passwordscheme',
# 'config:accesslog_buffering', Skip test access log buffering is disabled
'config:securitylog_buffering',
'config:unauth_binds',
diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
index 911cd5dcb..c5ab585e4 100644
--- a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
+++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
@@ -157,15 +157,13 @@ def test_behavior_with_value(topology_m2, waitfor_async_attr, entries):
None, '2000', '0', '-5'
:steps:
1. Set Replication Debugging loglevel for the errorlog
- 2. Set nsslapd-logging-hr-timestamps-enabled to 'off' on both suppliers
- 3. Gather all sync attempts, group by timestamp
- 4. Take the most common timestamp and assert it has appeared
+ 2. Gather all sync attempts, group by timestamp
+ 3. Take the most common timestamp and assert it has appeared
in the set range
:expectedresults:
1. Replication Debugging loglevel should be set
- 2. nsslapd-logging-hr-timestamps-enabled should be set
- 3. Operation should be successful
- 4. Errors log should have all timestamp appear
+ 2. Operation should be successful
+ 3. Errors log should have all timestamp appear
"""
supplier1 = topology_m2.ms["supplier1"]
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 88ba42b0c..c9b8520e9 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -868,8 +868,8 @@ bail:
}
/* Perform fixup (similar as fixup task) on all backends */
-int
-perform_needed_fixup()
+static int
+perform_needed_fixup(void)
{
task_data td = {0};
MemberOfConfig config = {0};
@@ -1354,13 +1354,11 @@ memberof_postop_del(Slapi_PBlock *pb)
struct slapi_entry *e = NULL;
Slapi_DN *copied_sdn;
PRBool deferred_update;
- MemberofDeferredList* deferred_list;
/* retrieve deferred update params that are valid until shutdown */
memberof_rlock_config();
mainConfig = memberof_get_config();
deferred_update = mainConfig->deferred_update;
- deferred_list = mainConfig->deferred_list;
memberof_unlock_config();
if (deferred_update) {
@@ -1740,13 +1738,11 @@ memberof_postop_modrdn(Slapi_PBlock *pb)
Slapi_DN *origin_sdn;
Slapi_DN *copied_sdn;
PRBool deferred_update;
- MemberofDeferredList* deferred_list;
/* retrieve deferred update params that are valid until shutdown */
memberof_rlock_config();
mainConfig = memberof_get_config();
deferred_update = mainConfig->deferred_update;
- deferred_list = mainConfig->deferred_list;
memberof_unlock_config();
if (deferred_update) {
@@ -2060,13 +2056,11 @@ memberof_postop_modify(Slapi_PBlock *pb)
MemberOfConfig *mainConfig = 0;
MemberOfConfig configCopy = {0};
PRBool deferred_update;
- MemberofDeferredList* deferred_list;
/* retrieve deferred update params that are valid until shutdown */
memberof_rlock_config();
mainConfig = memberof_get_config();
deferred_update = mainConfig->deferred_update;
- deferred_list = mainConfig->deferred_list;
memberof_unlock_config();
if (deferred_update) {
@@ -2322,13 +2316,11 @@ memberof_postop_add(Slapi_PBlock *pb)
MemberOfConfig *mainConfig;
Slapi_DN *copied_sdn;
PRBool deferred_update;
- MemberofDeferredList* deferred_list;
/* retrieve deferred update params that are valid until shutdown */
memberof_rlock_config();
mainConfig = memberof_get_config();
deferred_update = mainConfig->deferred_update;
- deferred_list = mainConfig->deferred_list;
memberof_unlock_config();
if (deferred_update) {
@@ -2972,7 +2964,7 @@ memberof_mod_attr_list_r(Slapi_PBlock *pb, MemberOfConfig *config, int mod, Slap
op_this_val = slapi_value_new_string(slapi_sdn_get_ndn(op_this_sdn));
slapi_value_set_flags(op_this_val, SLAPI_ATTR_FLAG_NORMALIZED_CIS);
- /* For gcc -analyser: ignore false positive about dn_str
+ /* For gcc -analyser: ignore false positive about dn_str
* (last_str cannot be NULL if last_size > bv->bv_len)
*/
#pragma GCC diagnostic push
@@ -4176,14 +4168,14 @@ static memberof_cached_value *
ancestors_cache_lookup(MemberOfConfig *config, const char *ndn)
{
memberof_cached_value *e;
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
long int start;
struct timespec tsnow;
#endif
cache_stat.total_lookup++;
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
start = 0;
} else {
@@ -4193,7 +4185,7 @@ ancestors_cache_lookup(MemberOfConfig *config, const char *ndn)
e = (memberof_cached_value *) PL_HashTableLookupConst(config->ancestors_cache, (const void *) ndn);
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
if (start) {
if (clock_gettime(CLOCK_REALTIME, &tsnow) == 0) {
cache_stat.cumul_duration_lookup += (tsnow.tv_nsec - start);
@@ -4209,14 +4201,14 @@ static PRBool
ancestors_cache_remove(MemberOfConfig *config, const char *ndn)
{
PRBool rc;
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
long int start;
struct timespec tsnow;
#endif
cache_stat.total_remove++;
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
start = 0;
} else {
@@ -4227,7 +4219,7 @@ ancestors_cache_remove(MemberOfConfig *config, const char *ndn)
rc = PL_HashTableRemove(config->ancestors_cache, (const void *)ndn);
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
if (start) {
if (clock_gettime(CLOCK_REALTIME, &tsnow) == 0) {
cache_stat.cumul_duration_remove += (tsnow.tv_nsec - start);
@@ -4241,13 +4233,13 @@ static PLHashEntry *
ancestors_cache_add(MemberOfConfig *config, const void *key, void *value)
{
PLHashEntry *e;
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
long int start;
struct timespec tsnow;
#endif
cache_stat.total_add++;
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
start = 0;
} else {
@@ -4257,7 +4249,7 @@ ancestors_cache_add(MemberOfConfig *config, const void *key, void *value)
e = PL_HashTableAdd(config->ancestors_cache, key, value);
-#if defined(DEBUG) && defined(HAVE_CLOCK_GETTIME)
+#if defined(DEBUG)
if (start) {
if (clock_gettime(CLOCK_REALTIME, &tsnow) == 0) {
cache_stat.cumul_duration_add += (tsnow.tv_nsec - start);
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
index 36d3fde91..d8a29f5db 100644
--- a/ldap/servers/slapd/back-ldbm/vlv.c
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
@@ -1477,20 +1477,14 @@ vlv_filter_candidates(backend *be, Slapi_PBlock *pb, const IDList *candidates, c
/* Check to see if our journey is really necessary */
if (counter++ % 10 == 0) {
-/* check time limit */
-#ifdef HAVE_CLOCK_GETTIME
+ /* check time limit */
if (slapi_timespec_expire_check(expire_time) == TIMER_EXPIRED) {
- slapi_log_err(SLAPI_LOG_TRACE, "vlv_filter_candidates", "LDAP_TIMELIMIT_EXCEEDED\n");
+ slapi_log_err(SLAPI_LOG_TRACE, "vlv_filter_candidates",
+ "LDAP_TIMELIMIT_EXCEEDED\n");
return_value = LDAP_TIMELIMIT_EXCEEDED;
done = 1;
}
-#else
- time_t curtime = current_time();
- if (time_up != -1 && curtime > time_up) {
- return_value = LDAP_TIMELIMIT_EXCEEDED;
- done = 1;
- }
-#endif
+
/* check lookthrough limit */
if (lookthrough_limit != -1 && lookedat > lookthrough_limit) {
return_value = LDAP_ADMINLIMIT_EXCEEDED;
@@ -1875,7 +1869,7 @@ vlv_print_access_log(Slapi_PBlock *pb, struct vlv_request *vlvi, struct vlv_resp
} else {
char fmt[18+NUMLEN];
char *msg = NULL;
- PR_snprintf(fmt, (sizeof fmt), "VLV %%d:%%d:%%.%ds %%s", vlvi->value.bv_len);
+ PR_snprintf(fmt, (sizeof fmt), "VLV %%d:%%d:%%.%lds %%s", vlvi->value.bv_len);
msg = slapi_ch_smprintf(fmt,
vlvi->beforeCount,
vlvi->afterCount,
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 73ca248af..3eb0d9fe5 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -197,7 +197,6 @@ slapi_onoff_t init_auditlogbuffering;
slapi_onoff_t init_auditlog_logging_hide_unhashed_pw;
slapi_onoff_t init_auditfaillog_logging_enabled;
slapi_onoff_t init_auditfaillog_logging_hide_unhashed_pw;
-slapi_onoff_t init_logging_hr_timestamps;
slapi_onoff_t init_csnlogging;
slapi_onoff_t init_pw_unlock;
slapi_onoff_t init_pw_must_change;
@@ -1389,13 +1388,6 @@ static struct config_get_and_set
NULL, 0,
(void **)&global_slapdFrontendConfig.securitylog,
CONFIG_STRING_OR_EMPTY, NULL, "", NULL /* prevents deletion when null */},
-/* warning: initialization makes pointer from integer without a cast [enabled by default]. Why do we get this? */
-#ifdef HAVE_CLOCK_GETTIME
- {CONFIG_LOGGING_HR_TIMESTAMPS, config_set_logging_hr_timestamps,
- NULL, 0,
- (void **)&global_slapdFrontendConfig.logging_hr_timestamps,
- CONFIG_ON_OFF, NULL, &init_logging_hr_timestamps, NULL},
-#endif
{CONFIG_EXTRACT_PEM, config_set_extract_pem,
NULL, 0,
(void **)&global_slapdFrontendConfig.extract_pem,
@@ -1957,11 +1949,6 @@ FrontendConfig_init(void)
init_auditfaillog_logging_hide_unhashed_pw =
cfg->auditfaillog_logging_hide_unhashed_pw = LDAP_ON;
init_auditfaillog_compress_enabled = cfg->auditfaillog_compress = LDAP_OFF;
-
-#ifdef HAVE_CLOCK_GETTIME
- init_logging_hr_timestamps =
- cfg->logging_hr_timestamps = LDAP_ON;
-#endif
init_entryusn_global = cfg->entryusn_global = LDAP_OFF;
cfg->entryusn_import_init = slapi_ch_strdup(SLAPD_ENTRYUSN_IMPORT_INIT);
cfg->default_naming_context = NULL; /* store normalized dn */
@@ -2248,26 +2235,6 @@ config_set_auditfaillog_unhashed_pw(const char *attrname, char *value, char *err
return retVal;
}
-#ifdef HAVE_CLOCK_GETTIME
-int32_t
-config_set_logging_hr_timestamps(const char *attrname, char *value, char *errorbuf, int apply)
-{
- slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- int32_t retVal = LDAP_SUCCESS;
-
- retVal = config_set_onoff(attrname, value, &(slapdFrontendConfig->logging_hr_timestamps),
- errorbuf, apply);
- if (apply && retVal == LDAP_SUCCESS) {
- if (strcasecmp(value, "on") == 0) {
- log_enable_hr_timestamps();
- } else {
- log_disable_hr_timestamps();
- }
- }
- return retVal;
-}
-#endif
-
/*
* Utility function called by many of the config_set_XXX() functions.
* Returns a non-zero value if 'value' is NULL and zero if not.
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 359bdd42d..bd6bbca33 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -47,7 +47,6 @@ PRUintn logbuf_tsdindex;
struct logbufinfo *logbuf_accum;
static struct logging_opts loginfo;
static int detached = 0;
-static int logging_hr_timestamps_enabled = 1;
//extern int slapd_ldap_debug;
@@ -2236,24 +2235,6 @@ log_set_expirationtimeunit(const char *attrname, char *expunit, int logtype, cha
return rv;
}
-/*
- * Enables HR timestamps in logs.
- */
-void
-log_enable_hr_timestamps()
-{
- logging_hr_timestamps_enabled = 1;
-}
-
-/*
- * Disables HR timestamps in logs.
- */
-void
-log_disable_hr_timestamps()
-{
- logging_hr_timestamps_enabled = 0;
-}
-
/******************************************************************************
* Write title line in log file
*****************************************************************************/
@@ -2766,29 +2747,20 @@ vslapd_log_emergency_error(LOGFD fp, const char *msg, int locked)
char tbuf[TBUFSIZE];
char buffer[SLAPI_LOG_BUFSIZ];
int size = TBUFSIZE;
+ struct timespec tsnow;
-#ifdef HAVE_CLOCK_GETTIME
- if (logging_hr_timestamps_enabled == 1) {
- struct timespec tsnow;
- if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
- syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to determine system time for message :: %s\n", msg);
- return;
- }
- if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(tbuf), tbuf, &size) != 0) {
- syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to format system time for message :: %s\n", msg);
- return;
- }
- } else {
-#endif
- time_t tnl;
- tnl = slapi_current_utc_time();
- if (format_localTime_log(tnl, sizeof(tbuf), tbuf, &size) != 0) {
- syslog(LOG_EMERG, "vslapd_log_emergency_error, Unable to format system time for message :: %s\n", msg);
- return;
- }
-#ifdef HAVE_CLOCK_GETTIME
+ if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
+ syslog(LOG_EMERG,
+ "vslapd_log_emergency_error, Unable to determine system time for message :: %s\n",
+ msg);
+ return;
+ }
+ if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(tbuf), tbuf, &size) != 0) {
+ syslog(LOG_EMERG,
+ "vslapd_log_emergency_error, Unable to format system time for message :: %s\n",
+ msg);
+ return;
}
-#endif
PR_snprintf(buffer, sizeof(buffer), "%s- EMERG - %s\n", tbuf, msg);
size = strlen(buffer);
@@ -2841,6 +2813,7 @@ vslapd_log_error(
int locked)
{
char buffer[SLAPI_LOG_BUFSIZ];
+ struct timespec tsnow;
char sev_name[10];
int blen = TBUFSIZE;
char *vbuf = NULL;
@@ -2852,32 +2825,21 @@ vslapd_log_error(
return -1;
}
-#ifdef HAVE_CLOCK_GETTIME
- if (logging_hr_timestamps_enabled == 1) {
- struct timespec tsnow;
- if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
- PR_snprintf(buffer, sizeof(buffer), "vslapd_log_error, Unable to determine system time for message :: %s", vbuf);
- log__error_emergency(buffer, 1, locked);
- return -1;
- }
- if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(buffer), buffer, &blen) != 0) {
- /* MSG may be truncated */
- PR_snprintf(buffer, sizeof(buffer), "vslapd_log_error, Unable to format system time for message :: %s", vbuf);
- log__error_emergency(buffer, 1, locked);
- return -1;
- }
- } else {
-#endif
- time_t tnl;
- tnl = slapi_current_utc_time();
- if (format_localTime_log(tnl, sizeof(buffer), buffer, &blen) != 0) {
- PR_snprintf(buffer, sizeof(buffer), "vslapd_log_error, Unable to format system time for message :: %s", vbuf);
- log__error_emergency(buffer, 1, locked);
- return -1;
- }
-#ifdef HAVE_CLOCK_GETTIME
+ if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
+ PR_snprintf(buffer, sizeof(buffer),
+ "vslapd_log_error, Unable to determine system time for message :: %s",
+ vbuf);
+ log__error_emergency(buffer, 1, locked);
+ return -1;
+ }
+ if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(buffer), buffer, &blen) != 0) {
+ /* MSG may be truncated */
+ PR_snprintf(buffer, sizeof(buffer),
+ "vslapd_log_error, Unable to format system time for message :: %s",
+ vbuf);
+ log__error_emergency(buffer, 1, locked);
+ return -1;
}
-#endif
/* Bug 561525: to be able to remove timestamp to not over pollute syslog, we may need
to skip the timestamp part of the message.
@@ -3060,6 +3022,7 @@ vslapd_log_access(const char *fmt, va_list ap)
int32_t blen = TBUFSIZE;
int32_t vlen;
int32_t rc = LDAP_SUCCESS;
+ struct timespec tsnow;
time_t tnl;
#ifdef SYSTEMTAP
@@ -3072,34 +3035,23 @@ vslapd_log_access(const char *fmt, va_list ap)
return -1;
}
-#ifdef HAVE_CLOCK_GETTIME
- if (logging_hr_timestamps_enabled == 1) {
- struct timespec tsnow;
- if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
- /* Make an error */
- PR_snprintf(buffer, sizeof(buffer), "vslapd_log_access, Unable to determine system time for message :: %s", vbuf);
- log__error_emergency(buffer, 1, 0);
- return -1;
- }
- tnl = tsnow.tv_sec;
- if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(buffer), buffer, &blen) != 0) {
- /* MSG may be truncated */
- PR_snprintf(buffer, sizeof(buffer), "vslapd_log_access, Unable to format system time for message :: %s", vbuf);
- log__error_emergency(buffer, 1, 0);
- return -1;
- }
- } else {
-#endif
- tnl = slapi_current_utc_time();
- if (format_localTime_log(tnl, sizeof(buffer), buffer, &blen) != 0) {
- /* MSG may be truncated */
- PR_snprintf(buffer, sizeof(buffer), "vslapd_log_access, Unable to format system time for message :: %s", vbuf);
- log__error_emergency(buffer, 1, 0);
- return -1;
- }
-#ifdef HAVE_CLOCK_GETTIME
+ if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
+ /* Make an error */
+ PR_snprintf(buffer, sizeof(buffer),
+ "vslapd_log_access, Unable to determine system time for message :: %s",
+ vbuf);
+ log__error_emergency(buffer, 1, 0);
+ return -1;
+ }
+ tnl = tsnow.tv_sec;
+ if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(buffer), buffer, &blen) != 0) {
+ /* MSG may be truncated */
+ PR_snprintf(buffer, sizeof(buffer),
+ "vslapd_log_access, Unable to format system time for message :: %s",
+ vbuf);
+ log__error_emergency(buffer, 1, 0);
+ return -1;
}
-#endif
if (SLAPI_LOG_BUFSIZ - blen < vlen) {
/* We won't be able to fit the message in! Uh-oh! */
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 2501974ce..a267637d7 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -423,12 +423,6 @@ int config_set_maxsimplepaged_per_conn(const char *attrname, char *value, char *
int log_external_libs_debug_set_log_fn(void);
int log_set_backend(const char *attrname, char *value, int logtype, char *errorbuf, int apply);
-#ifdef HAVE_CLOCK_GETTIME
-int config_set_logging_hr_timestamps(const char *attrname, char *value, char *errorbuf, int apply);
-void log_enable_hr_timestamps(void);
-void log_disable_hr_timestamps(void);
-#endif
-
int config_get_SSLclientAuth(void);
int config_get_ssl_check_hostname(void);
tls_check_crl_t config_get_tls_check_crl(void);
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index c664af8df..fc81d08ea 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2389,10 +2389,6 @@ typedef struct _slapdEntryPoints
#define CONFIG_EXTRACT_PEM "nsslapd-extract-pemfiles"
-#ifdef HAVE_CLOCK_GETTIME
-#define CONFIG_LOGGING_HR_TIMESTAMPS "nsslapd-logging-hr-timestamps-enabled"
-#endif
-
/* getenv alternative */
#define CONFIG_MALLOC_MXFAST "nsslapd-malloc-mxfast"
#define CONFIG_MALLOC_TRIM_THRESHOLD "nsslapd-malloc-trim-threshold"
@@ -2625,9 +2621,6 @@ typedef struct _slapdFrontendConfig
slapi_onoff_t auditfaillog_compress;
char *logging_backend;
-#ifdef HAVE_CLOCK_GETTIME
- slapi_onoff_t logging_hr_timestamps;
-#endif
slapi_onoff_t return_exact_case; /* Return attribute names with the same case
as they appear in at.conf */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index bac1970e4..a0e7b3779 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6778,7 +6778,7 @@ time_t slapi_current_time(void) __attribute__((deprecated));
* and should NOT be used for timer information.
*/
int32_t slapi_clock_gettime(struct timespec *tp);
-/*
+/*
* slapi_clock_gettime should have better been called
* slapi_clock_utc_gettime but sice the function pre-existed
* we are just adding an alias (to avoid risking to break
@@ -8321,8 +8321,6 @@ int slapi_is_special_rdn(const char *rdn, int flag);
*/
void DS_Sleep(PRIntervalTime ticks);
-
-#ifdef HAVE_CLOCK_GETTIME
/**
* Diffs two timespects a - b into *diff. This is useful with
* clock_monotonic to find time taken to perform operations.
@@ -8373,7 +8371,6 @@ void slapi_operation_workq_time_elapsed(Slapi_Operation *o, struct timespec *ela
* \param Slapi_Operation o - the operation which is inprogress
*/
void slapi_operation_set_time_started(Slapi_Operation *o);
-#endif
/**
* Store a 32bit integral value atomicly
diff --git a/src/lib389/lib389/config.py b/src/lib389/lib389/config.py
index eaa8a94df..acaf28526 100644
--- a/src/lib389/lib389/config.py
+++ b/src/lib389/lib389/config.py
@@ -23,7 +23,7 @@ from lib389 import Entry
from lib389._mapped_object import DSLdapObject
from lib389.utils import ensure_bytes, selinux_label_port, selinux_present
from lib389.lint import (
- DSCLE0001, DSCLE0002, DSCLE0003, DSCLE0004, DSCLE0005, DSCLE0006, DSELE0001
+ DSCLE0002, DSCLE0003, DSCLE0004, DSCLE0005, DSCLE0006, DSELE0001
)
class Config(DSLdapObject):
@@ -203,14 +203,6 @@ class Config(DSLdapObject):
def lint_uid(cls):
return 'config'
- def _lint_hr_timestamp(self):
- hr_timestamp = self.get_attr_val('nsslapd-logging-hr-timestamps-enabled')
- if ensure_bytes('on') != hr_timestamp:
- report = copy.deepcopy(DSCLE0001)
- report['fix'] = report['fix'].replace('YOUR_INSTANCE', self._instance.serverid)
- report['check'] = "config:hr_timestamp"
- yield report
-
def _lint_passwordscheme(self):
allowed_schemes = ['PBKDF2-SHA512', 'PBKDF2_SHA256', 'PBKDF2_SHA512', 'GOST_YESCRYPT']
u_password_scheme = self.get_attr_val_utf8('passwordStorageScheme')
diff --git a/src/lib389/lib389/lint.py b/src/lib389/lib389/lint.py
index 9baa710de..d0747f0f4 100644
--- a/src/lib389/lib389/lint.py
+++ b/src/lib389/lib389/lint.py
@@ -87,28 +87,6 @@ DSBLE0006 = {
}
# Config checks
-DSCLE0001 = {
- 'dsle': 'DSCLE0001',
- 'severity': 'LOW',
- 'description': 'Different log timestamp format.',
- 'items': ['cn=config', ],
- 'detail': """nsslapd-logging-hr-timestamps-enabled changes the log format in directory server from
-
-[07/Jun/2017:17:15:58 +1000]
-
-to
-
-[07/Jun/2017:17:15:58.716117312 +1000]
-
-This actually provides a performance improvement. Additionally, this setting will be
-removed in a future release.
-""",
- 'fix': """Set nsslapd-logging-hr-timestamps-enabled to on.
-You can use 'dsconf' to set this attribute. Here is an example:
-
- # dsconf slapd-YOUR_INSTANCE config replace nsslapd-logging-hr-timestamps-enabled=on"""
-}
-
DSCLE0002 = {
'dsle': 'DSCLE0002',
'severity': 'HIGH',
diff --git a/src/lib389/lib389/tests/healthcheck_test.py b/src/lib389/lib389/tests/healthcheck_test.py
index a36e6fbf8..767004813 100644
--- a/src/lib389/lib389/tests/healthcheck_test.py
+++ b/src/lib389/lib389/tests/healthcheck_test.py
@@ -31,11 +31,6 @@ def test_hc_encryption(topology_st):
assert result == DSELE0001
def test_hc_config(topology_st):
- # Check the HR timestamp
- topology_st.standalone.config.set('nsslapd-logging-hr-timestamps-enabled', 'off')
- result = topology_st.standalone.config._lint_hr_timestamp()
- assert result == DSCLE0001
-
# Check the password scheme check.
topology_st.standalone.config.set('passwordStorageScheme', 'SSHA')
result = topology_st.standalone.config._lint_passwordscheme()
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.