commit_id
string | repo
string | commit_message
string | diff
string | label
int64 |
---|---|---|---|---|
ca6e6538a65bc03f7b8e1c521b5d0ba6d7b82a9e
|
389ds/389-ds-base
|
wrap new style matching rule plugins for use in old style indexing code
Create wrappers for the new syntax plugin style matching rule code so that
we can use the old matching rule indexing functions. Introduced a new type
of indexer for Slapi_Value values. The old style used struct berval * values,
but the syntax plugins and a lot of newer code work with Slapi_Value* instead.
|
commit ca6e6538a65bc03f7b8e1c521b5d0ba6d7b82a9e
Author: Rich Megginson <[email protected]>
Date: Wed Feb 10 09:16:28 2010 -0700
wrap new style matching rule plugins for use in old style indexing code
Create wrappers for the new syntax plugin style matching rule code so that
we can use the old matching rule indexing functions. Introduced a new type
of indexer for Slapi_Value values. The old style used struct berval * values,
but the syntax plugins and a lot of newer code work with Slapi_Value* instead.
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attr.c b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
index 3866e121c..a73a53200 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attr.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attr.c
@@ -240,6 +240,25 @@ attr_index_config(
}
/* compute a->ai_index_rules: */
+ /* for index rules there are two uses:
+ * 1) a simple way to define an ordered index to support <= and >= searches
+ * for those attributes which do not have an ORDERING matching rule defined
+ * for them in their schema definition. The index generated is not a :RULE:
+ * index, it is a normal = EQUALITY index, with the keys ordered using the
+ * comparison function provided by the syntax plugin for the attribute. For
+ * example - the uidNumber attribute has INTEGER syntax, but the standard
+ * definition of the attribute does not specify an ORDERING matching rule.
+ * By default, this means that you cannot perform searches like
+ * (uidNumber>=501) - but many users expect to be able to perform this type of
+ * search. By specifying that you want an ordered index, using an integer
+ * matching rule, you can support indexed seaches of this type.
+ * 2) a RULE index - the index key prefix is :NAMEOROID: - this is used
+ * to support extensible match searches like (cn:fr-CA.3:=gilles), which would
+ * find the index key :fr-CA.3:gilles in the cn index.
+ * We check first to see if this is a simple ordered index - user specified an
+ * ordering matching rule compatible with the attribute syntax, and there is
+ * a compare function. If not, we assume it is a RULE index definition.
+ */
j = 0;
if (index_rules != NULL) for (; index_rules[j] != NULL; ++j);
if (j > 0) { /* there are some candidates */
@@ -250,47 +269,66 @@ attr_index_config(
/* Check that index_rules[j] is an official OID */
char* officialOID = NULL;
IFP mrINDEX = NULL;
- Slapi_PBlock* pb = slapi_pblock_new();
+ Slapi_PBlock* pb = NULL;
+ int do_continue = 0; /* can we skip the RULE parsing stuff? */
+
+ if ((p = strstr(index_rules[j], INDEX_ATTR_SUBSTRBEGIN))) {
+ _set_attr_substrlen(INDEX_SUBSTRBEGIN, index_rules[j],
+ &substrlens);
+ do_continue = 1; /* done with j - next j */
+ } else if ((p = strstr(index_rules[j], INDEX_ATTR_SUBSTRMIDDLE))) {
+ _set_attr_substrlen(INDEX_SUBSTRMIDDLE, index_rules[j],
+ &substrlens);
+ do_continue = 1; /* done with j - next j */
+ } else if ((p = strstr(index_rules[j], INDEX_ATTR_SUBSTREND))) {
+ _set_attr_substrlen(INDEX_SUBSTREND, index_rules[j],
+ &substrlens);
+ do_continue = 1; /* done with j - next j */
+ /* check if this is a simple ordering specification
+ for an attribute that has no ordering matching rule */
+ } else if (slapi_matchingrule_is_ordering(index_rules[j], attrsyntax_oid) &&
+ !a->ai_sattr.a_mr_ord_plugin) { /* no ordering for this attribute */
+ need_compare_fn = 1; /* get compare func for this attr */
+ do_continue = 1; /* done with j - next j */
+ }
+
+ if (do_continue) {
+ continue; /* done with index_rules[j] */
+ }
+
+ /* must be a RULE specification */
+ pb = slapi_pblock_new();
+ /* next check if this is a RULE type index
+ try to actually create an indexer and see if the indexer
+ actually has a regular INDEX_FN or an INDEX_SV_FN */
if (!slapi_pblock_set (pb, SLAPI_PLUGIN_MR_OID, index_rules[j]) &&
!slapi_pblock_set (pb, SLAPI_PLUGIN_MR_TYPE, a->ai_type) &&
!slapi_mr_indexer_create (pb) &&
- !slapi_pblock_get (pb, SLAPI_PLUGIN_MR_INDEX_FN, &mrINDEX) &&
- mrINDEX != NULL &&
+ ((!slapi_pblock_get (pb, SLAPI_PLUGIN_MR_INDEX_FN, &mrINDEX) &&
+ mrINDEX != NULL) ||
+ (!slapi_pblock_get (pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX) &&
+ mrINDEX != NULL)) &&
!slapi_pblock_get (pb, SLAPI_PLUGIN_MR_OID, &officialOID) &&
officialOID != NULL) {
if (!strcasecmp (index_rules[j], officialOID)) {
- official_rules[k++] = slapi_ch_strdup (officialOID);
+ official_rules[k++] = slapi_ch_strdup (officialOID);
} else {
- char* preamble = slapi_ch_smprintf("%s: line %d", fname, lineno);
- LDAPDebug (LDAP_DEBUG_ANY, "%s: use \"%s\" instead of \"%s\" (ignored)\n",
- preamble, officialOID, index_rules[j] );
- slapi_ch_free((void**)&preamble);
+ char* preamble = slapi_ch_smprintf("%s: line %d", fname, lineno);
+ LDAPDebug (LDAP_DEBUG_ANY, "%s: use \"%s\" instead of \"%s\" (ignored)\n",
+ preamble, officialOID, index_rules[j] );
+ slapi_ch_free((void**)&preamble);
}
- } else if ((p =
- strstr(index_rules[j], INDEX_ATTR_SUBSTRBEGIN))) {
- _set_attr_substrlen(INDEX_SUBSTRBEGIN, index_rules[j],
- &substrlens);
- } else if ((p =
- strstr(index_rules[j], INDEX_ATTR_SUBSTRMIDDLE))) {
- _set_attr_substrlen(INDEX_SUBSTRMIDDLE, index_rules[j],
- &substrlens);
- } else if ((p =
- strstr(index_rules[j], INDEX_ATTR_SUBSTREND))) {
- _set_attr_substrlen(INDEX_SUBSTREND, index_rules[j],
- &substrlens);
- } else if (!slapi_matchingrule_is_ordering(index_rules[j], attrsyntax_oid)) {
+ } else { /* we don't know what this is */
LDAPDebug (LDAP_DEBUG_ANY, "%s: line %d: "
"unknown or invalid matching rule \"%s\" in index configuration (ignored)\n",
fname, lineno, index_rules[j] );
- } else { /* assume builtin and use compare fn provided by syntax plugin */
- need_compare_fn = 1;
}
{/* It would improve speed to save the indexer, for future use.
But, for simplicity, we destroy it now: */
IFP mrDESTROY = NULL;
if (!slapi_pblock_get (pb, SLAPI_PLUGIN_DESTROY_FN, &mrDESTROY) &&
- mrDESTROY != NULL) {
- mrDESTROY (pb);
+ mrDESTROY != NULL) {
+ mrDESTROY (pb);
}
}
slapi_pblock_destroy (pb);
@@ -317,12 +355,8 @@ attr_index_config(
/* if user didn't specify an ordering rule in the index config,
see if the schema def for the attr defines one */
- if (!need_compare_fn) {
- asyntaxinfo *asi = attr_syntax_get_by_name( a->ai_type );
- if (asi && asi->asi_mr_ordering) {
- need_compare_fn = 1;
- }
- attr_syntax_return( asi );
+ if (!need_compare_fn && a->ai_sattr.a_mr_ord_plugin) {
+ need_compare_fn = 1;
}
if (need_compare_fn) {
diff --git a/ldap/servers/slapd/back-ldbm/matchrule.c b/ldap/servers/slapd/back-ldbm/matchrule.c
index 0734935fa..043c9682a 100644
--- a/ldap/servers/slapd/back-ldbm/matchrule.c
+++ b/ldap/servers/slapd/back-ldbm/matchrule.c
@@ -96,7 +96,17 @@ create_matchrule_indexer(Slapi_PBlock **pb,char* matchrule,char* type)
if ( (0 != return_value) || (mrINDEX == NULL) )
{
- return LDAP_OPERATIONS_ERROR;
+ /* doesn't have an old MR_INDEX_FN - look for MR_INDEX_SV_FN */
+ return_value = slapi_pblock_get (*pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX);
+
+ if ( (0 != return_value) || (mrINDEX == NULL) )
+ {
+ return LDAP_OPERATIONS_ERROR;
+ }
+ else
+ {
+ return LDAP_SUCCESS;
+ }
}
else
{
@@ -146,18 +156,10 @@ int
matchrule_values_to_keys_sv(Slapi_PBlock *pb,Slapi_Value **input_values,Slapi_Value ***output_values)
{
IFP mrINDEX = NULL;
- struct berval **bvi, **bvo;
-
- valuearray_get_bervalarray(input_values, &bvi);
- slapi_pblock_get (pb, SLAPI_PLUGIN_MR_INDEX_FN, &mrINDEX);
- slapi_pblock_set (pb, SLAPI_PLUGIN_MR_VALUES, bvi);
+ slapi_pblock_get (pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &mrINDEX);
+ slapi_pblock_set (pb, SLAPI_PLUGIN_MR_VALUES, input_values);
mrINDEX (pb);
- slapi_pblock_get (pb, SLAPI_PLUGIN_MR_KEYS, &bvo);
-
- slapi_pblock_set (pb, SLAPI_PLUGIN_MR_VALUES, NULL);
- ber_bvecfree(bvi);
-
- valuearray_init_bervalarray(bvo, output_values);
+ slapi_pblock_get (pb, SLAPI_PLUGIN_MR_KEYS, output_values);
return 0;
}
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index b5d994ade..7d8857459 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -1349,6 +1349,9 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
case SLAPI_PLUGIN_MR_INDEX_FN:
(*(IFP *)value) = pblock->pb_mr_index_fn;
break;
+ case SLAPI_PLUGIN_MR_INDEX_SV_FN:
+ (*(IFP *)value) = pblock->pb_mr_index_sv_fn;
+ break;
/* matching rule plugin arguments */
case SLAPI_PLUGIN_MR_OID:
@@ -2722,6 +2725,9 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
case SLAPI_PLUGIN_MR_INDEX_FN:
pblock->pb_mr_index_fn = (IFP) value;
break;
+ case SLAPI_PLUGIN_MR_INDEX_SV_FN:
+ pblock->pb_mr_index_sv_fn = (IFP) value;
+ break;
/* matching rule plugin arguments */
case SLAPI_PLUGIN_MR_OID:
diff --git a/ldap/servers/slapd/plugin_mr.c b/ldap/servers/slapd/plugin_mr.c
index 8f62a7a0f..014082b77 100644
--- a/ldap/servers/slapd/plugin_mr.c
+++ b/ldap/servers/slapd/plugin_mr.c
@@ -47,9 +47,16 @@
#include "slap.h"
+struct mr_indexer_private {
+ Slapi_Value **sva; /* if using index_sv_fn */
+ struct berval **bva; /* if using index_fn */
+};
+
static oid_item_t* global_mr_oids = NULL;
static PRLock* global_mr_oids_lock = NULL;
+static int default_mr_indexer_create(Slapi_PBlock* pb);
+
static void
init_global_mr_lock()
{
@@ -78,6 +85,27 @@ plugin_mr_find( const char *nameoroid )
return ( pi );
}
+static int
+plugin_mr_get_type(struct slapdplugin *pi)
+{
+ int rc = LDAP_FILTER_EQUALITY;
+ if (pi) {
+ char **str = pi->plg_mr_names;
+ for (; str && *str; ++str) {
+ if (PL_strcasestr(*str, "substr")) {
+ rc = LDAP_FILTER_SUBSTRINGS;
+ break;
+ }
+ if (PL_strcasestr(*str, "approx")) {
+ rc = LDAP_FILTER_APPROX;
+ break;
+ }
+ }
+ }
+
+ return rc;
+}
+
static struct slapdplugin*
plugin_mr_find_registered (char* oid)
{
@@ -144,15 +172,32 @@ slapi_mr_indexer_create (Slapi_PBlock* opb)
!(rc = slapi_pblock_get (&pb, SLAPI_PLUGIN_MR_INDEXER_CREATE_FN, &createFn)) &&
createFn != NULL &&
!(rc = createFn (&pb)) &&
- !(rc = slapi_pblock_get (&pb, SLAPI_PLUGIN_MR_INDEX_FN, &indexFn)) &&
- indexFn != NULL)
+ ((!(rc = slapi_pblock_get (&pb, SLAPI_PLUGIN_MR_INDEX_FN, &indexFn)) &&
+ indexFn != NULL) ||
+ (!(rc = slapi_pblock_get (&pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, &indexFn)) &&
+ indexFn != NULL)))
{
/* Success: this plugin can handle it. */
memcpy (opb, &pb, sizeof(Slapi_PBlock));
plugin_mr_bind (oid, mrp); /* for future reference */
+ rc = 0; /* success */
break;
}
}
+ if (rc != 0) {
+ /* look for a new syntax-style mr plugin */
+ struct slapdplugin *pi = plugin_mr_find(oid);
+ if (pi) {
+ Slapi_PBlock pb;
+ memcpy (&pb, opb, sizeof(Slapi_PBlock));
+ slapi_pblock_set(&pb, SLAPI_PLUGIN, pi);
+ rc = default_mr_indexer_create(&pb);
+ if (!rc) {
+ memcpy (opb, &pb, sizeof(Slapi_PBlock));
+ plugin_mr_bind (oid, pi); /* for future reference */
+ }
+ }
+ }
}
}
return rc;
@@ -229,3 +274,136 @@ slapi_mr_filter_index (Slapi_Filter* f, Slapi_PBlock* pb)
return rc;
}
+static struct mr_indexer_private *
+mr_indexer_private_new()
+{
+ return (struct mr_indexer_private *)slapi_ch_calloc(1, sizeof(struct mr_indexer_private));
+}
+
+static void
+mr_indexer_private_done(struct mr_indexer_private *mrip)
+{
+ if (mrip && mrip->sva) {
+ valuearray_free(&mrip->sva);
+ } else if (mrip && mrip->bva) {
+ ber_bvecfree(mrip->bva);
+ mrip->bva = NULL;
+ }
+}
+
+static void
+mr_indexer_private_free(struct mr_indexer_private **mrip)
+{
+ if (mrip) {
+ mr_indexer_private_done(*mrip);
+ slapi_ch_free((void **)mrip);
+ }
+}
+
+/* this function takes SLAPI_PLUGIN_MR_VALUES as Slapi_Value ** and
+ returns SLAPI_PLUGIN_MR_KEYS as Slapi_Value **
+*/
+static int
+mr_wrap_mr_index_sv_fn(Slapi_PBlock* pb)
+{
+ int rc = -1;
+ Slapi_Value **in_vals = NULL;
+ Slapi_Value **out_vals = NULL;
+ struct slapdplugin *pi = NULL;
+
+ slapi_pblock_set(pb, SLAPI_PLUGIN_MR_KEYS, out_vals); /* make sure output is cleared */
+ slapi_pblock_get(pb, SLAPI_PLUGIN, &pi);
+ if (!pi) {
+ LDAPDebug0Args(LDAP_DEBUG_ANY, "mr_wrap_mr_index_sv_fn: error - no plugin specified\n");
+ } else if (!pi->plg_mr_values2keys) {
+ LDAPDebug0Args(LDAP_DEBUG_ANY, "mr_wrap_mr_index_sv_fn: error - plugin has no plg_mr_values2keys function\n");
+ } else {
+ struct mr_indexer_private *mrip = NULL;
+ int ftype = plugin_mr_get_type(pi);
+ slapi_pblock_get(pb, SLAPI_PLUGIN_MR_VALUES, &in_vals);
+ (*pi->plg_mr_values2keys)(pb, in_vals, &out_vals, ftype);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_MR_KEYS, out_vals);
+ /* we have to save out_vals to free next time or during destroy */
+ slapi_pblock_get(pb, SLAPI_PLUGIN_OBJECT, &mrip);
+ mr_indexer_private_done(mrip); /* free old vals, if any */
+ mrip->sva = out_vals; /* save pointer for later */
+ rc = 0;
+ }
+ return rc;
+}
+
+/* this function takes SLAPI_PLUGIN_MR_VALUES as struct berval ** and
+ returns SLAPI_PLUGIN_MR_KEYS as struct berval **
+*/
+static int
+mr_wrap_mr_index_fn(Slapi_PBlock* pb)
+{
+ int rc = -1;
+ struct berval **in_vals = NULL;
+ struct berval **out_vals = NULL;
+ struct mr_indexer_private *mrip = NULL;
+ Slapi_Value **in_vals_sv = NULL;
+ Slapi_Value **out_vals_sv = NULL;
+
+ slapi_pblock_get(pb, SLAPI_PLUGIN_MR_VALUES, &in_vals); /* get bervals */
+ /* convert bervals to sv ary */
+ valuearray_init_bervalarray(in_vals, &in_vals_sv);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_MR_VALUES, in_vals_sv); /* use sv */
+ rc = mr_wrap_mr_index_sv_fn(pb);
+ /* get result sv keys */
+ slapi_pblock_get(pb, SLAPI_PLUGIN_MR_KEYS, &out_vals_sv);
+ /* convert to bvec */
+ valuearray_get_bervalarray(out_vals_sv, &out_vals);
+ valuearray_free(&out_vals_sv); /* don't need svals */
+ /* we have to save out_vals to free next time or during destroy */
+ slapi_pblock_get(pb, SLAPI_PLUGIN_OBJECT, &mrip);
+ mr_indexer_private_done(mrip); /* free old vals, if any */
+ mrip->bva = out_vals; /* save pointer for later */
+
+ return rc;
+}
+
+static int
+default_mr_indexer_destroy(Slapi_PBlock* pb)
+{
+ struct mr_indexer_private *mrip = NULL;
+
+ slapi_pblock_get(pb, SLAPI_PLUGIN_OBJECT, &mrip);
+ mr_indexer_private_free(&mrip);
+ mrip = NULL;
+ slapi_pblock_set(pb, SLAPI_PLUGIN_OBJECT, mrip);
+
+ return 0;
+}
+
+/* this is the default mr indexer create func
+ for new syntax-style mr plugins */
+static int
+default_mr_indexer_create(Slapi_PBlock* pb)
+{
+ int rc = -1;
+ struct slapdplugin *pi = NULL;
+
+ slapi_pblock_get(pb, SLAPI_PLUGIN, &pi);
+ if (NULL == pi) {
+ LDAPDebug0Args(LDAP_DEBUG_ANY, "default_mr_indexer_create: error - no plugin specified\n");
+ goto done;
+ }
+
+ if (NULL == pi->plg_mr_values2keys) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "default_mr_indexer_create: error - plugin [%s] has no plg_mr_values2keys function\n",
+ pi->plg_name);
+ goto done;
+ }
+
+ slapi_pblock_set(pb, SLAPI_PLUGIN_OBJECT, mr_indexer_private_new());
+ slapi_pblock_set(pb, SLAPI_PLUGIN_MR_INDEX_FN, mr_wrap_mr_index_fn);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_MR_INDEX_SV_FN, mr_wrap_mr_index_sv_fn);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_DESTROY_FN, default_mr_indexer_destroy);
+ slapi_pblock_set(pb, SLAPI_PLUGIN_MR_INDEXER_CREATE_FN, default_mr_indexer_create);
+ rc = 0;
+
+done:
+ return rc;
+}
+
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 9dea45257..adef7a8af 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1440,7 +1440,7 @@ typedef struct slapi_pblock {
mrFilterMatchFn pb_mr_filter_match_fn;
IFP pb_mr_filter_index_fn;
IFP pb_mr_filter_reset_fn;
- IFP pb_mr_index_fn;
+ IFP pb_mr_index_fn; /* values and keys are struct berval ** */
char* pb_mr_oid;
char* pb_mr_type;
struct berval* pb_mr_value;
@@ -1540,6 +1540,7 @@ typedef struct slapi_pblock {
/* used in plugin init; pb_plugin is not ready, then */
LDAPControl **pb_search_ctrls; /* for search operations, allows plugins to provide
controls to pass for each entry or referral returned */
+ IFP pb_mr_index_sv_fn; /* values and keys are Slapi_Value ** */
} slapi_pblock;
/* index if substrlens */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 6ced381ec..5f0fc47f5 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5456,6 +5456,7 @@ typedef struct slapi_plugindesc {
#define SLAPI_PLUGIN_MR_FILTER_INDEX_FN 603
#define SLAPI_PLUGIN_MR_FILTER_RESET_FN 604
#define SLAPI_PLUGIN_MR_INDEX_FN 605
+#define SLAPI_PLUGIN_MR_INDEX_SV_FN 606
/* matching rule plugin arguments */
#define SLAPI_PLUGIN_MR_OID 610
| 0 |
aa1bde4720781789d5cbe38b7d12c02e0a8c97af
|
389ds/389-ds-base
|
Issue: 50358 - Create a Bitwise Plugin class in plugins.py
Create a Bitwise Plugin class in plugins.py
https://pagure.io/389-ds-base/issue/50358
Author: aborah
Reviewed by: William Brown
|
commit aa1bde4720781789d5cbe38b7d12c02e0a8c97af
Author: Anuj Borah <[email protected]>
Date: Thu May 9 19:13:31 2019 +0530
Issue: 50358 - Create a Bitwise Plugin class in plugins.py
Create a Bitwise Plugin class in plugins.py
https://pagure.io/389-ds-base/issue/50358
Author: aborah
Reviewed by: William Brown
diff --git a/src/lib389/lib389/plugins.py b/src/lib389/lib389/plugins.py
index e7a1f9593..671e7df32 100644
--- a/src/lib389/lib389/plugins.py
+++ b/src/lib389/lib389/plugins.py
@@ -2084,3 +2084,16 @@ class PluginsLegacy(object):
self.conn.modify_s(dn, [(ldap.MOD_REPLACE,
PLUGIN_PROPNAME_TO_ATTRNAME[PLUGIN_ENABLE],
PLUGINS_ENABLE_OFF_VALUE)])
+
+
+class BitwisePlugin(Plugin):
+ """A single instance of Bitwise plugin entry
+
+ :param instance: An instance
+ :type instance: lib389.DirSrv
+ :param dn: Entry DN
+ :type dn: str
+ """
+
+ def __init__(self, instance, dn="cn=Bitwise Plugin,cn=plugins,cn=config"):
+ super(BitwisePlugin, self).__init__(instance, dn)
| 0 |
1623ec3dda3207deb9d17c63fac7501189ce1a63
|
389ds/389-ds-base
|
Add missing dependencies for python-lib389
Ticket 48405 - python-lib389 in rawhide is missing dependencies
Description: Add missing dependencies to python-lib389 spec file
https://fedorahosted.org/389/ticket/48405
Review by: mreynolds (Thanks!)
|
commit 1623ec3dda3207deb9d17c63fac7501189ce1a63
Author: Viktor Ashirov <[email protected]>
Date: Fri Jan 8 17:34:46 2016 +0100
Add missing dependencies for python-lib389
Ticket 48405 - python-lib389 in rawhide is missing dependencies
Description: Add missing dependencies to python-lib389 spec file
https://fedorahosted.org/389/ticket/48405
Review by: mreynolds (Thanks!)
diff --git a/src/lib389/python-lib389.spec b/src/lib389/python-lib389.spec
index 41af220f0..30a5b7f03 100644
--- a/src/lib389/python-lib389.spec
+++ b/src/lib389/python-lib389.spec
@@ -10,6 +10,9 @@ BuildArch: noarch
Url: http://port389.org/docs/389ds/FAQ/upstream-test-framework.html
BuildRequires: python2-devel, python-ldap, krb5-devel, python-setuptools
Requires: pytest
+Requires: python-ldap
+Requires: python-six
+Requires: python-krbV
%{?python_provide:%python_provide python2-lib389}
| 0 |
77e61a80d56da6fe8a9dd56c6c29acb86032e131
|
389ds/389-ds-base
|
Ticket #529 - dn normalization must handle multiple space characters in attributes
Description: commit 69ff83598d517bed84922b1c7dd67cab023b4d99
introduced 2 coverity defects:
13162: Resource leak
Free allocated strings: newrdn and parentdn
13163: Unused pointer value
Removed unused pointer "p"
https://fedorahosted.org/389/ticket/529
Reviewed by Rich (Thank you!!)
|
commit 77e61a80d56da6fe8a9dd56c6c29acb86032e131
Author: Noriko Hosoi <[email protected]>
Date: Wed Apr 10 10:35:12 2013 -0700
Ticket #529 - dn normalization must handle multiple space characters in attributes
Description: commit 69ff83598d517bed84922b1c7dd67cab023b4d99
introduced 2 coverity defects:
13162: Resource leak
Free allocated strings: newrdn and parentdn
13163: Unused pointer value
Removed unused pointer "p"
https://fedorahosted.org/389/ticket/529
Reviewed by Rich (Thank you!!)
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 1c526eff5..3cb25d375 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -1780,11 +1780,10 @@ upgradedn_producer(void *param)
ID alt_id;
if (NULL == dn_norm_sp_conflicts) {
char buf[BUFSIZ];
- char *p;
int my_max = 8;
while (fgets(buf, BUFSIZ-1, job->upgradefd)) {
/* search "OID0: OID1 OID2 ... */
- if (!isdigit(*buf) || !(p = PL_strchr(buf, ':'))) {
+ if (!isdigit(*buf) || (NULL == PL_strchr(buf, ':'))) {
continue;
}
if (add_IDs_to_IDarray(&dn_norm_sp_conflicts, &my_max,
@@ -1799,11 +1798,9 @@ upgradedn_producer(void *param)
}
alt_id = is_conflict_ID(dn_norm_sp_conflicts, my_idx, temp_id);
if (alt_id) {
- char *parentdn = NULL;
- char *newrdn = NULL;
if (alt_id != temp_id) {
- parentdn = slapi_dn_parent(normdn);
- newrdn = slapi_create_dn_string("%s %u", rdn, temp_id);
+ char *newrdn = slapi_create_dn_string("%s %u", rdn, temp_id);
+ char *parentdn = slapi_dn_parent(normdn);
/* This entry is a conflict of alt_id */
LDAPDebug(LDAP_DEBUG_ANY,
"Entry %s (%lu) is a conflict of (%lu)\n",
@@ -1811,6 +1808,8 @@ upgradedn_producer(void *param)
LDAPDebug2Args(LDAP_DEBUG_ANY, "Renaming \"%s\" to \"%s\"\n",
rdn, newrdn);
normdn = slapi_ch_smprintf("%s,%s", newrdn, parentdn);
+ slapi_ch_free_string(&newrdn);
+ slapi_ch_free_string(&parentdn);
/* Reset DN and RDN in the entry */
slapi_sdn_done(&(e->e_sdn));
slapi_sdn_init_normdn_passin(&(e->e_sdn), normdn);
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index aedff9f8f..d341ea7f2 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -781,10 +781,8 @@ static int import_monitor_threads(ImportJob *job, int *status)
}
corestate = current_worker->state & CORESTATE;
if (current_worker->state == ABORTED) {
-LDAPDebug0Args(LDAP_DEBUG_ANY, "import_monitor_threads: current_worker->state is ABORTED\n");
goto error_abort;
} else if ((corestate == QUIT) || (corestate == FINISHED)) {
-LDAPDebug1Arg(LDAP_DEBUG_ANY, "import_monitor_threads: current_worker->state is %s\n", (corestate==QUIT)?"QUIT":"FINISHED");
if (DN_NORM_BT == (DN_NORM_BT & current_worker->state)) {
/* upgrading dn norm (both) is needed */
rc = NEED_DN_NORM_BT; /* Set the RC; Don't abort now;
| 0 |
956dc049368e79e179c26994a67f1590994647a7
|
389ds/389-ds-base
|
Bug 614511 - fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
https://bugzilla.redhat.com/show_bug.cgi?id=614511
Resolves: bug 614511
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
description: Catch possible NULL pointer in cos_cache_query_attr().
|
commit 956dc049368e79e179c26994a67f1590994647a7
Author: Endi S. Dewata <[email protected]>
Date: Mon Jul 12 23:07:05 2010 -0500
Bug 614511 - fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
https://bugzilla.redhat.com/show_bug.cgi?id=614511
Resolves: bug 614511
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
description: Catch possible NULL pointer in cos_cache_query_attr().
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 89902f84b..d2e9bf877 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -2383,9 +2383,9 @@ static int cos_cache_query_attr(cos_cache *ptheCache, vattr_context *context, Sl
}
/* is this entry a child of the target tree(s)? */
- do
+ while(hit == 0 && pTargetTree)
{
- if(pTargetTree) {
+ {
int rc = 0;
char *tval = NULL;
size_t tlen = 0;
@@ -2403,8 +2403,7 @@ static int cos_cache_query_attr(cos_cache *ptheCache, vattr_context *context, Sl
pTargetTree->val = tval;
}
}
-
- if( pTargetTree->val == 0 ||
+ if( pTargetTree->val == 0 ||
slapi_dn_issuffix(pDn, pTargetTree->val) != 0 ||
(views_api && views_entry_exists(views_api, pTargetTree->val, e)) /* might be in a view */
)
@@ -2529,7 +2528,7 @@ static int cos_cache_query_attr(cos_cache *ptheCache, vattr_context *context, Sl
pTargetTree = pTargetTree->list.pNext;
- } while(hit == 0 && pTargetTree);
+ } /* while(hit == 0 && pTargetTree) */
if(hit==0 || merge_mode)
| 0 |
bae33f97c05932c95225beb2dd2b6f76c1970bef
|
389ds/389-ds-base
|
Ticket 50062 - Replace error by warning in the state machine defined in repl5_inc_run
Description: Replace error log severity message from ERR to WARNING
https://pagure.io/389-ds-base/issue/50062
Reviewed by: mreynolds(one line commit rule)
|
commit bae33f97c05932c95225beb2dd2b6f76c1970bef
Author: Mark Reynolds <[email protected]>
Date: Fri Nov 30 10:48:35 2018 -0500
Ticket 50062 - Replace error by warning in the state machine defined in repl5_inc_run
Description: Replace error log severity message from ERR to WARNING
https://pagure.io/389-ds-base/issue/50062
Reviewed by: mreynolds(one line commit rule)
diff --git a/ldap/servers/plugins/replication/repl5_inc_protocol.c b/ldap/servers/plugins/replication/repl5_inc_protocol.c
index 5cd4941de..2aefb795f 100644
--- a/ldap/servers/plugins/replication/repl5_inc_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_inc_protocol.c
@@ -717,7 +717,7 @@ repl5_inc_run(Private_Repl_Protocol *prp)
} else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) ||
event_occurred(prp, EVENT_BACKOFF_EXPIRED)) {
/* this events - should not occur - log a warning and go to sleep */
- slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name,
+ slapi_log_err(SLAPI_LOG_WARNING, repl_plugin_name,
"repl5_inc_run - %s: "
"Event %s should not occur in state %s; going to sleep\n",
agmt_get_long_name(prp->agmt), e1 ? event2name(EVENT_WINDOW_CLOSED) : event2name(EVENT_BACKOFF_EXPIRED), state2name(current_state));
diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c
index 871a7688c..853467eb9 100644
--- a/ldap/servers/plugins/replication/windows_inc_protocol.c
+++ b/ldap/servers/plugins/replication/windows_inc_protocol.c
@@ -364,7 +364,7 @@ windows_inc_run(Private_Repl_Protocol *prp)
} else if ((e1 = event_occurred(prp, EVENT_WINDOW_CLOSED)) ||
event_occurred(prp, EVENT_BACKOFF_EXPIRED)) {
/* this events - should not occur - log a warning and go to sleep */
- slapi_log_err(SLAPI_LOG_ERR, windows_repl_plugin_name,
+ slapi_log_err(SLAPI_LOG_WARNING, windows_repl_plugin_name,
"windows_inc_run - %s: "
"Event %s should not occur in state %s; going to sleep\n",
agmt_get_long_name(prp->agmt),
| 0 |
fa2fd420737603164e096473949bf1ed42ca6f21
|
389ds/389-ds-base
|
Entry USN
First cut for implementing Entry USN.
See http://directory.fedoraproject.org/wiki/Entry_USN for the design details.
This change includes a bug fix for "db2ldif -r"; event queue system was not
shutdown before the plugins are closed, which could have crashed the command
line utility.
|
commit fa2fd420737603164e096473949bf1ed42ca6f21
Author: Noriko Hosoi <[email protected]>
Date: Tue Jul 21 12:59:38 2009 -0700
Entry USN
First cut for implementing Entry USN.
See http://directory.fedoraproject.org/wiki/Entry_USN for the design details.
This change includes a bug fix for "db2ldif -r"; event queue system was not
shutdown before the plugins are closed, which could have crashed the command
line utility.
diff --git a/Makefile.am b/Makefile.am
index 05cb3566e..5ce6ccbe7 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -157,9 +157,10 @@ serverplugin_LTLIBRARIES = libacl-plugin.la libattr-unique-plugin.la \
libhttp-client-plugin.la liblinkedattrs-plugin.la \
libmemberof-plugin.la libpassthru-plugin.la libpwdstorage-plugin.la \
libreferint-plugin.la libreplication-plugin.la libretrocl-plugin.la \
- libroles-plugin.la libschemareload-plugin.la libstatechange-plugin.la \
- libsyntax-plugin.la libviews-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) \
- $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN)
+ libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \
+ libviews-plugin.la libschemareload-plugin.la libusn-plugin.la \
+ $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) \
+ $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN)
nodist_property_DATA = ns-slapd.properties
@@ -314,6 +315,7 @@ task_SCRIPTS = ldap/admin/src/scripts/template-bak2db \
ldap/admin/src/scripts/template-ns-newpwpolicy.pl \
ldap/admin/src/scripts/template-schema-reload.pl \
ldap/admin/src/scripts/template-syntax-validate.pl \
+ ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl \
ldap/admin/src/scripts/template-verify-db.pl \
ldap/admin/src/scripts/template-dbverify
@@ -617,6 +619,7 @@ libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c \
ldap/servers/slapd/back-ldbm/ldbm_search.c \
ldap/servers/slapd/back-ldbm/ldbm_unbind.c \
+ ldap/servers/slapd/back-ldbm/ldbm_usn.c \
ldap/servers/slapd/back-ldbm/ldif2ldbm.c \
ldap/servers/slapd/back-ldbm/dbverify.c \
ldap/servers/slapd/back-ldbm/matchrule.c \
@@ -967,6 +970,15 @@ libsyntax_plugin_la_SOURCES = ldap/servers/plugins/syntaxes/bin.c \
libsyntax_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS)
libsyntax_plugin_la_LDFLAGS = -avoid-version
+#------------------------
+# libusn-plugin
+#------------------------
+libusn_plugin_la_SOURCES = ldap/servers/plugins/usn/usn.c \
+ ldap/servers/plugins/usn/usn_cleanup.c
+
+libusn_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS)
+libusn_plugin_la_LDFLAGS = -avoid-version
+
#------------------------
# libviews-plugin
#------------------------
diff --git a/Makefile.in b/Makefile.in
old mode 100755
new mode 100644
index 9f3208afa..865a7dbf7
--- a/Makefile.in
+++ b/Makefile.in
@@ -173,6 +173,7 @@ am_libback_ldbm_la_OBJECTS = \
ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_modrdn.lo \
ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_search.lo \
ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_unbind.lo \
+ ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.lo \
ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldif2ldbm.lo \
ldap/servers/slapd/back-ldbm/libback_ldbm_la-dbverify.lo \
ldap/servers/slapd/back-ldbm/libback_ldbm_la-matchrule.lo \
@@ -688,6 +689,14 @@ libsyntax_plugin_la_OBJECTS = $(am_libsyntax_plugin_la_OBJECTS)
libsyntax_plugin_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
$(libsyntax_plugin_la_LDFLAGS) $(LDFLAGS) -o $@
+libusn_plugin_la_LIBADD =
+am_libusn_plugin_la_OBJECTS = \
+ ldap/servers/plugins/usn/libusn_plugin_la-usn.lo \
+ ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.lo
+libusn_plugin_la_OBJECTS = $(am_libusn_plugin_la_OBJECTS)
+libusn_plugin_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+ $(libusn_plugin_la_LDFLAGS) $(LDFLAGS) -o $@
libviews_plugin_la_LIBADD =
am_libviews_plugin_la_OBJECTS = \
ldap/servers/plugins/views/libviews_plugin_la-views.lo
@@ -899,10 +908,11 @@ SOURCES = $(libavl_a_SOURCES) $(libldaputil_a_SOURCES) \
$(libretrocl_plugin_la_SOURCES) $(libroles_plugin_la_SOURCES) \
$(libschemareload_plugin_la_SOURCES) $(libslapd_la_SOURCES) \
$(libstatechange_plugin_la_SOURCES) \
- $(libsyntax_plugin_la_SOURCES) $(libviews_plugin_la_SOURCES) \
- $(dbscan_bin_SOURCES) $(dsktune_bin_SOURCES) \
- $(infadd_bin_SOURCES) $(ldap_agent_bin_SOURCES) \
- $(ldclt_bin_SOURCES) $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \
+ $(libsyntax_plugin_la_SOURCES) $(libusn_plugin_la_SOURCES) \
+ $(libviews_plugin_la_SOURCES) $(dbscan_bin_SOURCES) \
+ $(dsktune_bin_SOURCES) $(infadd_bin_SOURCES) \
+ $(ldap_agent_bin_SOURCES) $(ldclt_bin_SOURCES) \
+ $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \
$(migratecred_bin_SOURCES) $(mmldif_bin_SOURCES) \
$(ns_slapd_SOURCES) $(pwdhash_bin_SOURCES) \
$(rsearch_bin_SOURCES)
@@ -927,13 +937,14 @@ DIST_SOURCES = $(libavl_a_SOURCES) $(libldaputil_a_SOURCES) \
$(libschemareload_plugin_la_SOURCES) \
$(am__libslapd_la_SOURCES_DIST) \
$(libstatechange_plugin_la_SOURCES) \
- $(libsyntax_plugin_la_SOURCES) $(libviews_plugin_la_SOURCES) \
- $(dbscan_bin_SOURCES) $(dsktune_bin_SOURCES) \
- $(infadd_bin_SOURCES) $(ldap_agent_bin_SOURCES) \
- $(am__ldclt_bin_SOURCES_DIST) $(ldif_bin_SOURCES) \
- $(makstrdb_SOURCES) $(migratecred_bin_SOURCES) \
- $(mmldif_bin_SOURCES) $(am__ns_slapd_SOURCES_DIST) \
- $(pwdhash_bin_SOURCES) $(rsearch_bin_SOURCES)
+ $(libsyntax_plugin_la_SOURCES) $(libusn_plugin_la_SOURCES) \
+ $(libviews_plugin_la_SOURCES) $(dbscan_bin_SOURCES) \
+ $(dsktune_bin_SOURCES) $(infadd_bin_SOURCES) \
+ $(ldap_agent_bin_SOURCES) $(am__ldclt_bin_SOURCES_DIST) \
+ $(ldif_bin_SOURCES) $(makstrdb_SOURCES) \
+ $(migratecred_bin_SOURCES) $(mmldif_bin_SOURCES) \
+ $(am__ns_slapd_SOURCES_DIST) $(pwdhash_bin_SOURCES) \
+ $(rsearch_bin_SOURCES)
man1dir = $(mandir)/man1
man8dir = $(mandir)/man8
NROFF = nroff
@@ -1252,9 +1263,10 @@ serverplugin_LTLIBRARIES = libacl-plugin.la libattr-unique-plugin.la \
libhttp-client-plugin.la liblinkedattrs-plugin.la \
libmemberof-plugin.la libpassthru-plugin.la libpwdstorage-plugin.la \
libreferint-plugin.la libreplication-plugin.la libretrocl-plugin.la \
- libroles-plugin.la libschemareload-plugin.la libstatechange-plugin.la \
- libsyntax-plugin.la libviews-plugin.la $(LIBPAM_PASSTHRU_PLUGIN) \
- $(LIBDNA_PLUGIN) $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN)
+ libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \
+ libviews-plugin.la libschemareload-plugin.la libusn-plugin.la \
+ $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) \
+ $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN)
nodist_property_DATA = ns-slapd.properties
noinst_LIBRARIES = libavl.a libldaputil.a
@@ -1408,6 +1420,7 @@ task_SCRIPTS = ldap/admin/src/scripts/template-bak2db \
ldap/admin/src/scripts/template-ns-newpwpolicy.pl \
ldap/admin/src/scripts/template-schema-reload.pl \
ldap/admin/src/scripts/template-syntax-validate.pl \
+ ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl \
ldap/admin/src/scripts/template-verify-db.pl \
ldap/admin/src/scripts/template-dbverify
@@ -1657,6 +1670,7 @@ libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \
ldap/servers/slapd/back-ldbm/ldbm_modrdn.c \
ldap/servers/slapd/back-ldbm/ldbm_search.c \
ldap/servers/slapd/back-ldbm/ldbm_unbind.c \
+ ldap/servers/slapd/back-ldbm/ldbm_usn.c \
ldap/servers/slapd/back-ldbm/ldif2ldbm.c \
ldap/servers/slapd/back-ldbm/dbverify.c \
ldap/servers/slapd/back-ldbm/matchrule.c \
@@ -2000,6 +2014,15 @@ libsyntax_plugin_la_SOURCES = ldap/servers/plugins/syntaxes/bin.c \
libsyntax_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS)
libsyntax_plugin_la_LDFLAGS = -avoid-version
+#------------------------
+# libusn-plugin
+#------------------------
+libusn_plugin_la_SOURCES = ldap/servers/plugins/usn/usn.c \
+ ldap/servers/plugins/usn/usn_cleanup.c
+
+libusn_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS)
+libusn_plugin_la_LDFLAGS = -avoid-version
+
#------------------------
# libviews-plugin
#------------------------
@@ -2626,6 +2649,9 @@ ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_search.lo: \
ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_unbind.lo: \
ldap/servers/slapd/back-ldbm/$(am__dirstamp) \
ldap/servers/slapd/back-ldbm/$(DEPDIR)/$(am__dirstamp)
+ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.lo: \
+ ldap/servers/slapd/back-ldbm/$(am__dirstamp) \
+ ldap/servers/slapd/back-ldbm/$(DEPDIR)/$(am__dirstamp)
ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldif2ldbm.lo: \
ldap/servers/slapd/back-ldbm/$(am__dirstamp) \
ldap/servers/slapd/back-ldbm/$(DEPDIR)/$(am__dirstamp)
@@ -3747,6 +3773,20 @@ ldap/servers/plugins/syntaxes/libsyntax_plugin_la-value.lo: \
ldap/servers/plugins/syntaxes/$(DEPDIR)/$(am__dirstamp)
libsyntax-plugin.la: $(libsyntax_plugin_la_OBJECTS) $(libsyntax_plugin_la_DEPENDENCIES)
$(libsyntax_plugin_la_LINK) -rpath $(serverplugindir) $(libsyntax_plugin_la_OBJECTS) $(libsyntax_plugin_la_LIBADD) $(LIBS)
+ldap/servers/plugins/usn/$(am__dirstamp):
+ @$(MKDIR_P) ldap/servers/plugins/usn
+ @: > ldap/servers/plugins/usn/$(am__dirstamp)
+ldap/servers/plugins/usn/$(DEPDIR)/$(am__dirstamp):
+ @$(MKDIR_P) ldap/servers/plugins/usn/$(DEPDIR)
+ @: > ldap/servers/plugins/usn/$(DEPDIR)/$(am__dirstamp)
+ldap/servers/plugins/usn/libusn_plugin_la-usn.lo: \
+ ldap/servers/plugins/usn/$(am__dirstamp) \
+ ldap/servers/plugins/usn/$(DEPDIR)/$(am__dirstamp)
+ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.lo: \
+ ldap/servers/plugins/usn/$(am__dirstamp) \
+ ldap/servers/plugins/usn/$(DEPDIR)/$(am__dirstamp)
+libusn-plugin.la: $(libusn_plugin_la_OBJECTS) $(libusn_plugin_la_DEPENDENCIES)
+ $(libusn_plugin_la_LINK) -rpath $(serverplugindir) $(libusn_plugin_la_OBJECTS) $(libusn_plugin_la_LIBADD) $(LIBS)
ldap/servers/plugins/views/$(am__dirstamp):
@$(MKDIR_P) ldap/servers/plugins/views
@: > ldap/servers/plugins/views/$(am__dirstamp)
@@ -4484,6 +4524,10 @@ mostlyclean-compile:
-rm -f ldap/servers/plugins/uiduniq/libattr_unique_plugin_la-7bit.lo
-rm -f ldap/servers/plugins/uiduniq/libattr_unique_plugin_la-uid.$(OBJEXT)
-rm -f ldap/servers/plugins/uiduniq/libattr_unique_plugin_la-uid.lo
+ -rm -f ldap/servers/plugins/usn/libusn_plugin_la-usn.$(OBJEXT)
+ -rm -f ldap/servers/plugins/usn/libusn_plugin_la-usn.lo
+ -rm -f ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.$(OBJEXT)
+ -rm -f ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.lo
-rm -f ldap/servers/plugins/views/libviews_plugin_la-views.$(OBJEXT)
-rm -f ldap/servers/plugins/views/libviews_plugin_la-views.lo
-rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ancestorid.$(OBJEXT)
@@ -4572,6 +4616,8 @@ mostlyclean-compile:
-rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_search.lo
-rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_unbind.$(OBJEXT)
-rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_unbind.lo
+ -rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.$(OBJEXT)
+ -rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.lo
-rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldif2ldbm.$(OBJEXT)
-rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldif2ldbm.lo
-rm -f ldap/servers/slapd/back-ldbm/libback_ldbm_la-matchrule.$(OBJEXT)
@@ -5144,6 +5190,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/syntaxes/$(DEPDIR)/libsyntax_plugin_la-value.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/uiduniq/$(DEPDIR)/libattr_unique_plugin_la-7bit.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/uiduniq/$(DEPDIR)/libattr_unique_plugin_la-uid.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/usn/$(DEPDIR)/libusn_plugin_la-usn.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/usn/$(DEPDIR)/libusn_plugin_la-usn_cleanup.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/views/$(DEPDIR)/libviews_plugin_la-views.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/ldap_agent_bin-agtmmap.Po@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-add.Plo@am__quote@
@@ -5316,6 +5364,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldbm_modrdn.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldbm_search.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldbm_unbind.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldbm_usn.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldif2ldbm.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-matchrule.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-misc.Plo@am__quote@
@@ -5994,6 +6043,13 @@ ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_unbind.lo: ldap/servers/slapd/
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libback_ldbm_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_unbind.lo `test -f 'ldap/servers/slapd/back-ldbm/ldbm_unbind.c' || echo '$(srcdir)/'`ldap/servers/slapd/back-ldbm/ldbm_unbind.c
+ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.lo: ldap/servers/slapd/back-ldbm/ldbm_usn.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libback_ldbm_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.lo -MD -MP -MF ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldbm_usn.Tpo -c -o ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.lo `test -f 'ldap/servers/slapd/back-ldbm/ldbm_usn.c' || echo '$(srcdir)/'`ldap/servers/slapd/back-ldbm/ldbm_usn.c
+@am__fastdepCC_TRUE@ mv -f ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldbm_usn.Tpo ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldbm_usn.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/slapd/back-ldbm/ldbm_usn.c' object='ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libback_ldbm_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldbm_usn.lo `test -f 'ldap/servers/slapd/back-ldbm/ldbm_usn.c' || echo '$(srcdir)/'`ldap/servers/slapd/back-ldbm/ldbm_usn.c
+
ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldif2ldbm.lo: ldap/servers/slapd/back-ldbm/ldif2ldbm.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libback_ldbm_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldif2ldbm.lo -MD -MP -MF ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldif2ldbm.Tpo -c -o ldap/servers/slapd/back-ldbm/libback_ldbm_la-ldif2ldbm.lo `test -f 'ldap/servers/slapd/back-ldbm/ldif2ldbm.c' || echo '$(srcdir)/'`ldap/servers/slapd/back-ldbm/ldif2ldbm.c
@am__fastdepCC_TRUE@ mv -f ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldif2ldbm.Tpo ldap/servers/slapd/back-ldbm/$(DEPDIR)/libback_ldbm_la-ldif2ldbm.Plo
@@ -7912,6 +7968,20 @@ ldap/servers/plugins/syntaxes/libsyntax_plugin_la-value.lo: ldap/servers/plugins
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libsyntax_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/syntaxes/libsyntax_plugin_la-value.lo `test -f 'ldap/servers/plugins/syntaxes/value.c' || echo '$(srcdir)/'`ldap/servers/plugins/syntaxes/value.c
+ldap/servers/plugins/usn/libusn_plugin_la-usn.lo: ldap/servers/plugins/usn/usn.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libusn_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/usn/libusn_plugin_la-usn.lo -MD -MP -MF ldap/servers/plugins/usn/$(DEPDIR)/libusn_plugin_la-usn.Tpo -c -o ldap/servers/plugins/usn/libusn_plugin_la-usn.lo `test -f 'ldap/servers/plugins/usn/usn.c' || echo '$(srcdir)/'`ldap/servers/plugins/usn/usn.c
+@am__fastdepCC_TRUE@ mv -f ldap/servers/plugins/usn/$(DEPDIR)/libusn_plugin_la-usn.Tpo ldap/servers/plugins/usn/$(DEPDIR)/libusn_plugin_la-usn.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/plugins/usn/usn.c' object='ldap/servers/plugins/usn/libusn_plugin_la-usn.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libusn_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/usn/libusn_plugin_la-usn.lo `test -f 'ldap/servers/plugins/usn/usn.c' || echo '$(srcdir)/'`ldap/servers/plugins/usn/usn.c
+
+ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.lo: ldap/servers/plugins/usn/usn_cleanup.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libusn_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.lo -MD -MP -MF ldap/servers/plugins/usn/$(DEPDIR)/libusn_plugin_la-usn_cleanup.Tpo -c -o ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.lo `test -f 'ldap/servers/plugins/usn/usn_cleanup.c' || echo '$(srcdir)/'`ldap/servers/plugins/usn/usn_cleanup.c
+@am__fastdepCC_TRUE@ mv -f ldap/servers/plugins/usn/$(DEPDIR)/libusn_plugin_la-usn_cleanup.Tpo ldap/servers/plugins/usn/$(DEPDIR)/libusn_plugin_la-usn_cleanup.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/plugins/usn/usn_cleanup.c' object='ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libusn_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/usn/libusn_plugin_la-usn_cleanup.lo `test -f 'ldap/servers/plugins/usn/usn_cleanup.c' || echo '$(srcdir)/'`ldap/servers/plugins/usn/usn_cleanup.c
+
ldap/servers/plugins/views/libviews_plugin_la-views.lo: ldap/servers/plugins/views/views.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libviews_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/views/libviews_plugin_la-views.lo -MD -MP -MF ldap/servers/plugins/views/$(DEPDIR)/libviews_plugin_la-views.Tpo -c -o ldap/servers/plugins/views/libviews_plugin_la-views.lo `test -f 'ldap/servers/plugins/views/views.c' || echo '$(srcdir)/'`ldap/servers/plugins/views/views.c
@am__fastdepCC_TRUE@ mv -f ldap/servers/plugins/views/$(DEPDIR)/libviews_plugin_la-views.Tpo ldap/servers/plugins/views/$(DEPDIR)/libviews_plugin_la-views.Plo
@@ -9128,6 +9198,7 @@ clean-libtool:
-rm -rf ldap/servers/plugins/statechange/.libs ldap/servers/plugins/statechange/_libs
-rm -rf ldap/servers/plugins/syntaxes/.libs ldap/servers/plugins/syntaxes/_libs
-rm -rf ldap/servers/plugins/uiduniq/.libs ldap/servers/plugins/uiduniq/_libs
+ -rm -rf ldap/servers/plugins/usn/.libs ldap/servers/plugins/usn/_libs
-rm -rf ldap/servers/plugins/views/.libs ldap/servers/plugins/views/_libs
-rm -rf ldap/servers/slapd/.libs ldap/servers/slapd/_libs
-rm -rf ldap/servers/slapd/back-ldbm/.libs ldap/servers/slapd/back-ldbm/_libs
@@ -9642,6 +9713,8 @@ distclean-generic:
-rm -f ldap/servers/plugins/syntaxes/$(am__dirstamp)
-rm -f ldap/servers/plugins/uiduniq/$(DEPDIR)/$(am__dirstamp)
-rm -f ldap/servers/plugins/uiduniq/$(am__dirstamp)
+ -rm -f ldap/servers/plugins/usn/$(DEPDIR)/$(am__dirstamp)
+ -rm -f ldap/servers/plugins/usn/$(am__dirstamp)
-rm -f ldap/servers/plugins/views/$(DEPDIR)/$(am__dirstamp)
-rm -f ldap/servers/plugins/views/$(am__dirstamp)
-rm -f ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
@@ -9682,7 +9755,7 @@ clean-am: clean-binPROGRAMS clean-generic clean-libtool \
distclean: distclean-am
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
- -rm -rf ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/linkedattrs/$(DEPDIR) ldap/servers/plugins/memberof/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/schema_reload/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR)
+ -rm -rf ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/linkedattrs/$(DEPDIR) ldap/servers/plugins/memberof/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/schema_reload/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/usn/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-hdr distclean-libtool distclean-tags
@@ -9724,7 +9797,7 @@ installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -rf $(top_srcdir)/autom4te.cache
- -rm -rf ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/linkedattrs/$(DEPDIR) ldap/servers/plugins/memberof/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/schema_reload/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR)
+ -rm -rf ldap/libraries/libavl/$(DEPDIR) ldap/servers/plugins/acl/$(DEPDIR) ldap/servers/plugins/bitwise/$(DEPDIR) ldap/servers/plugins/chainingdb/$(DEPDIR) ldap/servers/plugins/collation/$(DEPDIR) ldap/servers/plugins/cos/$(DEPDIR) ldap/servers/plugins/distrib/$(DEPDIR) ldap/servers/plugins/dna/$(DEPDIR) ldap/servers/plugins/http/$(DEPDIR) ldap/servers/plugins/linkedattrs/$(DEPDIR) ldap/servers/plugins/memberof/$(DEPDIR) ldap/servers/plugins/pam_passthru/$(DEPDIR) ldap/servers/plugins/passthru/$(DEPDIR) ldap/servers/plugins/presence/$(DEPDIR) ldap/servers/plugins/pwdstorage/$(DEPDIR) ldap/servers/plugins/referint/$(DEPDIR) ldap/servers/plugins/replication/$(DEPDIR) ldap/servers/plugins/retrocl/$(DEPDIR) ldap/servers/plugins/rever/$(DEPDIR) ldap/servers/plugins/roles/$(DEPDIR) ldap/servers/plugins/schema_reload/$(DEPDIR) ldap/servers/plugins/shared/$(DEPDIR) ldap/servers/plugins/statechange/$(DEPDIR) ldap/servers/plugins/syntaxes/$(DEPDIR) ldap/servers/plugins/uiduniq/$(DEPDIR) ldap/servers/plugins/usn/$(DEPDIR) ldap/servers/plugins/views/$(DEPDIR) ldap/servers/slapd/$(DEPDIR) ldap/servers/slapd/back-ldbm/$(DEPDIR) ldap/servers/slapd/tools/$(DEPDIR) ldap/servers/slapd/tools/ldclt/$(DEPDIR) ldap/servers/slapd/tools/rsearch/$(DEPDIR) ldap/servers/snmp/$(DEPDIR) ldap/systools/$(DEPDIR) lib/base/$(DEPDIR) lib/ldaputil/$(DEPDIR) lib/libaccess/$(DEPDIR) lib/libadmin/$(DEPDIR) lib/libsi18n/$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
diff --git a/aclocal.m4 b/aclocal.m4
old mode 100755
new mode 100644
diff --git a/config.h.in b/config.h.in
old mode 100755
new mode 100644
diff --git a/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in b/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in
new file mode 100644
index 000000000..bd8c25753
--- /dev/null
+++ b/ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in
@@ -0,0 +1,180 @@
+#{{PERL-EXEC}}
+#
+# BEGIN COPYRIGHT BLOCK
+# This Program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free Software
+# Foundation; version 2 of the License.
+#
+# This Program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place, Suite 330, Boston, MA 02111-1307 USA.
+#
+# In addition, as a special exception, Red Hat, Inc. gives You the additional
+# right to link the code of this Program with code not covered under the GNU
+# General Public License ("Non-GPL Code") and to distribute linked combinations
+# including the two, subject to the limitations in this paragraph. Non-GPL Code
+# permitted under this exception must only link to the code of this Program
+# through those well defined interfaces identified in the file named EXCEPTION
+# found in the source code files (the "Approved Interfaces"). The files of
+# Non-GPL Code may instantiate templates or use macros or inline functions from
+# the Approved Interfaces without causing the resulting work to be covered by
+# the GNU General Public License. Only Red Hat, Inc. may make changes or
+# additions to the list of Approved Interfaces. You must obey the GNU General
+# Public License in all respects for all of the Program code and other code used
+# in conjunction with the Program except the Non-GPL Code covered by this
+# exception. If you modify this file, you may extend this exception to your
+# version of the file, but you are not obligated to do so. If you do not wish to
+# provide this exception without modification, you must delete this exception
+# statement from your version and license this file solely under the GPL without
+# exception.
+#
+#
+# Copyright (C) 2009 Red Hat, Inc.
+# All rights reserved.
+# END COPYRIGHT BLOCK
+#
+
+sub usage {
+ print(STDERR "Usage: $0 [-v] -D rootdn { -w password | -w - | -j filename } -s suffix | -n backend [ -m maxusn_to_delete ]\n");
+ print(STDERR " Opts: -D rootdn - Directory Manager\n");
+ print(STDERR " : -w password - Directory Manager's password\n");
+ print(STDERR " : -w - - Prompt for Directory Manager's password\n");
+ print(STDERR " : -j filename - Read Directory Manager's password from file\n");
+ print(STDERR " : -s suffix - Suffix where USN tombstone entries are cleaned up\n");
+ print(STDERR " : -n backend - Backend instance in which USN tombstone entries \n");
+ print(STDERR " are cleaned up (alternative to suffix)\n");
+ print(STDERR " : -m maxusn_to_delete - USN tombstone entries are deleted up to \n");
+ print(STDERR " the entry with maxusn_to_delete\n");
+ print(STDERR " : -v - verbose\n");
+}
+
+$rootdn = "";
+$passwd = "";
+$passwdfile = "";
+$args = "";
+$suffix_arg = "";
+$backend_arg = "";
+$maxusn_arg = "";
+$verbose = 0;
+
+$prefix = "{{DS-ROOT}}";
+
+$ENV{'PATH'} = "$prefix@ldapsdk_bindir@:$prefix/usr/bin:@ldapsdk_bindir@:/usr/bin";
+$ENV{'LD_LIBRARY_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib";
+$ENV{'SHLIB_PATH'} = "$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@:/usr/lib";
+
+$i = 0;
+while ($i <= $#ARGV)
+{
+ if ("$ARGV[$i]" eq "-s")
+ {
+ # suffix
+ $i++; $suffix_arg = $ARGV[$i];
+ }
+ elsif ("$ARGV[$i]" eq "-n")
+ {
+ # backend
+ $i++; $backend_arg = $ARGV[$i];
+ }
+ elsif ("$ARGV[$i]" eq "-m")
+ {
+ # max usn
+ $i++; $maxusn_arg = $ARGV[$i];
+ }
+ elsif ("$ARGV[$i]" eq "-D")
+ {
+ # Directory Manager
+ $i++; $rootdn = $ARGV[$i];
+ }
+ elsif ("$ARGV[$i]" eq "-w")
+ {
+ # Directory Manager's password
+ $i++; $passwd = $ARGV[$i];
+ }
+ elsif ("$ARGV[$i]" eq "-j")
+ {
+ # Read Directory Manager's password from a file
+ $i++; $passwdfile = $ARGV[$i];
+ }
+ elsif ("$ARGV[$i]" eq "-v")
+ {
+ # verbose
+ $verbose = 1;
+ }
+ else
+ {
+ &usage; exit(1);
+ }
+ $i++;
+}
+
+if ($passwdfile ne ""){
+ # Open the file and read the password from it
+ unless (open (RPASS, $passwdfile)) {
+ die "Error, cannot open password file $passwdfile\n";
+ }
+ $passwd = <RPASS>;
+ chomp($passwd);
+ close(RPASS);
+} elsif ($passwd eq "-"){
+ # Read the password interactively from the terminal
+ print "Bind Password: ";
+ # Disable console echo
+ system("stty -echo");
+ # read the answer
+ $passwd = <STDIN>;
+ # Enable console echo
+ system("stty echo");
+ print "\n";
+ chop($passwd); # chop removes the last character unconditionally (the newline here; chomp would be safer)
+}
+
+if ( $rootdn eq "" || $passwd eq "" )
+{
+ &usage;
+ exit(1);
+}
+
+$vstr = "";
+if ($verbose != 0)
+{
+ $vstr = "-v";
+}
+
+# Use a timestamp as part of the task entry name
+($s, $m, $h, $dy, $mn, $yr, $wdy, $ydy, $r) = localtime(time);
+$mn++; $yr += 1900;
+$taskname = "usn_cleanup_${yr}_${mn}_${dy}_${h}_${m}_${s}";
+
+# Build the task entry to add
+$dn = "dn: cn=$taskname, cn=USN tombstone cleanup task, cn=tasks, cn=config\n";
+$misc = "changetype: add\nobjectclass: top\nobjectclass: extensibleObject\n";
+$cn = "cn: $taskname\n";
+
+if ( $suffix_arg eq "" && $backend_arg eq "" )
+{
+ &usage;
+ exit(1);
+}
+elsif ( $suffix_arg ne "" )
+{
+ $args = "suffix: $suffix_arg\n";
+}
+else
+{
+ $args = "backend: $backend_arg\n";
+}
+
+if ( $maxusn_arg ne "" )
+{
+ $args = $args . "maxusn_to_delete: $maxusn_arg\n";
+}
+
+$entry = "${dn}${misc}${cn}${basedn}${args}";
+open(FOO, "| ldapmodify $vstr -h {{SERVER-NAME}} -p {{SERVER-PORT}} -D \"$rootdn\" -w \"$passwd\" -a" );
+print(FOO "$entry");
+close(FOO);
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index d0e292784..c2ac588f0 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -677,6 +677,17 @@ nsslapd-plugintype: preoperation
nsslapd-pluginenabled: off
nsslapd-plugin-depends-on-type: database
+dn: cn=USN,cn=plugins,cn=config
+objectclass: top
+objectclass: nsSlapdPlugin
+objectclass: extensibleObject
+cn: USN
+nsslapd-pluginpath: libusn-plugin
+nsslapd-plugininitfunc: usn_init
+nsslapd-plugintype: object
+nsslapd-pluginenabled: off
+nsslapd-plugin-depends-on-type: database
+
dn: cn=ldbm database,cn=plugins,cn=config
objectclass: top
objectclass: nsSlapdPlugin
@@ -724,6 +735,14 @@ cn: entrydn
nssystemindex: true
nsindextype: eq
+dn: cn=entryusn,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
+objectclass: top
+objectclass: nsIndex
+cn: entryusn
+nssystemindex: true
+nsindextype: eq
+nsmatchingrule: integerOrderingMatch
+
dn: cn=givenName,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
objectclass: top
objectclass: nsIndex
diff --git a/ldap/servers/plugins/usn/usn.c b/ldap/servers/plugins/usn/usn.c
new file mode 100644
index 000000000..2bb389a2b
--- /dev/null
+++ b/ldap/servers/plugins/usn/usn.c
@@ -0,0 +1,570 @@
+/** BEGIN COPYRIGHT BLOCK
+ * This Program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; version 2 of the License.
+ *
+ * This Program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * In addition, as a special exception, Red Hat, Inc. gives You the additional
+ * right to link the code of this Program with code not covered under the GNU
+ * General Public License ("Non-GPL Code") and to distribute linked combinations
+ * including the two, subject to the limitations in this paragraph. Non-GPL Code
+ * permitted under this exception must only link to the code of this Program
+ * through those well defined interfaces identified in the file named EXCEPTION
+ * found in the source code files (the "Approved Interfaces"). The files of
+ * Non-GPL Code may instantiate templates or use macros or inline functions from
+ * the Approved Interfaces without causing the resulting work to be covered by
+ * the GNU General Public License. Only Red Hat, Inc. may make changes or
+ * additions to the list of Approved Interfaces. You must obey the GNU General
+ * Public License in all respects for all of the Program code and other code used
+ * in conjunction with the Program except the Non-GPL Code covered by this
+ * exception. If you modify this file, you may extend this exception to your
+ * version of the file, but you are not obligated to do so. If you do not wish to
+ * provide this exception without modification, you must delete this exception
+ * statement from your version and license this file solely under the GPL without
+ * exception.
+ *
+ *
+ * Copyright (C) 2009 Red Hat, Inc.
+ * All rights reserved.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "usn.h"
+
+static Slapi_PluginDesc pdesc = {
+ "USN", PLUGIN_MAGIC_VENDOR_STR, PRODUCTTEXT,
+ "USN (Update Sequence Number) plugin" };
+
+static CSNGen *_usn_csngen = NULL;
+
+static void *_usn_identity = NULL;
+
+static int usn_preop_init(Slapi_PBlock *pb);
+static int usn_bepreop_init(Slapi_PBlock *pb);
+static int usn_bepostop_init(Slapi_PBlock *pb);
+static int usn_rootdse_init();
+
+static int usn_preop_delete(Slapi_PBlock *pb);
+static int usn_bepreop_add(Slapi_PBlock *pb);
+static int usn_bepreop_delete(Slapi_PBlock *pb);
+static int usn_bepreop_modify(Slapi_PBlock *pb);
+static int usn_bepostop(Slapi_PBlock *pb);
+static int usn_start(Slapi_PBlock *pb);
+static int usn_close(Slapi_PBlock *pb);
+static int usn_get_attr(Slapi_PBlock *pb, const char* type, void *value);
+
+static int usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e,
+ Slapi_Entry* entryAfter, int *returncode, char *returntext, void *arg);
+
+/*
+ * Register USN plugin
+ * Note: USN counter initialization is done in the backend (ldbm_usn_init).
+ */
+int
+usn_init(Slapi_PBlock *pb)
+{
+ int rc = 0;
+ void *identity = NULL;
+ int enabled = 0;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_init\n");
+
+ slapi_pblock_get(pb, SLAPI_PLUGIN_ENABLED, &enabled);
+
+ if (!enabled) {
+ /* not enabled */
+ goto bail;
+ }
+
+ slapi_pblock_get(pb, SLAPI_PLUGIN_IDENTITY, &identity);
+
+ /* slapi_register_plugin always returns SUCCESS (0) */
+ if (slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION,
+ SLAPI_PLUGIN_VERSION_01) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION,
+ (void *)&pdesc) != 0) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "usn_init: failed to register version & description\n");
+ rc = -1;
+ goto bail;
+ }
+ if (slapi_pblock_set(pb, SLAPI_PLUGIN_START_FN,
+ (void *)usn_start) != 0 ) {
+ slapi_pblock_set(pb, SLAPI_PLUGIN_CLOSE_FN,
+ (void *)usn_close) != 0 ||
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "usn_init: failed to register close callback & task\n");
+ rc = -1;
+ goto bail;
+ }
+
+ rc = slapi_register_plugin("preoperation", 1 /* Enabled */,
+ "usn_preop_init", usn_preop_init,
+ "USN preoperation plugin", NULL, identity);
+ rc = slapi_register_plugin("bepreoperation", 1 /* Enabled */,
+ "usn_bepreop_init", usn_bepreop_init,
+ "USN bepreoperation plugin", NULL, identity);
+ rc = slapi_register_plugin("bepostoperation", 1 /* Enabled */,
+ "usn_bepostop_init", usn_bepostop_init,
+ "USN bepostoperation plugin", NULL, identity);
+ usn_set_identity(identity);
+bail:
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_init\n");
+ return rc;
+}
+
+static int
+usn_preop_init(Slapi_PBlock *pb)
+{
+ int rc = 0;
+
+ /* set up csn generator for tombstone */
+ _usn_csngen = csngen_new(USN_CSNGEN_ID, NULL);
+ if (NULL == _usn_csngen) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "usn_preop_init: csngen_new failed\n");
+ rc = -1;
+ }
+
+ if (slapi_pblock_set(pb, SLAPI_PLUGIN_PRE_DELETE_FN,
+ (void *)usn_preop_delete) != 0) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "usn_preop_init: failed to register preop plugin\n");
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static int
+usn_bepreop_init(Slapi_PBlock *pb)
+{
+ int rc = 0;
+
+ if (slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_ADD_FN,
+ (void *)usn_bepreop_add) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_DELETE_FN,
+ (void *)usn_bepreop_delete) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_MODIFY_FN,
+ (void *)usn_bepreop_modify) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_BE_PRE_MODRDN_FN,
+ (void *)usn_bepreop_modify) != 0) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "usn_bepreop_init: failed to register bepreop plugin\n");
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static int
+usn_bepostop_init(Slapi_PBlock *pb)
+{
+ int rc = 0;
+
+ if (slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_ADD_FN,
+ (void *)usn_bepostop) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_DELETE_FN,
+ (void *)usn_bepostop) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_MODIFY_FN,
+ (void *)usn_bepostop) != 0 ||
+ slapi_pblock_set(pb, SLAPI_PLUGIN_BE_POST_MODRDN_FN,
+ (void *)usn_bepostop) != 0) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "usn_bepostop_init: failed to register bepostop plugin\n");
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static int
+usn_rootdse_init()
+{
+ int rc = -1;
+
+ if (slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP,
+ "", LDAP_SCOPE_BASE, "(objectclass=*)",
+ usn_rootdse_search, NULL)) {
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/*
+ * usn_start: usn_rootdse_init -- set rootdse callback to aggregate in rootDSE
+ * usn_cleanup_start -- initialize USN tombstone cleanup task
+ */
+static int
+usn_start(Slapi_PBlock *pb)
+{
+ int rc = 0;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM, "--> usn_start\n");
+
+ rc = usn_rootdse_init();
+ rc |= usn_cleanup_start(pb);
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM, "<-- usn_start\n");
+
+ return rc;
+}
+
+/*
+ * usn_close: release the csn generator used to convert an entry to tombstone
+ */
+static int
+usn_close(Slapi_PBlock *pb)
+{
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM, "--> usn_close\n");
+
+ csngen_free(&_usn_csngen);
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM, "<-- usn_close\n");
+}
+
+/*
+ * usn_preop_delete -- set params to turn the entry to tombstone
+ */
+static int
+usn_preop_delete(Slapi_PBlock *pb)
+{
+ int rc = 0;
+ CSN *csn = NULL;
+ CSN *orig_csn = NULL;
+ Slapi_Operation *op = NULL;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_preop_delete\n");
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ orig_csn = operation_get_csn(op);
+
+ if (NULL == orig_csn) {
+ /*
+ * No other plugin has set the csn yet, so let's set USN's csn.
+ * If other plugin overrides csn and replica_attr_handler, that's fine.
+ */
+ rc = csngen_new_csn(_usn_csngen, &csn, PR_FALSE /* notify */);
+ if (CSN_SUCCESS != rc) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "usn_preop_delete: csngen_new failed (%d)\n", rc);
+ goto bail;
+ }
+ operation_set_csn(op, csn);
+ slapi_operation_set_replica_attr_handler(op, (void *)usn_get_attr);
+ }
+bail:
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_preop_delete\n");
+
+ return rc;
+}
+
+#define KEEP_PREV_USN 1
+
+static void
+_usn_add_next_usn(Slapi_Entry *e, Slapi_Backend *be, int flags)
+{
+ struct berval usn_berval = {0};
+ Slapi_Attr* attr = NULL;
+
+ if (NULL == be->be_usn_counter) {
+ /* USN plugin is not enabled */
+ return;
+ }
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> _usn_add_next_usn\n");
+
+ /* add next USN to the entry; "be" contains the usn counter */
+ usn_berval.bv_val = slapi_ch_smprintf("%" NSPRIu64,
+ slapi_counter_get_value(be->be_usn_counter));
+ usn_berval.bv_len = strlen(usn_berval.bv_val);
+ slapi_entry_attr_find(e, SLAPI_ATTR_ENTRYUSN, &attr);
+ if (NULL == attr) { /* ENTRYUSN does not exist; add it */
+ Slapi_Value *usn_value = slapi_value_new_berval(&usn_berval);
+ slapi_entry_add_value(e, SLAPI_ATTR_ENTRYUSN, usn_value);
+ slapi_value_free(&usn_value);
+ } else { /* ENTRYUSN exists; replace it */
+ struct berval *new_bvals[2];
+ struct berval **prev_values = NULL;
+ if (KEEP_PREV_USN == flags) {
+ if (0 == slapi_attr_get_bervals_copy(attr, &prev_values)) {
+ slapi_entry_add_values(e,
+ SLAPI_ATTR_ENTRYUSN_PREV, prev_values);
+ ber_bvecfree(prev_values);
+ }
+ }
+ new_bvals[0] = &usn_berval;
+ new_bvals[1] = NULL;
+ slapi_entry_attr_replace(e, SLAPI_ATTR_ENTRYUSN, new_bvals);
+ }
+ slapi_ch_free_string(&usn_berval.bv_val);
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- _usn_add_next_usn\n");
+
+ return;
+}
+
+static int
+_usn_mod_next_usn(LDAPMod ***mods, Slapi_Backend *be)
+{
+ Slapi_Mods smods = {0};
+ struct berval *bvals[2];
+ struct berval usn_berval = {0};
+ char counter_buf[USN_COUNTER_BUF_LEN];
+
+ if (NULL == be->be_usn_counter) {
+ /* USN plugin is not enabled */
+ return LDAP_UNWILLING_TO_PERFORM;
+ }
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> _usn_mod_next_usn\n");
+
+ /* add next USN to the mods; "be" contains the usn counter */
+ usn_berval.bv_val = counter_buf;
+ PR_snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64,
+ slapi_counter_get_value(be->be_usn_counter));
+ usn_berval.bv_len = strlen(usn_berval.bv_val);
+ bvals[0] = &usn_berval;
+ bvals[1] = NULL;
+
+ slapi_mods_init_byref(&smods, *mods);
+ /* bvals is duplicated by ber_bvdup in slapi_mods_add_modbvps */
+ slapi_mods_add_modbvps(&smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
+ SLAPI_ATTR_ENTRYUSN, bvals);
+
+ *mods = slapi_mods_get_ldapmods_byref(&smods);
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- _usn_mod_next_usn\n");
+ return LDAP_SUCCESS;
+}
+
+/*
+ * usn_bepreop_add - add next USN to the entry to be added
+ */
+static int
+usn_bepreop_add(Slapi_PBlock *pb)
+{
+ Slapi_Entry *e = NULL;
+ Slapi_Backend *be = NULL;
+ int rc = LDAP_SUCCESS;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_bepreop_add\n");
+
+ /* add next USN to the entry; "be" contains the usn counter */
+ slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
+ if (NULL == e) {
+ rc = LDAP_NO_SUCH_OBJECT;
+ goto bail;
+ }
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ goto bail;
+ }
+ _usn_add_next_usn(e, be, 0);
+bail:
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_bepreop_add\n");
+ return rc;
+}
+
+/*
+ * usn_bepreop_delete -- add/replace next USN to the entry
+ * bepreop_delete is not called if the entry is tombstone
+ */
+static int
+usn_bepreop_delete(Slapi_PBlock *pb)
+{
+ Slapi_Entry *e = NULL;
+ Slapi_Backend *be = NULL;
+ int rc = LDAP_SUCCESS;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_bepreop_delete\n");
+
+ /* add next USN to the entry; "be" contains the usn counter */
+ slapi_pblock_get(pb, SLAPI_DELETE_BEPREOP_ENTRY, &e);
+ if (NULL == e) {
+ rc = LDAP_NO_SUCH_OBJECT;
+ goto bail;
+ }
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ goto bail;
+ }
+ if (e->e_flags & SLAPI_ENTRY_FLAG_TOMBSTONE) {
+ Slapi_Operation *op = NULL;
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ slapi_operation_set_flag(op, OP_FLAG_TOMBSTONE_ENTRY);
+ } else {
+ _usn_add_next_usn(e, be, KEEP_PREV_USN);
+ }
+bail:
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_bepreop_delete\n");
+ return rc;
+}
+
+/*
+ * usn_bepreop_modify - add/replace next USN to the mods;
+ * shared by modify and modrdn
+ */
+static int
+usn_bepreop_modify (Slapi_PBlock *pb)
+{
+ LDAPMod **mods = NULL;
+ Slapi_Backend *be = NULL;
+ int rc = LDAP_SUCCESS;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_bepreop_modify\n");
+
+ /* add/replace next USN to the mods; "be" contains the usn counter */
+ slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ goto bail;
+ }
+ if (LDAP_SUCCESS == _usn_mod_next_usn(&mods, be)) {
+ slapi_pblock_set(pb, SLAPI_MODIFY_MODS, mods);
+ }
+bail:
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_bepreop_modify\n");
+ return rc;
+}
+
+/* count up the counter */
+static int
+usn_bepostop (Slapi_PBlock *pb)
+{
+ int rc = -1;
+ Slapi_Backend *be = NULL;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_bepostop\n");
+
+ /* if op is not successful, don't increment the counter */
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &rc);
+ if (LDAP_SUCCESS != rc) {
+ goto bail;
+ }
+
+ slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+ if (NULL == be) {
+ rc = LDAP_PARAM_ERROR;
+ goto bail;
+ }
+
+ if (be->be_usn_counter) {
+ slapi_counter_increment(be->be_usn_counter);
+ }
+bail:
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_bepostop\n");
+ return rc;
+}
+
+/* mimic replication to turn on create_tombstone_entry */
+static int
+usn_get_attr(Slapi_PBlock *pb, const char* type, void *value)
+{
+ if (0 == strcasecmp(type, "nsds5ReplicaTombstonePurgeInterval")) {
+ *(int *)value = 1;
+ } else {
+ *(int *)value = 0;
+ }
+}
+
+void
+usn_set_identity(void *identity)
+{
+ _usn_identity = identity;
+}
+
+void *
+usn_get_identity()
+{
+ return _usn_identity;
+}
+
+/*
+ * usn_rootdse_search -- callback for the search on the root DSE
+ * add lastusn value per backend
+ *
+ * example:
+ * ldapsearch -b "" -s base "(objectclass=*)" lastusn
+ * dn:
+ * lastusn;userroot: 72
+ * lastusn;testbackend: 15
+ */
+static int
+usn_rootdse_search(Slapi_PBlock *pb, Slapi_Entry* e, Slapi_Entry* entryAfter,
+ int *returncode, char *returntext, void *arg)
+{
+ char *cookie = NULL;
+ Slapi_Backend *be;
+ struct berval *vals[2];
+ struct berval usn_berval;
+ vals[0] = &usn_berval;
+ vals[1] = NULL;
+ char counter_buf[USN_COUNTER_BUF_LEN];
+ int attr_len = 64; /* length of lastusn;<backend_name> */
+ char *attr = (char *)slapi_ch_malloc(attr_len);
+ char *attr_subp = NULL;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_rootdse_search\n");
+
+ usn_berval.bv_val = counter_buf;
+ PR_snprintf(attr, USN_LAST_USN_ATTR_CORE_LEN+1, "%s;", USN_LAST_USN);
+ attr_subp = attr + USN_LAST_USN_ATTR_CORE_LEN;
+ for (be = slapi_get_first_backend(&cookie); be;
+ be = slapi_get_next_backend(cookie)) {
+ if (NULL == be->be_usn_counter) { /* no counter == not a db backend */
+ continue;
+ }
+ /* get a next USN counter from be_usn_counter; then minus 1 from it */
+ PR_snprintf(usn_berval.bv_val, USN_COUNTER_BUF_LEN, "%" NSPRIu64,
+ slapi_counter_get_value(be->be_usn_counter)-1);
+ usn_berval.bv_len = strlen(usn_berval.bv_val);
+
+ if (USN_LAST_USN_ATTR_CORE_LEN + strlen(be->be_name) + 1 > attr_len) {
+ attr_len *= 2;
+ attr = (char *)slapi_ch_realloc(attr, attr_len);
+ attr_subp = attr + USN_LAST_USN_ATTR_CORE_LEN;
+ }
+ PR_snprintf(attr_subp, attr_len - USN_LAST_USN_ATTR_CORE_LEN,
+ "%s", be->be_name);
+ slapi_entry_attr_replace(e, attr, vals);
+ }
+
+ slapi_ch_free_string(&cookie);
+ slapi_ch_free_string(&attr);
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_rootdse_search\n");
+ return SLAPI_DSE_CALLBACK_OK;
+}
diff --git a/ldap/servers/plugins/usn/usn.h b/ldap/servers/plugins/usn/usn.h
new file mode 100644
index 000000000..cf0cd1842
--- /dev/null
+++ b/ldap/servers/plugins/usn/usn.h
@@ -0,0 +1,57 @@
+/** BEGIN COPYRIGHT BLOCK
+ * This Program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; version 2 of the License.
+ *
+ * This Program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * In addition, as a special exception, Red Hat, Inc. gives You the additional
+ * right to link the code of this Program with code not covered under the GNU
+ * General Public License ("Non-GPL Code") and to distribute linked combinations
+ * including the two, subject to the limitations in this paragraph. Non-GPL Code
+ * permitted under this exception must only link to the code of this Program
+ * through those well defined interfaces identified in the file named EXCEPTION
+ * found in the source code files (the "Approved Interfaces"). The files of
+ * Non-GPL Code may instantiate templates or use macros or inline functions from
+ * the Approved Interfaces without causing the resulting work to be covered by
+ * the GNU General Public License. Only Red Hat, Inc. may make changes or
+ * additions to the list of Approved Interfaces. You must obey the GNU General
+ * Public License in all respects for all of the Program code and other code used
+ * in conjunction with the Program except the Non-GPL Code covered by this
+ * exception. If you modify this file, you may extend this exception to your
+ * version of the file, but you are not obligated to do so. If you do not wish to
+ * provide this exception without modification, you must delete this exception
+ * statement from your version and license this file solely under the GPL without
+ * exception.
+ *
+ *
+ * Copyright (C) 2009 Red Hat, Inc.
+ * All rights reserved.
+ * END COPYRIGHT BLOCK **/
+
+#include <string.h>
+#include "slap.h"
+#include "slapi-plugin.h"
+
+#define USN_PLUGIN_SUBSYSTEM "usn-plugin"
+
+#define USN_CSNGEN_ID 65535
+
+#define USN_LAST_USN "lastusn"
+#define USN_LAST_USN_ATTR_CORE_LEN 8 /* lastusn; */
+
+#define USN_COUNTER_BUF_LEN 32 /* enough size for 64 bit integers */
+
+/* usn.c */
+void usn_set_identity(void *identity);
+void *usn_get_identity();
+
+/* usn_cleanup.c */
+int usn_cleanup_start(Slapi_PBlock *pb);
+
diff --git a/ldap/servers/plugins/usn/usn_cleanup.c b/ldap/servers/plugins/usn/usn_cleanup.c
new file mode 100644
index 000000000..bf13073a5
--- /dev/null
+++ b/ldap/servers/plugins/usn/usn_cleanup.c
@@ -0,0 +1,326 @@
+/** BEGIN COPYRIGHT BLOCK
+ * This Program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; version 2 of the License.
+ *
+ * This Program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * In addition, as a special exception, Red Hat, Inc. gives You the additional
+ * right to link the code of this Program with code not covered under the GNU
+ * General Public License ("Non-GPL Code") and to distribute linked combinations
+ * including the two, subject to the limitations in this paragraph. Non-GPL Code
+ * permitted under this exception must only link to the code of this Program
+ * through those well defined interfaces identified in the file named EXCEPTION
+ * found in the source code files (the "Approved Interfaces"). The files of
+ * Non-GPL Code may instantiate templates or use macros or inline functions from
+ * the Approved Interfaces without causing the resulting work to be covered by
+ * the GNU General Public License. Only Red Hat, Inc. may make changes or
+ * additions to the list of Approved Interfaces. You must obey the GNU General
+ * Public License in all respects for all of the Program code and other code used
+ * in conjunction with the Program except the Non-GPL Code covered by this
+ * exception. If you modify this file, you may extend this exception to your
+ * version of the file, but you are not obligated to do so. If you do not wish to
+ * provide this exception without modification, you must delete this exception
+ * statement from your version and license this file solely under the GPL without
+ * exception.
+ *
+ *
+ * Copyright (C) 2009 Red Hat, Inc.
+ * All rights reserved.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "usn.h"
+
+struct usn_cleanup_data {
+ char *suffix;
+ char *maxusn_to_delete;
+};
+
+
+static int usn_cleanup_add(Slapi_PBlock *pb, Slapi_Entry *e,
+ Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg);
+
+int
+usn_cleanup_start(Slapi_PBlock *pb)
+{
+ int rc = slapi_task_register_handler("USN tombstone cleanup task",
+ usn_cleanup_add);
+ return rc;
+}
+
+/*
+ * Task thread
+ */
+static void
+usn_cleanup_thread(void *arg)
+{
+ Slapi_Task *task = (Slapi_Task *)arg;
+ int rv = 0;
+ int total_work = 2;
+ /* fetch our argument from the task */
+ struct usn_cleanup_data *cleanup_data =
+ (struct usn_cleanup_data*)slapi_task_get_data(task);
+ Slapi_PBlock *search_pb = NULL;
+ Slapi_Entry **entries = NULL, **ep = NULL;
+ Slapi_PBlock *delete_pb = NULL;
+ char *filter = "objectclass=nsTombstone";
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_cleanup_thread\n");
+
+ if (NULL == usn_get_identity()) { /* plugin is not initialized */
+ slapi_task_log_notice(task, "USN plugin is not initialized\n");
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: USN plugin is not initialized\n");
+ rv = -1;
+ goto bail;
+ }
+
+ /* update task state to show it's running */
+ slapi_task_begin(task, total_work);
+ if (cleanup_data->maxusn_to_delete) {
+ /* (&(objectclass=nsTombstone)(entryusn<=maxusn_to_delete)) */
+ int filter_len =
+ strlen(filter) + strlen(cleanup_data->maxusn_to_delete) + 32;
+ filter = (char *)slapi_ch_malloc(filter_len);
+ PR_snprintf(filter, filter_len,
+ "(&(objectclass=nsTombstone)(entryusn<=%s))",
+ cleanup_data->maxusn_to_delete);
+ }
+
+ search_pb = slapi_pblock_new();
+ slapi_search_internal_set_pb(search_pb, cleanup_data->suffix,
+ LDAP_SCOPE_SUBTREE, filter,
+ NULL, 0, NULL, NULL, usn_get_identity(), 0);
+ slapi_search_internal_pb(search_pb);
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rv);
+ if (LDAP_NO_SUCH_OBJECT == rv) {
+ slapi_task_log_notice(task,
+ "USN tombstone cleanup: no such suffix %s.\n",
+ cleanup_data->suffix);
+ slapi_task_log_status(task,
+ "USN tombstone cleanup: no such suffix %s.\n",
+ cleanup_data->suffix);
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: no such suffix %s.\n",
+ cleanup_data->suffix);
+ goto bail;
+ } else if (LDAP_SUCCESS != rv) {
+ slapi_task_log_notice(task,
+ "USN tombstone cleanup: searching tombstone entries "
+ "in %s failed; (%d).\n", cleanup_data->suffix, rv);
+ slapi_task_log_status(task,
+ "USN tombstone cleanup: searching tombstone entries in "
+ "%s failed; (%d).\n", cleanup_data->suffix, rv);
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: searching tombstone entries in "
+ "%s failed; (%d).\n", cleanup_data->suffix, rv);
+ goto bail;
+ }
+
+ slapi_task_log_notice(task,
+ "USN tombstone cleanup task starts (suffix: %s) ...\n",
+ cleanup_data->suffix);
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup task starts (suffix: %s) ...\n",
+ cleanup_data->suffix);
+
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+
+ delete_pb = slapi_pblock_new();
+ for (ep = entries; ep && *ep; ep++) {
+ int delrv = 0;
+ const Slapi_DN *sdn = slapi_entry_get_sdn_const(*ep);
+
+ slapi_delete_internal_set_pb(delete_pb, slapi_sdn_get_dn(sdn),
+ NULL, NULL, usn_get_identity(), 0);
+ slapi_delete_internal_pb(delete_pb);
+ slapi_pblock_get(delete_pb, SLAPI_PLUGIN_INTOP_RESULT, &delrv);
+ if (LDAP_SUCCESS != delrv) {
+ slapi_task_log_notice(task,
+ "USN tombstone cleanup: deleting %s failed; (%d).\n",
+ slapi_sdn_get_dn(sdn), delrv);
+ slapi_task_log_status(task,
+ "USN tombstone cleanup: deleting %s failed; (%d).\n",
+ slapi_sdn_get_dn(sdn), delrv);
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: deleting %s failed; (%d).\n",
+ slapi_sdn_get_dn(sdn), delrv);
+ rv = delrv;
+ }
+
+ slapi_pblock_init(delete_pb);
+ slapi_task_inc_progress(task);
+ }
+ slapi_task_log_notice(task, "USN tombstone cleanup task finished.");
+ slapi_task_log_status(task, "USN tombstone cleanup task finished.");
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup task finished.\n");
+bail:
+ slapi_free_search_results_internal(search_pb);
+ slapi_pblock_destroy(search_pb);
+ slapi_pblock_destroy(delete_pb);
+ if (cleanup_data->maxusn_to_delete) {
+ slapi_ch_free_string(&filter);
+ }
+ slapi_ch_free_string(&cleanup_data->maxusn_to_delete);
+ slapi_ch_free_string(&cleanup_data->suffix);
+ slapi_ch_free((void **)&cleanup_data);
+
+ /* this will queue the destruction of the task */
+ slapi_task_finish(task, rv);
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_cleanup_thread\n");
+}
+
+#define MAPPING_TREE_BASE_DN "cn=mapping tree,cn=config"
+
+static int
+_usn_cleanup_is_mmr_enabled(const char *suffix)
+{
+ Slapi_PBlock *search_pb = NULL;
+ Slapi_Entry **entries = NULL;
+ char *base_dn = NULL;
+ int rc = 0; /* disabled, by default */
+
+ base_dn = slapi_ch_smprintf("cn=replica,cn=\"%s\",%s",
+ suffix, MAPPING_TREE_BASE_DN);
+ search_pb = slapi_pblock_new();
+ slapi_search_internal_set_pb(search_pb, base_dn, LDAP_SCOPE_ONELEVEL,
+ "objectclass=nsDS5ReplicationAgreement",
+ NULL, 0, NULL, NULL, usn_get_identity(), 0);
+ slapi_search_internal_pb(search_pb);
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if (LDAP_SUCCESS != rc) { /* agreement is not available */
+ goto bail;
+ }
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if (entries && *entries) {
+ rc = 1; /* At least one agreement on the suffix is found */
+ }
+bail:
+ slapi_free_search_results_internal(search_pb);
+ slapi_pblock_destroy(search_pb);
+ slapi_ch_free_string(&base_dn);
+
+ return rc;
+}
+
+static int
+usn_cleanup_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
+ int *returncode, char *returntext, void *arg)
+{
+ PRThread *thread = NULL;
+ char *cn = NULL;
+ char *suffix = NULL;
+ char *backend = NULL;
+ char *maxusn = NULL;
+ struct usn_cleanup_data *cleanup_data = NULL;
+ int rv = SLAPI_DSE_CALLBACK_OK;
+ Slapi_Task *task = NULL;
+ Slapi_Backend *be = NULL;
+ const Slapi_DN *be_suffix = NULL;
+
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "--> usn_cleanup_add\n");
+
+ *returncode = LDAP_SUCCESS;
+ cn = slapi_entry_attr_get_charptr(e, "cn");
+ if (NULL == cn) {
+ *returncode = LDAP_OBJECT_CLASS_VIOLATION;
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ goto bail;
+ }
+
+ /* get args */
+ suffix = slapi_entry_attr_get_charptr(e, "suffix");
+ backend = slapi_entry_attr_get_charptr(e, "backend");
+ maxusn = slapi_entry_attr_get_charptr(e, "maxusn_to_delete");
+
+ if (NULL == suffix && NULL == backend) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: Both suffix and backend are missing.\n");
+ *returncode = LDAP_PARAM_ERROR;
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ goto bail;
+ }
+
+ /* suffix is not given, but backend is; get the suffix */
+ if (NULL == suffix && NULL != backend) {
+ be = slapi_be_select_by_instance_name(backend);
+ be_suffix = slapi_be_getsuffix(be, 0);
+ if (be_suffix) {
+ suffix = slapi_ch_strdup(slapi_sdn_get_ndn(be_suffix));
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: Backend %s is invalid.\n", backend);
+ *returncode = LDAP_PARAM_ERROR;
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ goto bail;
+ }
+ }
+
+ /* The suffix is the target of replication,
+ * we don't want to clean up tombstones used by MMR */
+ if (_usn_cleanup_is_mmr_enabled(suffix)) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: Suffix %s is replicated. Unwilling to "
+ "perform cleaning up tombstones.\n", suffix);
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ goto bail;
+ }
+
+ cleanup_data =
+ (struct usn_cleanup_data *)slapi_ch_malloc(sizeof(struct usn_cleanup_data));
+ cleanup_data->suffix = slapi_ch_strdup(suffix);
+ cleanup_data->maxusn_to_delete = slapi_ch_strdup(maxusn);
+
+ /* allocate new task now */
+ task = slapi_new_task(slapi_entry_get_ndn(e));
+ if (task == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: unable to allocate new task.\n");
+ *returncode = LDAP_OPERATIONS_ERROR;
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ goto bail;
+ }
+
+ /* Stash our argument in the task for use by the task thread */
+ slapi_task_set_data(task, cleanup_data);
+
+ /* start the USN tombstone cleanup task as a separate thread */
+ thread = PR_CreateThread(PR_USER_THREAD, usn_cleanup_thread,
+ (void *)task, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE);
+ if (thread == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, USN_PLUGIN_SUBSYSTEM,
+ "USN tombstone cleanup: unable to create task thread.\n");
+ *returncode = LDAP_OPERATIONS_ERROR;
+ rv = SLAPI_DSE_CALLBACK_ERROR;
+ slapi_task_finish(task, *returncode);
+ } else {
+ /* thread successful */
+ rv = SLAPI_DSE_CALLBACK_OK;
+ }
+bail:
+ slapi_ch_free_string(&cn);
+ slapi_ch_free_string(&suffix);
+ slapi_ch_free_string(&backend);
+ slapi_ch_free_string(&maxusn);
+ slapi_log_error(SLAPI_LOG_TRACE, USN_PLUGIN_SUBSYSTEM,
+ "<-- usn_cleanup_add\n");
+ return rv;
+}
+
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 1b6dda54f..3036bf7c2 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -739,6 +739,7 @@ typedef struct _back_search_result_set
#define LDBM_DNCOMP_OID "2.16.840.1.113730.3.1.603"
#define LDBM_PARENTID_OID "2.16.840.1.113730.3.1.604"
#define LDBM_ENTRYID_OID "2.16.840.1.113730.3.1.605"
+#define LDBM_ENTRYUSN_OID "2.16.840.1.113730.3.1.606"
/* Name of psuedo attribute used to track default indexes */
#define LDBM_PSEUDO_ATTR_DEFAULT ".default"
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 57c208a8f..48e19c81d 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -1052,6 +1052,9 @@ static int import_all_done(ImportJob *job, int ret)
if (ret != 0)
return ret;
+ /* Reset USN slapi_counter with the last key of the entryUSN index */
+ ldbm_set_last_usn(inst->inst_be);
+
/* bring backend online again */
slapi_mtn_be_enable(inst->inst_be);
}
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
index c32f9825a..be9c114dc 100644
--- a/ldap/servers/slapd/back-ldbm/init.c
+++ b/ldap/servers/slapd/back-ldbm/init.c
@@ -84,6 +84,10 @@ ldbm_back_add_schema( Slapi_PBlock *pb )
LDBM_ENTRYID_OID, DIRSTRING_SYNTAX_OID, CASEIGNOREMATCH_NAME,
SLAPI_ATTR_FLAG_SINGLE );
+ rc |= add_ldbm_internal_attr_syntax( "entryusn",
+ LDBM_ENTRYUSN_OID, INTEGER_SYNTAX_OID, INTFIRSTCOMPMATCH_NAME,
+ SLAPI_ATTR_FLAG_SINGLE|SLAPI_ATTR_FLAG_NOUSERMOD );
+
return rc;
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index b9f573a72..fa53e05fb 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -159,6 +159,7 @@ ldbm_back_add( Slapi_PBlock *pb )
operation->o_status = SLAPI_OP_STATUS_WILL_COMPLETE;
}
if ( slapi_op_abandoned( pb ) ) {
+ ldap_result_code = -1; /* needs to distinguish from "success" */
goto error_return;
}
@@ -847,6 +848,8 @@ common_return:
{
cache_return( &inst->inst_cache, &addingentry );
}
+ /* bepost op needs to know this result */
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ldap_result_code);
/* JCMREPL - The bepostop is called even if the operation fails. */
plugin_call_plugins (pb, SLAPI_PLUGIN_BE_POST_ADD_FN);
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index fe02b2075..74d8de8ee 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -82,6 +82,9 @@ ldbm_back_delete( Slapi_PBlock *pb )
int tombstone_in_cache = 0;
entry_address *addr;
int addordel_flags = 0; /* passed to index_addordel */
+ char *entryusn_str = NULL;
+ char *prev_entryusn_str = NULL;
+ Slapi_Entry *orig_entry = NULL;
slapi_pblock_get( pb, SLAPI_BACKEND, &be);
slapi_pblock_get( pb, SLAPI_PLUGIN_PRIVATE, &li );
@@ -133,6 +136,23 @@ ldbm_back_delete( Slapi_PBlock *pb )
goto error_return;
}
+ /* find and lock the entry we are about to modify */
+ if ( (e = find_entry2modify( pb, be, addr, NULL )) == NULL )
+ {
+ ldap_result_code= -1;
+ goto error_return; /* error result sent by find_entry2modify() */
+ }
+
+ if ( slapi_entry_has_children( e->ep_entry ) )
+ {
+ ldap_result_code= LDAP_NOT_ALLOWED_ON_NONLEAF;
+ goto error_return;
+ }
+
+ /* set entry in case be-preop plugins need to work on it (e.g., USN) */
+ slapi_pblock_get( pb, SLAPI_DELETE_BEPREOP_ENTRY, &orig_entry );
+ slapi_pblock_set( pb, SLAPI_DELETE_BEPREOP_ENTRY, e->ep_entry );
+
/* Don't call pre-op for Tombstone entries */
if (!delete_tombstone_entry)
{
@@ -141,7 +161,8 @@ ldbm_back_delete( Slapi_PBlock *pb )
* backend pre-op plugin. To ensure a consistent snapshot of this state
* we wrap the reading of the entry with the dblock.
*/
- ldap_result_code= get_copy_of_entry(pb, addr, &txn, SLAPI_DELETE_EXISTING_ENTRY, !is_replicated_operation);
+ ldap_result_code= get_copy_of_entry(pb, addr, &txn,
+ SLAPI_DELETE_EXISTING_ENTRY, !is_replicated_operation);
slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ldap_result_code);
if(plugin_call_plugins(pb, SLAPI_PLUGIN_BE_PRE_DELETE_FN)==-1)
{
@@ -152,21 +173,12 @@ ldbm_back_delete( Slapi_PBlock *pb )
slapi_pblock_get(pb, SLAPI_RESULT_CODE, &ldap_result_code);
goto error_return;
}
- }
-
-
- /* find and lock the entry we are about to modify */
- if ( (e = find_entry2modify( pb, be, addr, NULL )) == NULL )
- {
- ldap_result_code= -1;
- goto error_return; /* error result sent by find_entry2modify() */
+ /* the flag could be set in a preop plugin (e.g., USN) */
+ delete_tombstone_entry = operation_is_flag_set(operation,
+ OP_FLAG_TOMBSTONE_ENTRY);
}
- if ( slapi_entry_has_children( e->ep_entry ) )
- {
- ldap_result_code= LDAP_NOT_ALLOWED_ON_NONLEAF;
- goto error_return;
- }
+ slapi_pblock_set( pb, SLAPI_DELETE_BEPREOP_ENTRY, orig_entry );
/*
* Sanity check to avoid to delete a non-tombstone or to tombstone again
@@ -196,8 +208,12 @@ ldbm_back_delete( Slapi_PBlock *pb )
opcsn = operation_get_csn (operation);
if (!delete_tombstone_entry)
{
- if (opcsn == NULL && !is_fixup_operation && operation->o_csngen_handler)
+ /* If both USN and replication is enabled, csn set by replication
+ * should be honored. */
+ if ((opcsn == NULL || ldbm_usn_enabled(be)) &&
+ !is_fixup_operation && operation->o_csngen_handler)
{
+ csn_free(&opcsn); /* free opcsn set by USN plugin, if any */
/*
* Current op is a user request. Opcsn will be assigned
* by entry_assign_operation_csn() if the dn is in an
@@ -338,6 +354,20 @@ ldbm_back_delete( Slapi_PBlock *pb )
slapi_entry_add_value(tombstone->ep_entry, SLAPI_ATTR_OBJECTCLASS, tomb_value);
slapi_value_free(&tomb_value);
+ /* retrieve previous entry usn value, if any */
+ prev_entryusn_str = slapi_entry_attr_get_charptr(tombstone->ep_entry,
+ SLAPI_ATTR_ENTRYUSN_PREV);
+ if (prev_entryusn_str) {
+ /* discard the previous value from the tombstone entry */
+ retval = slapi_entry_delete_string(tombstone->ep_entry,
+ SLAPI_ATTR_ENTRYUSN_PREV, prev_entryusn_str);
+ if (0 != retval) {
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (deleting %s) failed, err=%d\n",
+ SLAPI_ATTR_ENTRYUSN, retval, 0) ;
+ }
+ }
+
/* XXXggood above used to be: slapi_entry_add_string(tombstone->ep_entry, SLAPI_ATTR_OBJECTCLASS, SLAPI_ATTR_VALUE_TOMBSTONE); */
/* JCMREPL - Add a description of what's going on? */
}
@@ -432,45 +462,110 @@ ldbm_back_delete( Slapi_PBlock *pb )
* above, but we want it to remain in the nsUniqueID and nscpEntryDN indexes
* and for objectclass=tombstone.
*/
- retval = index_addordel_string(be,SLAPI_ATTR_OBJECTCLASS,SLAPI_ATTR_VALUE_TOMBSTONE,tombstone->ep_id,BE_INDEX_ADD,&txn);
+ retval = index_addordel_string(be, SLAPI_ATTR_OBJECTCLASS,
+ SLAPI_ATTR_VALUE_TOMBSTONE,
+ tombstone->ep_id,BE_INDEX_ADD, &txn);
if (DB_LOCK_DEADLOCK == retval) {
- LDAPDebug( LDAP_DEBUG_ARGS, "delete 4 DB_LOCK_DEADLOCK\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (adding %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_VALUE_TOMBSTONE, 0, 0 );
/* Retry txn */
continue;
}
if (0 != retval) {
- LDAPDebug( LDAP_DEBUG_TRACE, "delete 1 BAD, err=%d %s\n",
- retval, (msg = dblayer_strerror( retval )) ? msg : "", 0 );
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (adding %s) failed, err=%d %s\n",
+ SLAPI_ATTR_VALUE_TOMBSTONE, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
- ldap_result_code= LDAP_OPERATIONS_ERROR;
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
goto error_return;
}
- retval = index_addordel_string(be,SLAPI_ATTR_UNIQUEID,slapi_entry_get_uniqueid(tombstone->ep_entry),tombstone->ep_id,BE_INDEX_ADD,&txn);
+ retval = index_addordel_string(be, SLAPI_ATTR_UNIQUEID,
+ slapi_entry_get_uniqueid(tombstone->ep_entry),
+ tombstone->ep_id,BE_INDEX_ADD,&txn);
if (DB_LOCK_DEADLOCK == retval) {
- LDAPDebug( LDAP_DEBUG_ARGS, "delete 5 DB_LOCK_DEADLOCK\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (adding %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_UNIQUEID, 0, 0 );
/* Retry txn */
continue;
}
if (0 != retval) {
- LDAPDebug( LDAP_DEBUG_TRACE, "delete 2 BAD, err=%d %s\n",
- retval, (msg = dblayer_strerror( retval )) ? msg : "", 0 );
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (adding %s) failed, err=%d %s\n",
+ SLAPI_ATTR_UNIQUEID, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
- ldap_result_code= LDAP_OPERATIONS_ERROR;
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
goto error_return;
}
- retval = index_addordel_string(be,SLAPI_ATTR_NSCP_ENTRYDN, slapi_sdn_get_ndn(nscpEntrySDN),tombstone->ep_id,BE_INDEX_ADD,&txn);
+ retval = index_addordel_string(be, SLAPI_ATTR_NSCP_ENTRYDN,
+ slapi_sdn_get_ndn(nscpEntrySDN),
+ tombstone->ep_id, BE_INDEX_ADD, &txn);
if (DB_LOCK_DEADLOCK == retval) {
- LDAPDebug( LDAP_DEBUG_ARGS, "delete 6 DB_LOCK_DEADLOCK\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (adding %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_NSCP_ENTRYDN, 0, 0 );
/* Retry txn */
continue;
}
if (0 != retval) {
- LDAPDebug( LDAP_DEBUG_TRACE, "delete 3 BAD, err=%d %s\n",
- retval, (msg = dblayer_strerror( retval )) ? msg : "", 0 );
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (adding %s) failed, err=%d %s\n",
+ SLAPI_ATTR_NSCP_ENTRYDN, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
- ldap_result_code= LDAP_OPERATIONS_ERROR;
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
goto error_return;
}
+ /* add a new usn to the entryusn index */
+ entryusn_str = slapi_entry_attr_get_charptr(tombstone->ep_entry,
+ SLAPI_ATTR_ENTRYUSN);
+ if (entryusn_str) {
+ retval = index_addordel_string(be, SLAPI_ATTR_ENTRYUSN,
+ entryusn_str, tombstone->ep_id, BE_INDEX_ADD, &txn);
+ slapi_ch_free_string(&entryusn_str);
+ if (DB_LOCK_DEADLOCK == retval) {
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (adding %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_ENTRYUSN, 0, 0 );
+ /* Retry txn */
+ continue;
+ }
+ if (0 != retval) {
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (adding %s) failed, err=%d %s\n",
+ SLAPI_ATTR_ENTRYUSN, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
+ if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+ }
+ /* delete a previous value (if it exists) from the entryusn index */
+ if (prev_entryusn_str) {
+ retval = index_addordel_string(be, SLAPI_ATTR_ENTRYUSN,
+ prev_entryusn_str, tombstone->ep_id,
+ BE_INDEX_DEL|BE_INDEX_EQUALITY, &txn);
+ slapi_ch_free_string(&prev_entryusn_str);
+ if (DB_LOCK_DEADLOCK == retval) {
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (deleting %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_ENTRYUSN, 0, 0 );
+ /* Retry txn */
+ continue;
+ }
+ if (0 != retval) {
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (deleting %s) failed, err=%d %s\n",
+ SLAPI_ATTR_ENTRYUSN, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
+ if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+ }
} /* create_tombstone_entry */
else if (delete_tombstone_entry)
{
@@ -480,45 +575,88 @@ ldbm_back_delete( Slapi_PBlock *pb )
*/
char *nscpedn = NULL;
- retval = index_addordel_string(be,SLAPI_ATTR_OBJECTCLASS,SLAPI_ATTR_VALUE_TOMBSTONE,e->ep_id,BE_INDEX_DEL,&txn);
+ retval = index_addordel_string(be, SLAPI_ATTR_OBJECTCLASS,
+ SLAPI_ATTR_VALUE_TOMBSTONE, e->ep_id,
+ BE_INDEX_DEL, &txn);
if (DB_LOCK_DEADLOCK == retval) {
- LDAPDebug( LDAP_DEBUG_ARGS, "delete 4 DB_LOCK_DEADLOCK\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (deleting %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_VALUE_TOMBSTONE, 0, 0 );
/* Retry txn */
continue;
}
if (0 != retval) {
- LDAPDebug( LDAP_DEBUG_TRACE, "delete 1 BAD, err=%d %s\n",
- retval, (msg = dblayer_strerror( retval )) ? msg : "", 0 );
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (deleting %s) failed, err=%d %s\n",
+ SLAPI_ATTR_VALUE_TOMBSTONE, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
- ldap_result_code= LDAP_OPERATIONS_ERROR;
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
goto error_return;
}
- retval = index_addordel_string(be,SLAPI_ATTR_UNIQUEID,slapi_entry_get_uniqueid(e->ep_entry),e->ep_id,BE_INDEX_DEL,&txn);
+ retval = index_addordel_string(be, SLAPI_ATTR_UNIQUEID,
+ slapi_entry_get_uniqueid(e->ep_entry),
+ e->ep_id, BE_INDEX_DEL, &txn);
if (DB_LOCK_DEADLOCK == retval) {
- LDAPDebug( LDAP_DEBUG_ARGS, "delete 5 DB_LOCK_DEADLOCK\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (deleting %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_UNIQUEID, 0, 0 );
/* Retry txn */
continue;
}
if (0 != retval) {
- LDAPDebug( LDAP_DEBUG_TRACE, "delete 2 BAD, err=%d %s\n",
- retval, (msg = dblayer_strerror( retval )) ? msg : "", 0 );
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (deleting %s) failed, err=%d %s\n",
+ SLAPI_ATTR_UNIQUEID, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
- ldap_result_code= LDAP_OPERATIONS_ERROR;
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
goto error_return;
}
- nscpedn = slapi_entry_attr_get_charptr(e->ep_entry, SLAPI_ATTR_NSCP_ENTRYDN);
+ nscpedn = slapi_entry_attr_get_charptr(e->ep_entry,
+ SLAPI_ATTR_NSCP_ENTRYDN);
if (nscpedn) {
- retval = index_addordel_string(be,SLAPI_ATTR_NSCP_ENTRYDN, nscpedn, e->ep_id,BE_INDEX_DEL,&txn);
+ retval = index_addordel_string(be, SLAPI_ATTR_NSCP_ENTRYDN,
+ nscpedn, e->ep_id, BE_INDEX_DEL, &txn);
slapi_ch_free((void **)&nscpedn);
if (DB_LOCK_DEADLOCK == retval) {
- LDAPDebug( LDAP_DEBUG_ARGS, "delete 6 DB_LOCK_DEADLOCK\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (deleting %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_NSCP_ENTRYDN, 0, 0 );
+ /* Retry txn */
+ continue;
+ }
+ if (0 != retval) {
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (deleting %s) failed, err=%d %s\n",
+ SLAPI_ATTR_NSCP_ENTRYDN, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
+ if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
+ ldap_result_code= LDAP_OPERATIONS_ERROR;
+ goto error_return;
+ }
+ }
+ /* delete usn from the entryusn index */
+ entryusn_str = slapi_entry_attr_get_charptr(e->ep_entry,
+ SLAPI_ATTR_ENTRYUSN);
+ if (entryusn_str) {
+ retval = index_addordel_string(be, SLAPI_ATTR_ENTRYUSN,
+ entryusn_str, e->ep_id,
+ BE_INDEX_DEL|BE_INDEX_EQUALITY, &txn);
+ slapi_ch_free_string(&entryusn_str);
+ if (DB_LOCK_DEADLOCK == retval) {
+ LDAPDebug( LDAP_DEBUG_ARGS,
+ "delete (deleting %s) DB_LOCK_DEADLOCK\n",
+ SLAPI_ATTR_ENTRYUSN, 0, 0 );
/* Retry txn */
continue;
}
if (0 != retval) {
- LDAPDebug( LDAP_DEBUG_TRACE, "delete 3 BAD, err=%d %s\n",
- retval, (msg = dblayer_strerror( retval )) ? msg : "", 0 );
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "delete (deleting %s) failed, err=%d %s\n",
+ SLAPI_ATTR_ENTRYUSN, retval,
+ (msg = dblayer_strerror( retval )) ? msg : "" );
if (LDBM_OS_ERR_IS_DISKFULL(retval)) disk_full = 1;
ldap_result_code= LDAP_OPERATIONS_ERROR;
goto error_return;
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
index e8fc3d737..b4e44732e 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
@@ -752,6 +752,12 @@ static int ldbm_instance_generate(struct ldbminfo *li, char *instance_name,
ldbm_instance_config_load_dse_info(new_be->be_instance_info);
rc = ldbm_instance_create_default_indexes(new_be);
+ /* if USN plugin is enabled, set slapi_counter */
+ if (plugin_enabled("USN", li->li_identity)) {
+ /* slapi_counter_new sets the initial value to 0 */
+ new_be->be_usn_counter = slapi_counter_new();
+ }
+
if (ret_be != NULL) {
*ret_be = new_be;
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index e85443d70..6ae954484 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -303,6 +303,7 @@ ldbm_back_modify( Slapi_PBlock *pb )
}
if ( !change_entry || ldap_result_code != 0 ) {
/* change_entry == 0 is not an error, but we need to free lock etc */
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &ldap_result_code);
goto error_return;
}
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_usn.c b/ldap/servers/slapd/back-ldbm/ldbm_usn.c
new file mode 100644
index 000000000..e8dd72378
--- /dev/null
+++ b/ldap/servers/slapd/back-ldbm/ldbm_usn.c
@@ -0,0 +1,196 @@
+/** BEGIN COPYRIGHT BLOCK
+ * This Program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; version 2 of the License.
+ *
+ * This Program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * In addition, as a special exception, Red Hat, Inc. gives You the additional
+ * right to link the code of this Program with code not covered under the GNU
+ * General Public License ("Non-GPL Code") and to distribute linked combinations
+ * including the two, subject to the limitations in this paragraph. Non-GPL Code
+ * permitted under this exception must only link to the code of this Program
+ * through those well defined interfaces identified in the file named EXCEPTION
+ * found in the source code files (the "Approved Interfaces"). The files of
+ * Non-GPL Code may instantiate templates or use macros or inline functions from
+ * the Approved Interfaces without causing the resulting work to be covered by
+ * the GNU General Public License. Only Red Hat, Inc. may make changes or
+ * additions to the list of Approved Interfaces. You must obey the GNU General
+ * Public License in all respects for all of the Program code and other code used
+ * in conjunction with the Program except the Non-GPL Code covered by this
+ * exception. If you modify this file, you may extend this exception to your
+ * version of the file, but you are not obligated to do so. If you do not wish to
+ * provide this exception without modification, you must delete this exception
+ * statement from your version and license this file solely under the GPL without
+ * exception.
+ *
+ *
+ * Copyright (C) 2009 Red Hat, Inc.
+ * All rights reserved.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "back-ldbm.h"
+
+static int usn_get_last_usn(Slapi_Backend *be, PRUint64 *last_usn);
+
+/*
+ * USN counter part in the backend
+ * - If usn is enabled,
+ * - For each backend,
+ * - Get the last USN index key
+ * - Initialize the slapi counter with the next USN (last USN + 1)
+ *
+ * dn: cn=entryusn,cn=default indexes,cn=config,cn=ldbm database,cn=plugins,cn=
+ * config
+ * objectClass: top
+ * objectClass: nsIndex
+ * cn: entryusn
+ * nsSystemIndex: true
+ * nsIndexType: eq
+ */
+
+void
+ldbm_usn_init(struct ldbminfo *li)
+{
+ Slapi_DN *sdn = NULL;
+ void *node = NULL;
+ const char *base = NULL;
+ int rc = 0;
+ Slapi_Backend *be = NULL;
+ PRUint64 last_usn = 0;
+
+ /* if USN is not enabled, return immediately */
+ if (!plugin_enabled("USN", li->li_identity)) {
+ goto bail;
+ }
+
+ /* Search each namingContext in turn */
+ for ( sdn = slapi_get_first_suffix( &node, 0 ); sdn != NULL;
+ sdn = slapi_get_next_suffix( &node, 0 )) {
+ base = slapi_sdn_get_dn( sdn );
+ be = slapi_mapping_tree_find_backend_for_sdn(sdn);
+ slapi_log_error(SLAPI_LOG_TRACE, "ldbm_usn_init",
+ "backend: %s\n", be->be_name);
+ rc = usn_get_last_usn(be, &last_usn);
+ if (0 == rc) { /* only when the last usn is available */
+ be->be_usn_counter = slapi_counter_new();
+ slapi_counter_set_value(be->be_usn_counter, last_usn);
+ slapi_counter_increment(be->be_usn_counter); /* stores next usn */
+ }
+ }
+bail:
+ return;
+}
+
+/*
+ * usn_ge_last_usn: get the last USN from the entryusn equality index
+ */
+static int
+usn_get_last_usn(Slapi_Backend *be, PRUint64 *last_usn)
+{
+ struct attrinfo *ai = NULL;
+ struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+ int rc = -1;
+ DB *db = NULL;
+ DBC *dbc = NULL;
+ DBT key; /* For the last usn */
+ DBT value;
+
+ if (NULL == last_usn) {
+ return rc;
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&value, 0, sizeof(key));
+
+ *last_usn = -1; /* to start from 0 */
+
+ /* Open the entryusn index */
+ ainfo_get(be, "entryusn", &ai);
+
+ /* Open the entryusn index file */
+ rc = dblayer_get_index_file(be, ai, &db, DBOPEN_CREATE);
+ if (0 != rc) {
+ /* entryusn.db# is missing; it would be the first time. */
+ slapi_log_error(SLAPI_LOG_FATAL, "usn_get_last_usn",
+ "failed to open the entryusn index: %d", rc);
+ goto bail;
+ }
+
+ /* Get a cursor */
+ rc = db->cursor(db, NULL, &dbc, 0);
+ if (0 != rc) {
+ slapi_log_error(SLAPI_LOG_FATAL, "usn_get_last_usn",
+ "failed to create a cursor: %d", rc);
+ goto bail;
+ }
+
+ key.flags = DB_DBT_MALLOC;
+ value.flags = DB_DBT_MALLOC;
+ rc = dbc->c_get(dbc, &key, &value, DB_LAST);
+ if ((0 == rc) && key.data) {
+ char *p = (char *)key.data;
+ while ((0 == rc) && ('=' != *p)) { /* get the last elem of equality */
+ slapi_ch_free(&(key.data));
+ slapi_ch_free(&(value.data));
+ rc = dbc->c_get(dbc, &key, &value, DB_PREV);
+ p = (char *)key.data;
+ }
+ if (0 == rc) {
+ *last_usn = strtoll(++p, (char **)NULL, 0); /* key.data: =num */
+ }
+ } else if (DB_NOTFOUND == rc) {
+ /* if empty, it's okay. This is just a beginning. */
+ rc = 0;
+ }
+ slapi_ch_free(&(key.data));
+ slapi_ch_free(&(value.data));
+
+bail:
+ if (dbc) {
+ dbc->c_close(dbc);
+ }
+ if (db) {
+ dblayer_release_index_file(be, ai, db);
+ }
+ return rc;
+}
+
+/*
+ * Whether USN is enabled or not is checked with be_usn_counter.
+ */
+int
+ldbm_usn_enabled(Slapi_Backend *be)
+{
+ return (NULL != be->be_usn_counter);
+}
+
+/*
+ * set last usn to the USN slapi_counter in backend
+ */
+int
+ldbm_set_last_usn(Slapi_Backend *be)
+{
+ PRUint64 last_usn = 0;
+ int rc = usn_get_last_usn(be, &last_usn);
+
+ if (0 == rc) { /* only when the last usn is available */
+ /* destroy old counter, if any */
+ slapi_counter_destroy(&(be->be_usn_counter));
+ be->be_usn_counter = slapi_counter_new();
+ slapi_counter_set_value(be->be_usn_counter, last_usn);
+ slapi_counter_increment(be->be_usn_counter); /* stores next usn */
+ }
+
+ return rc;
+}
diff --git a/ldap/servers/slapd/back-ldbm/misc.c b/ldap/servers/slapd/back-ldbm/misc.c
index b4c7942f2..8fcb81f8e 100644
--- a/ldap/servers/slapd/back-ldbm/misc.c
+++ b/ldap/servers/slapd/back-ldbm/misc.c
@@ -94,17 +94,19 @@ int return_on_disk_full(struct ldbminfo *li)
/* System Indexes */
static const char *systemIndexes[] = {
- "entrydn",
- "parentid",
- "objectclass",
"aci",
+ "entrydn",
"numsubordinates",
+ "parentid",
+ SLAPI_ATTR_OBJECTCLASS,
SLAPI_ATTR_UNIQUEID,
SLAPI_ATTR_NSCP_ENTRYDN,
ATTR_NSDS5_REPLCONFLICT,
+ SLAPI_ATTR_ENTRYUSN,
NULL
};
+
int
ldbm_attribute_always_indexed(const char *attrtype)
{
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 30d607eef..3fc583286 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -632,3 +632,12 @@ int attrcrypt_encrypt_entry_inplace(backend *be, const struct backentry *inout);
int attrcrypt_encrypt_entry(backend *be, const struct backentry *in, struct backentry **out);
int attrcrypt_encrypt_index_key(backend *be, struct attrinfo *ai, const struct berval *in, struct berval **out);
int attrcrypt_init(ldbm_instance *li);
+
+/*
+ * ldbm_usn.c
+ */
+void ldbm_usn_init(struct ldbminfo *li);
+int ldbm_usn_enabled(backend *be);
+int ldbm_set_last_usn(Slapi_Backend *be);
+
+
diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c
index 9b517eb1b..6d833571a 100644
--- a/ldap/servers/slapd/back-ldbm/start.c
+++ b/ldap/servers/slapd/back-ldbm/start.c
@@ -222,6 +222,9 @@ ldbm_back_start( Slapi_PBlock *pb )
initialized = 1;
}
+ /* initialize the USN counter */
+ ldbm_usn_init(li);
+
LDAPDebug( LDAP_DEBUG_TRACE, "ldbm backend done starting\n", 0, 0, 0 );
return( 0 );
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
index c4c399f9a..740de29dd 100644
--- a/ldap/servers/slapd/backend.c
+++ b/ldap/servers/slapd/backend.c
@@ -64,7 +64,7 @@ be_init( Slapi_Backend *be, const char *type, const char *name, int isprivate, i
/* maximum group nesting level before giving up */
be->be_maxnestlevel = SLAPD_DEFAULT_GROUPNESTLEVEL;
be->be_noacl= 0;
- be->be_flags=0;
+ be->be_flags=0;
if (( fecfg = getFrontendConfig()) != NULL )
{
if ( fecfg->backendconfig != NULL && fecfg->backendconfig[ 0 ] != NULL )
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 7e595111d..ed51825e1 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -2372,6 +2372,8 @@ slapd_exemode_db2ldif(int argc, char** argv)
}
slapi_ch_free( (void**)&myname );
if (db2ldif_dump_replica) {
+ eq_stop(); /* event queue should be shutdown before closing
+ all plugins (especailly, replication plugin) */
plugin_closeall( 1 /* Close Backends */, 1 /* Close Globals */);
}
return( return_value );
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 98984adaf..e5751a92a 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -1592,6 +1592,10 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
(*(IFP *)value) = pblock->pb_plugin->plg_entrystorefunc;
break;
+ case SLAPI_PLUGIN_ENABLED:
+ *((int *)value) = pblock->pb_plugin_enabled;
+ break;
+
/* DSE add parameters */
case SLAPI_DSE_DONT_WRITE_WHEN_ADDING:
(*(int *)value) = pblock->pb_dse_dont_add_write;
@@ -2857,6 +2861,10 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
pblock->pb_plugin->plg_entrystorefunc = (IFP) value;
break;
+ case SLAPI_PLUGIN_ENABLED:
+ pblock->pb_plugin_enabled = *((int *)value);
+ break;
+
/* DSE add parameters */
case SLAPI_DSE_DONT_WRITE_WHEN_ADDING:
pblock->pb_dse_dont_add_write = *((int *)value);
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 5ae63564e..17730d59b 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -2166,6 +2166,20 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group,
configdir = config_get_configdir();
slapi_pblock_set(&pb, SLAPI_CONFIG_DIRECTORY, configdir);
+ /* see if the plugin is enabled or not */
+ if ((value = slapi_entry_attr_get_charptr(plugin_entry,
+ ATTR_PLUGIN_ENABLED)) &&
+ !strcasecmp(value, "off"))
+ {
+ enabled = 0;
+ }
+ else
+ {
+ enabled = 1;
+ }
+
+ slapi_pblock_set(&pb, SLAPI_PLUGIN_ENABLED, &enabled);
+
if ((*initfunc)(&pb) != 0)
{
LDAPDebug(LDAP_DEBUG_ANY, "Init function \"%s\" for \"%s\" plugin"
@@ -2193,18 +2207,6 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group,
}
}
- /* see if the plugin is enabled or not */
- if ((value = slapi_entry_attr_get_charptr(plugin_entry,
- ATTR_PLUGIN_ENABLED)) &&
- !strcasecmp(value, "off"))
- {
- enabled = 0;
- }
- else
- {
- enabled = 1;
- }
-
if (value)
slapi_ch_free((void**)&value);
@@ -2859,3 +2861,45 @@ void plugin_print_versions(void)
}
}
+
+/*
+ * check the spedified plugin entry and its nssladp-pluginEnabled value
+ * Return Value: 1 if the plugin is on.
+ * : 0 otherwise.
+ */
+int
+plugin_enabled(const char *plugin_name, void *identity)
+{
+ Slapi_PBlock *search_pb = NULL;
+ Slapi_Entry **entries = NULL, **ep = NULL;
+ char *on_off = NULL;
+ char *filter = NULL;
+ int rc = 0; /* disabled, by default */
+
+ filter = slapi_ch_smprintf("cn=%s", plugin_name);
+ search_pb = slapi_pblock_new();
+ slapi_search_internal_set_pb(search_pb, PLUGIN_BASE_DN, LDAP_SCOPE_ONELEVEL,
+ filter, NULL, 0, NULL, NULL, identity, 0);
+ slapi_search_internal_pb(search_pb);
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+ if (LDAP_SUCCESS != rc) { /* plugin is not available */
+ goto bail;
+ }
+
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ for (ep = entries; ep && *ep; ep++) {
+ on_off = slapi_entry_attr_get_charptr(*ep, "nsslapd-pluginEnabled");
+ if (on_off && (0 == strcasecmp(on_off, "on"))) {
+ rc = 1; /* plugin is on */
+ goto bail;
+ }
+ }
+
+bail:
+ slapi_ch_free_string(&on_off);
+ slapi_free_search_results_internal(search_pb);
+ slapi_pblock_destroy(search_pb);
+ slapi_ch_free_string(&filter);
+
+ return rc;
+}
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index b4eb0c03d..6eecd013c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1115,6 +1115,7 @@ typedef struct backend {
PRRWLock *be_lock;
PRRWLock *vlvSearchList_lock;
void *vlvSearchList;
+ Slapi_Counter *be_usn_counter; /* USN counter; one counter per backend */
} backend;
enum
@@ -1488,6 +1489,8 @@ typedef struct slapi_pblock {
int *pb_substrlens; /* user specified minimum substr search key lengths:
* nsSubStrBegin, nsSubStrMiddle, nsSubStrEnd
*/
+ int pb_plugin_enabled; /* nsslapd-pluginEnabled: on|off */
+ /* used in plugin init; pb_plugin is not ready, then */
} slapi_pblock;
/* index if substrlens */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index e04fad9b3..d5efe4d5c 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -296,7 +296,9 @@ NSPR_API(PRUint32) PR_fprintf(struct PRFileDesc* fd, const char *fmt, ...)
#define SLAPI_ATTR_OBJECTCLASS "objectclass"
#define SLAPI_ATTR_VALUE_TOMBSTONE "nsTombstone"
#define SLAPI_ATTR_VALUE_PARENT_UNIQUEID "nsParentUniqueID"
-#define SLAPI_ATTR_NSCP_ENTRYDN "nscpEntryDN"
+#define SLAPI_ATTR_NSCP_ENTRYDN "nscpEntryDN"
+#define SLAPI_ATTR_ENTRYUSN "entryusn"
+#define SLAPI_ATTR_ENTRYUSN_PREV "preventryusn"
/* opaque structures */
@@ -3272,6 +3274,7 @@ typedef struct slapi_plugindesc {
/* entry fetch and entry store values */
#define SLAPI_PLUGIN_ENTRY_FETCH_FUNC 813
#define SLAPI_PLUGIN_ENTRY_STORE_FUNC 814
+#define SLAPI_PLUGIN_ENABLED 815
/*
* Defined values of SLAPI_PLUGIN_SYNTAX_FLAGS:
@@ -3332,6 +3335,7 @@ typedef struct slapi_plugindesc {
#define SLAPI_DELETE_TARGET SLAPI_TARGET_DN
#define SLAPI_DELETE_EXISTING_ENTRY SLAPI_ADD_EXISTING_DN_ENTRY
#define SLAPI_DELETE_GLUE_PARENT_ENTRY SLAPI_ADD_PARENT_ENTRY
+#define SLAPI_DELETE_BEPREOP_ENTRY SLAPI_ENTRY_PRE_OP
/* modify arguments */
#define SLAPI_MODIFY_TARGET SLAPI_TARGET_DN
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 85c458965..21b8dc4fe 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1200,6 +1200,9 @@ void DS_Sleep(PRIntervalTime ticks);
#define PRLDAP_SET_PORT(myaddr,myport) \
((myaddr)->raw.family == PR_AF_INET6 ? ((myaddr)->ipv6.port = PR_htons(myport)) : ((myaddr)->inet.port = PR_htons(myport)))
+/* plugin.c */
+int plugin_enabled(const char *plugin_name, void *identity);
+
#ifdef __cplusplus
}
#endif
diff --git a/ltmain.sh b/ltmain.sh
old mode 100755
new mode 100644
| 0 |
f97797235463d73b10581680605d88c0eef546ac
|
389ds/389-ds-base
|
Bug 489379 - passwordExpirationTime in entry being added
collides with passwordMustChange policy
https://bugzilla.redhat.com/show_bug.cgi?id=489379
Description:
If we have passwordMustChange policy enabled and try to add an entry
that already contains an passwordExpirationTime attribute, then the
passwordMustChange code will insert another passwordExpirationTime
attribute with the magic 19700101000000Z value. This wont work
because passwordExpirationTime is single-valued:
ldap_add: Object class violation
ldap_add: additional info: single-valued attribute "passwordExpirationTime"
has multiple values
We need to pick one passwordExpirationTime value to "win". I think
it makes sense for the value that exists in the entry being added
to win, but I'm open to discussion if it's felt that would violate
the password policy.
To make the existing passwordExpirationTime value win, in
add_password_attrs() in pw.c I think the logic can be changed so
the scan for passwordExpirationTime happens unconditionally and
then not touch it even if pwpolicy->pw_must_change is set.
This patch has been submitted by Ulf Weltman ([email protected]).
|
commit f97797235463d73b10581680605d88c0eef546ac
Author: Noriko Hosoi <[email protected]>
Date: Wed Dec 15 18:14:48 2010 -0800
Bug 489379 - passwordExpirationTime in entry being added
collides with passwordMustChange policy
https://bugzilla.redhat.com/show_bug.cgi?id=489379
Description:
If we have passwordMustChange policy enabled and try to add an entry
that already contains an passwordExpirationTime attribute, then the
passwordMustChange code will insert another passwordExpirationTime
attribute with the magic 19700101000000Z value. This wont work
because passwordExpirationTime is single-valued:
ldap_add: Object class violation
ldap_add: additional info: single-valued attribute "passwordExpirationTime"
has multiple values
We need to pick one passwordExpirationTime value to "win". I think
it makes sense for the value that exists in the entry being added
to win, but I'm open to discussion if it's felt that would violate
the password policy.
To make the existing passwordExpirationTime value win, in
add_password_attrs() in pw.c I think the logic can be changed so
the scan for passwordExpirationTime happens unconditionally and
then not touch it even if pwpolicy->pw_must_change is set.
This patch has been submitted by Ulf Weltman ([email protected]).
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index 94a038f9b..22f818ee8 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -1321,44 +1321,63 @@ add_password_attrs( Slapi_PBlock *pb, Operation *op, Slapi_Entry *e )
Slapi_Attr **a, **next;
passwdPolicy *pwpolicy = NULL;
char *dn = slapi_entry_get_ndn(e);
-
- pwpolicy = new_passwdPolicy(pb, dn);
+ int has_allowchangetime = 0, has_expirationtime = 0;
+ time_t existing_exptime = 0;
LDAPDebug( LDAP_DEBUG_TRACE, "add_password_attrs\n", 0, 0, 0 );
bvals[0] = &bv;
bvals[1] = NULL;
- if ( pwpolicy->pw_must_change) {
- /* must change password when first time logon */
- bv.bv_val = format_genTime ( NO_TIME );
- } else {
- /* If passwordexpirationtime is specified by the user, don't
- try to assign the initial value */
- for ( a = &e->e_attrs; *a != NULL; a = next ) {
- if ( strcasecmp( (*a)->a_type,
- "passwordexpirationtime" ) == 0) {
- delete_passwdPolicy(&pwpolicy);
- return;
+ /* If passwordexpirationtime is specified by the user, don't
+ try to assign the initial value */
+ for ( a = &e->e_attrs; *a != NULL; a = next ) {
+ if ( strcasecmp( (*a)->a_type,
+ "passwordexpirationtime" ) == 0) {
+ Slapi_Value *sval;
+ if (slapi_attr_first_value(*a, &sval) == 0) {
+ const struct berval *bv = slapi_value_get_berval(sval);
+ existing_exptime = parse_genTime(bv->bv_val);
}
- next = &(*a)->a_next;
+ has_expirationtime = 1;
+
+ } else if ( strcasecmp( (*a)->a_type,
+ "passwordallowchangetime" ) == 0) {
+ has_allowchangetime = 1;
}
+ next = &(*a)->a_next;
+ }
- bv.bv_val = format_genTime ( time_plus_sec ( current_time (),
- pwpolicy->pw_maxage ) );
+ if ( has_allowchangetime && has_expirationtime ) {
+ return;
}
- if ( pwpolicy->pw_exp || pwpolicy->pw_must_change ) {
+
+ pwpolicy = new_passwdPolicy(pb, dn);
+
+ if ( !has_expirationtime &&
+ ( pwpolicy->pw_exp || pwpolicy->pw_must_change ) ) {
+ if ( pwpolicy->pw_must_change) {
+ /* must change password when first time logon */
+ bv.bv_val = format_genTime ( NO_TIME );
+ } else if ( pwpolicy->pw_exp ) {
+ bv.bv_val = format_genTime ( time_plus_sec ( current_time (),
+ pwpolicy->pw_maxage ) );
+ }
bv.bv_len = strlen( bv.bv_val );
slapi_entry_attr_merge( e, "passwordexpirationtime", bvals );
+ slapi_ch_free_string( &bv.bv_val );
}
- slapi_ch_free((void **) &bv.bv_val );
/*
* If the password minimum age is not 0, calculate when the password
* is allowed to be changed again and store the result
* in passwordallowchangetime in the user's entry.
+ * If the password has expired, don't add passwordallowchangetime,
+ * otherwise if the user has grace logins, they can't be used to change
+ * the password if we set a passwordallowchangetime in the future.
*/
- if ( pwpolicy->pw_minage != 0 ) {
+ if ( !has_allowchangetime && pwpolicy->pw_minage != 0 &&
+ (has_expirationtime && existing_exptime > current_time()) ) {
bv.bv_val = format_genTime ( time_plus_sec ( current_time (),
pwpolicy->pw_minage ) );
bv.bv_len = strlen( bv.bv_val );
| 0 |
b956c18caff3237a9cdb562139184cdd9595a0c6
|
389ds/389-ds-base
|
Resolves: #202843
Summary: referential integrity plugin does not stack with Class of Service appliance (Comment #16)
Changes: Don't call dblayer_close before plugin_closeall.
|
commit b956c18caff3237a9cdb562139184cdd9595a0c6
Author: Noriko Hosoi <[email protected]>
Date: Wed Dec 13 22:21:22 2006 +0000
Resolves: #202843
Summary: referential integrity plugin does not stack with Class of Service appliance (Comment #16)
Changes: Don't call dblayer_close before plugin_closeall.
diff --git a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
index e29439d7c..9c78068f5 100644
--- a/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
+++ b/ldap/servers/slapd/back-ldbm/ldif2ldbm.c
@@ -783,7 +783,7 @@ ldbm_back_ldbm2ldif( Slapi_PBlock *pb )
* Note that we should only call this once. If we're
* dumping several backends then it gets called multiple
* times and we get warnings in the error log like this:
- * WARNING: ldbm instance NetscapeRoot already exists
+ * WARNING: ldbm instance userRoot already exists
*/
ldbm_config_load_dse_info(li);
}
@@ -1205,24 +1205,6 @@ ldbm_back_ldbm2ldif( Slapi_PBlock *pb )
"db2ldif: Failed to close database\n",
0, 0, 0 );
}
- } else if (run_from_cmdline && dump_replica) {
- /*
- * It should not be necessary to close the dblayer here.
- * However it masks complex thread timing issues that
- * prevent a correct shutdown of the plugins. Closing the
- * dblayer here means we cannot dump multiple replicas
- * using -r, but the server doesn't allow that either.
- */
-
- /*
- * Use DBLAYER_NORMAL_MODE to match the value that was provided
- * to dblayer_start() and ensure creation of the guardian file.
- */
- if (0 != dblayer_close(li,DBLAYER_NORMAL_MODE)) {
- LDAPDebug( LDAP_DEBUG_ANY,
- "db2ldif: Failed to close database\n",
- 0, 0, 0 );
- }
}
if (!run_from_cmdline) {
| 0 |
89aef892f01242bbd40345d1bb42bb2072359996
|
389ds/389-ds-base
|
Issue 6472 - CLI - Improve error message format
Description:
For non-json/non-verbose generic exceptions format the message into
something more readable and friendly (desc, result, and info)
Relates: https://github.com/389ds/389-ds-base/issues/6472
Reviewed by: tbordaz & spichugi (Thanks!!)
|
commit 89aef892f01242bbd40345d1bb42bb2072359996
Author: Mark Reynolds <[email protected]>
Date: Sun Jan 5 11:38:45 2025 -0500
Issue 6472 - CLI - Improve error message format
Description:
For non-json/non-verbose generic exceptions format the message into
something more readable and friendly (desc, result, and info)
Relates: https://github.com/389ds/389-ds-base/issues/6472
Reviewed by: tbordaz & spichugi (Thanks!!)
diff --git a/src/lib389/cli/dsconf b/src/lib389/cli/dsconf
index b8a980877..d47906981 100755
--- a/src/lib389/cli/dsconf
+++ b/src/lib389/cli/dsconf
@@ -34,6 +34,7 @@ from lib389.cli_base.dsrc import dsrc_to_ldap, dsrc_arg_concat
from lib389.cli_base import setup_script_logger
from lib389.cli_base import format_error_to_dict
from lib389.cli_base import parent_argparser
+from lib389.cli_base import format_pretty_error
from lib389.utils import instance_choices
parser = argparse.ArgumentParser(allow_abbrev=True, parents=[parent_argparser])
@@ -133,14 +134,16 @@ if __name__ == '__main__':
if args.verbose:
log.info("Command successful.")
except Exception as e:
+ result = False
log.debug(e, exc_info=True)
msg = format_error_to_dict(e)
if args and args.json:
sys.stderr.write(f"{json.dumps(msg, indent=4)}\n")
else:
+ if not args.verbose:
+ msg = format_pretty_error(msg)
log.error("Error: %s" % " - ".join(str(val) for val in msg.values()))
- result = False
disconnect_instance(inst)
diff --git a/src/lib389/cli/dscreate b/src/lib389/cli/dscreate
index aa3878ff9..b39177caa 100755
--- a/src/lib389/cli/dscreate
+++ b/src/lib389/cli/dscreate
@@ -18,7 +18,7 @@ from textwrap import dedent
from lib389 import DirSrv
from lib389.cli_ctl import instance as cli_instance
from lib389.cli_base import setup_script_logger
-from lib389.cli_base import format_error_to_dict
+from lib389.cli_base import format_error_to_dict, format_pretty_error
epilog = """
@@ -103,6 +103,8 @@ if __name__ == '__main__':
if args and args.json:
sys.stderr.write(f"{json.dumps(msg, indent=4)}\n")
else:
+ if not args.verbose:
+ msg = format_pretty_error(msg)
log.error("Error: %s" % " - ".join(str(val) for val in msg.values()))
result = False
diff --git a/src/lib389/cli/dsctl b/src/lib389/cli/dsctl
index ac12340fb..9ab830c69 100755
--- a/src/lib389/cli/dsctl
+++ b/src/lib389/cli/dsctl
@@ -31,6 +31,7 @@ from lib389.cli_base import (
disconnect_instance,
setup_script_logger,
format_error_to_dict,
+ format_pretty_error,
parent_argparser
)
from lib389._constants import DSRC_CONTAINER
@@ -131,6 +132,8 @@ if __name__ == '__main__':
if args.json:
sys.stderr.write(f"{json.dumps(msg, indent=4)}\n")
else:
+ if not args.verbose:
+ msg = format_pretty_error(msg)
log.error("Error: %s" % " - ".join(msg.values()))
sys.exit(1)
except Exception as e:
@@ -138,6 +141,8 @@ if __name__ == '__main__':
if args.json:
sys.stderr.write(f"{json.dumps(msg, indent=4)}\n")
else:
+ if not args.verbose:
+ msg = format_pretty_error(msg)
log.error("Error: %s" % " - ".join(msg.values()))
sys.exit(1)
if len(insts) != 1:
@@ -160,6 +165,8 @@ if __name__ == '__main__':
if args.json:
sys.stderr.write(f"{json.dumps(msg, indent=4)}\n")
else:
+ if not args.verbose:
+ msg = format_pretty_error(msg)
log.error("Error: %s" % " - ".join(str(val) for val in msg.values()))
result = False
disconnect_instance(inst)
diff --git a/src/lib389/cli/dsidm b/src/lib389/cli/dsidm
index d318664bc..2a5af280e 100755
--- a/src/lib389/cli/dsidm
+++ b/src/lib389/cli/dsidm
@@ -32,7 +32,7 @@ from lib389.cli_idm import role as cli_role
from lib389.cli_idm import service as cli_service
from lib389.cli_base import connect_instance, disconnect_instance, setup_script_logger
from lib389.cli_base.dsrc import dsrc_to_ldap, dsrc_arg_concat
-from lib389.cli_base import format_error_to_dict
+from lib389.cli_base import format_error_to_dict, format_pretty_error
from lib389.cli_base import parent_argparser
parser = argparse.ArgumentParser(allow_abbrev=True, parents=[parent_argparser])
@@ -145,10 +145,9 @@ if __name__ == '__main__':
if args.json:
sys.stderr.write(f"{json.dumps(msg, indent=4)}\n")
else:
- if 'desc' in msg:
- log.error(f"Error: {msg['desc']}")
- else:
- log.error("Error: %s" % " - ".join(str(val) for val in msg.values()))
+ if not args.verbose:
+ msg = format_pretty_error(msg)
+ log.error("Error: %s" % " - ".join(str(val) for val in msg.values()))
result = False
disconnect_instance(inst)
diff --git a/src/lib389/cli/openldap_to_ds b/src/lib389/cli/openldap_to_ds
index 79b121c66..c54cfb96c 100755
--- a/src/lib389/cli/openldap_to_ds
+++ b/src/lib389/cli/openldap_to_ds
@@ -12,6 +12,7 @@
import argparse
import argcomplete
+import json
import signal
import sys
from lib389 import DirSrv
@@ -19,7 +20,8 @@ from lib389.cli_base import (
connect_instance,
disconnect_instance,
setup_script_logger,
- format_error_to_dict)
+ format_error_to_dict,
+ format_pretty_error)
from lib389.cli_base.dsrc import dsrc_to_ldap, dsrc_arg_concat
from lib389._constants import DSRC_HOME
@@ -254,6 +256,8 @@ if __name__ == '__main__':
if args.json:
sys.stderr.write(f"{json.dumps(msg, indent=4)}\n")
else:
+ if not args.verbose:
+ msg = format_pretty_error(msg)
log.error("Error: %s" % " - ".join(str(val) for val in msg.values()))
result = False
disconnect_instance(inst)
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index af5aab71e..62bd1c1b5 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -73,7 +73,7 @@ def _ldap_op_s(inst, f, fname, *args, **kwargs):
try:
return f(*args, **kwargs)
except ldap.LDAPError as e:
- new_desc = f"{fname}({args},{kwargs}) on instance {inst.serverid}";
+ new_desc = f"{fname}({args},{kwargs}) on instance {inst.serverid}"
if len(e.args) >= 1:
e.args[0]['ldap_request'] = new_desc
logging.getLogger().debug(f"args={e.args}")
@@ -527,9 +527,10 @@ class DSLdapObject(DSLogging, DSLint):
elif value is not None:
value = [ensure_bytes(value)]
- return _modify_ext_s(self._instance,self._dn, [(action, key, value)],
- serverctrls=self._server_controls, clientctrls=self._client_controls,
- escapehatch='i am sure')
+ return _modify_ext_s(self._instance, self._dn, [(action, key, value)],
+ serverctrls=self._server_controls,
+ clientctrls=self._client_controls,
+ escapehatch='i am sure')
def apply_mods(self, mods):
"""Perform modification operation using several mods at once
diff --git a/src/lib389/lib389/cli_base/__init__.py b/src/lib389/lib389/cli_base/__init__.py
index c431f066d..31ea63bbc 100644
--- a/src/lib389/lib389/cli_base/__init__.py
+++ b/src/lib389/lib389/cli_base/__init__.py
@@ -388,7 +388,7 @@ class CustomHelpFormatter(argparse.HelpFormatter):
if len(actions) > 0 and actions[0].option_strings:
actions = parent_arguments + actions
super(CustomHelpFormatter, self).add_arguments(actions)
-
+
def _format_usage(self, usage, actions, groups, prefix):
usage = super(CustomHelpFormatter, self)._format_usage(usage, actions, groups, prefix)
formatted_options = self._format_actions_usage(parent_arguments, [])
@@ -504,9 +504,24 @@ def format_error_to_dict(exception):
# We should fix the code here after the issue is fixed
errmsg = str(exception)
try:
- # The function literal_eval parses the string and returns only if it's a literal.
- # Also, the code is never executed. So there is no reason for a security risk.
+ # The function literal_eval parses the string and returns only if it's
+ # a literal. Also, the code is never executed. So there is no reason
+ # for a security risk.
msg = ast.literal_eval(errmsg)
except Exception:
msg = {'desc': errmsg}
+
return msg
+
+
+def format_pretty_error(msg_dict):
+ """
+ Take a raw exception dict and make a pretty message for the user
+ """
+ msg = f"{msg_dict['desc']}"
+ if 'result' in msg_dict:
+ msg += f"({msg_dict['result']})"
+ if 'info' in msg_dict:
+ msg += f" - {msg_dict['info']}"
+
+ return {'desc': msg}
| 0 |
269a85b79465f6b0a6d1231cbca335a8975e163b
|
389ds/389-ds-base
|
RPM packaging - does the old setup pre and post installation tasks
|
commit 269a85b79465f6b0a6d1231cbca335a8975e163b
Author: Rich Megginson <[email protected]>
Date: Fri Feb 4 00:08:15 2005 +0000
RPM packaging - does the old setup pre and post installation tasks
diff --git a/ldap/cm/newinst/setup b/ldap/cm/newinst/setup
new file mode 100755
index 000000000..1adc2b3bf
--- /dev/null
+++ b/ldap/cm/newinst/setup
@@ -0,0 +1,183 @@
+#!/bin/sh
+#
+# BEGIN COPYRIGHT BLOCK
+# Copyright 2005 Red Hat Inc.
+# All rights reserved.
+# END COPYRIGHT BLOCK
+#
+
+###########################
+#
+# This shell script provides a way to set up a new installation after
+# the binaries have already been extracted. This is typically after
+# using native packaging support to install the package e.g. RPM,
+# pkgadd, depot, etc. This script will show the license, readme,
+# dsktune, then run the usual setup pre and post installers. This
+# script should be run from the server root directory since it uses
+# pwd to get the server root directory.
+#
+##########################
+
+# get command line arguments
+
+# see if silent mode
+
+counter=0
+doMktmp() {
+ tmpfile=`mktemp /tmp/${1}XXXXXX 2> /dev/null`
+ if ! [ $tmpfile ] ; then
+ tmpfile=/tmp/$1.$counter.$$
+ counter=`expr $counter + 1`
+ fi
+ echo $tmpfile
+}
+
+doExit() {
+ echo "ERROR Exiting . . ." | tee -a $logfile
+ if [ $tmpinffile ]; then
+ rm -f $inffile
+ fi
+ echo "Log file is $logfile"
+ exit 1
+}
+
+askYN() {
+ prompt="$1"
+ finished=
+ while ! [ $finished ]; do
+ echo ""
+ echo -n "$prompt (yes/no) " | tee -a $logfile
+ read ans
+ echo $ans >> $logfile
+ case "$ans" in
+ y*|Y*) finished=1 ;;
+ n*|N*) exit 1 ;;
+ *) echo "Please answer yes or no" | tee -a $logfile ;;
+ esac
+ done
+}
+
+logfile=`doMktmp log`
+myargs=
+silent=
+inffile=
+tmpinffile=
+nextisinffile=
+keepinffile=
+for arg in "$@" ; do
+ if [ "$arg" = "-s" ]; then
+ silent=1
+ elif [ "$arg" = "-k" ]; then
+ keepinffile=1
+ elif [ "$arg" = "-f" ]; then
+ nextisinffile=1
+ elif [ $nextisinffile ]; then
+ inffile="$arg"
+ nextisinffile=
+ else
+ myargs="$myargs $arg"
+ fi
+done
+
+echo "INFO Begin Setup . . ." | tee -a $logfile
+# cat LICENSE.txt
+if ! [ $silent ]; then
+ echo "" | tee -a $logfile
+ echo "" | tee -a $logfile
+ echo "" | tee -a $logfile
+ cat LICENSE.txt | tee -a $logfile
+ askYN "Do you accept the license terms?"
+fi
+
+# cat README.txt
+if ! [ $silent ]; then
+ cat README.txt | tee -a $logfile
+ askYN "Continue?"
+fi
+
+# dsktune
+if ! [ $silent ]; then
+ bin/slapd/server/dsktune | tee -a $logfile
+ askYN "Continue?"
+fi
+
+# if silent mode, do not run the pre-installer programs
+# otherwise, create a temp file for their use
+if ! [ $silent ]; then
+ inffile=`doMktmp setup`
+ tmpinffile=1
+
+ # put some common answers in the file
+ hostname=`hostname`
+ echo "" | tee -a $logfile
+ echo -n "Hostname to use (default: $hostname) " | tee -a $logfile
+ read ans
+ echo $ans >> $logfile
+ if [ "$ans" ]; then
+ hostname="$ans"
+ fi
+
+ user=nobody
+ group=nobody
+ echo ""
+ echo -n "Server user ID to use (default: $user) " | tee -a $logfile
+ read ans
+ echo $ans >> $logfile
+ if [ "$ans" ]; then
+ user="$ans"
+ fi
+ echo ""
+ echo -n "Server group ID to use (default: $group) " | tee -a $logfile
+ read ans
+ echo $ans >> $logfile
+ if [ "$ans" ]; then
+ group="$ans"
+ fi
+
+ echo '[General]' >> $inffile
+ echo "FullMachineName = $hostname" >> $inffile
+ echo "SuiteSpotUserID = $user" >> $inffile
+ echo "SuiteSpotGroup = $group" >> $inffile
+ echo ServerRoot = `pwd` >> $inffile
+
+ # first, run ds
+ cd bin/slapd/admin/bin
+ ./ns-config -f $inffile -l $logfile || doExit
+ cd ../../../..
+
+ # next, run admin
+ cd bin/admin
+ ./ns-config -f $inffile -l $logfile || doExit
+ cd ../..
+fi
+
+# do the post installers
+silentarg=""
+if ! [ $silent ] ; then
+ silentarg="-s"
+fi
+
+`pwd`/bin/slapd/admin/bin/ns-update $silentarg $myargs -f $inffile | tee -a $logfile || doExit
+
+`pwd`/bin/admin/ns-update $silentarg $myargs -f $inffile | tee -a $logfile || doExit
+
+echo "INFO Finished with setup, logfile is setup/setup.log" | tee -a $logfile
+if [ -f setup/setup.log ] ; then
+ cat $logfile >> setup/setup.log
+else
+ cp $logfile setup/setup.log
+fi
+rm -f $logfile
+
+if [ $tmpinffile ]; then
+ if [ $keepinffile ]; then
+ if [ -f setup/install.inf ]; then
+ cat $inffile >> setup/install.inf
+ else
+ cp $inffile setup/install.inf
+ fi
+ chmod 600 setup/install.inf
+ fi
+ rm -f $inffile
+fi
+exit 0
| 0 |
29ee6d2ec9af5f6d903e02f30a342f8243bcb1de
|
389ds/389-ds-base
|
issue 4585 - backend redesign phase 3c - dbregion test removal (#4665)
* issue 4585 - backend redesign phase 3c - dbregion test removal
* Issue 4585 - backend redesign phase 3c - dbregion test removal
* Issue 4585 - Backend redesign phase 3c - remove import_lock_fd
|
commit 29ee6d2ec9af5f6d903e02f30a342f8243bcb1de
Author: progier389 <[email protected]>
Date: Wed Mar 31 14:59:23 2021 +0200
issue 4585 - backend redesign phase 3c - dbregion test removal (#4665)
* issue 4585 - backend redesign phase 3c - dbregion test removal
* Issue 4585 - backend redesign phase 3c - dbregion test removal
* Issue 4585 - Backend redesign phase 3c - remove import_lock_fd
diff --git a/Makefile.am b/Makefile.am
index 3a29dae63..bb81469ec 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -266,8 +266,8 @@ if RUST_ENABLE
-rm -rf $(abs_top_builddir)/rs
endif
-dberrstrs.h: Makefile
- $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py -i @db_incdir@ -o .
+dberrstrs.h: Makefile $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py $(srcdir)/ldap/servers/slapd/back-ldbm/dbimpl.h
+ $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py -i $(srcdir)/ldap/servers/slapd/back-ldbm -o .
#------------------------
diff --git a/dirsrvtests/tests/data/longduration/db_protect_long_test_reference_1.4.2.12.json b/dirsrvtests/tests/data/longduration/db_protect_long_test_reference_1.4.2.12.json
new file mode 100644
index 000000000..45d0eb2ee
--- /dev/null
+++ b/dirsrvtests/tests/data/longduration/db_protect_long_test_reference_1.4.2.12.json
@@ -0,0 +1,405 @@
+{"Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix1": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix2": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2archive": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_archive2db": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2archive": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_archive2db": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_nothing": "OK + OK",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + OK",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2archive": "OK + KO",
+ "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_nothing": "OK + OK",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_nothing": "OK + OK",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2ldifSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2ldifSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_ldif2dbSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_ldif2dbSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2indexSuffix1": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2indexSuffix2": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2archive": "OK + KO",
+ "Instance ONLINE ONLINE _job_nothing + ONLINE _job_archive2db": "OK + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_archive2db": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_nothing": "KO + OK",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2ldifSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2ldifSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_ldif2dbSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_ldif2dbSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2indexSuffix1": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2indexSuffix2": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2archive": "KO + KO",
+ "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_archive2db": "KO + KO"}
diff --git a/dirsrvtests/tests/longduration/db_protect_long_test.py b/dirsrvtests/tests/longduration/db_protect_long_test.py
new file mode 100644
index 000000000..333e74c2b
--- /dev/null
+++ b/dirsrvtests/tests/longduration/db_protect_long_test.py
@@ -0,0 +1,372 @@
+#nunn --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2021 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+"""
+Will Verify which tasks (Import/Export/Backup/Restore/Reindex (Offline/Online)) may run at the same time
+"""
+
+import os
+import logging
+import pytest
+import time
+import enum
+import shutil
+import json
+from threading import Thread, get_ident as get_tid
+from enum import auto as EnumAuto
+from lib389.topologies import topology_st as topo
+from lib389.dbgen import dbgen_users
+from lib389.backend import Backend
+from lib389.properties import ( TASK_WAIT )
+
+
+#pytestmark = pytest.mark.tier1
+
+NBUSERS=15000 # Should have enough user so that jobs spends at least a few seconds
+BASE_SUFFIX="dc=i4585,dc=test"
+# result reference file got from version 1.4.2.12
+JSONREFNAME = os.path.join(os.path.dirname(__file__), '../data/longduration/db_protect_long_test_reference_1.4.2.12.json')
+
+
+#Results
+OK="OK"
+KO="KO"
+BUSY="KO" # So far, no diffrence between failure and failure due to busy
+
+# data associated with both suffixes (i.e DN, bakend name, ldif files, and backup directory )
+_suffix1_info={ 'index': 1 }
+_suffix2_info={ 'index': 2 }
+# Threads result
+_result = {}
+# Threads
+_threads = {}
+
+#Mode
+OFFLINE="OFFLINE"
+ONLINE="ONLINE"
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+
+"""
+ create suffix bakend, generate ldif, populate the bakend, get a backup
+ and initialize suffix_info
+ Note: suffix_info['index'] must be set when calling the function
+"""
+def _init_suffix(topo, suffix_info):
+ index = suffix_info['index']
+ # Init suffix_info values
+ suffix = f'dc=suffix{index},' + BASE_SUFFIX
+ suffix_info['suffix'] = suffix
+ ldif_dir = topo.standalone.get_ldif_dir()
+ bak_dir = topo.standalone.get_bak_dir()
+ suffix_info['name'] = f'suffix{index}'
+ suffix_info['rbak'] = bak_dir + f'/r_i4585.bak' # For archive2db
+ suffix_info['wbak'] = bak_dir + f'/w_i4585.bak' # For db2archive
+ suffix_info['rldif'] = ldif_dir + f'/r_suffix{index}.ldif' # For ldif2db
+ suffix_info['wldif'] = ldif_dir + f'/w_suffix{index}.ldif' # For db2ldif
+ # create suffix backend
+ be = Backend(topo.standalone)
+ be.create(properties={'cn': suffix_info['name'], 'nsslapd-suffix': suffix})
+ # Generate rldif ldif file, populate backend, and generate rbak archive
+ dbgen_users(topo.standalone, NBUSERS, suffix_info['rldif'], suffix)
+ # Populate the backend
+ result = _run_ldif2db(topo, ONLINE, suffix_info)
+ assert( result == 0 )
+ # Generate archive (only second suffix is created)
+ if index == 2:
+ shutil.rmtree(suffix_info['rbak'], ignore_errors=True)
+ result = _job_db2archive(topo, ONLINE, suffix_info['rbak'])
+ assert( result == 0 )
+
+
+"""
+ determine json file name
+"""
+def _get_json_filename(topo):
+ return f"{topo.standalone.ds_paths.prefix}/var/log/dirsrv/test_db_protect.json"
+
+
+"""
+ Compare two results pairs
+ Note: In the Success + Failure case, do not care about the order
+ because of the threads race
+"""
+def is_same_result(res1, res2):
+ if res1 == res2:
+ return True
+ if res1 == "OK + KO" and res2 == "KO + OK":
+ return True
+ if res2 == "OK + KO" and res1 == "KO + OK":
+ return True
+ return False
+
+
+"""
+ Run a job within a dedicated thread
+"""
+def _worker(idx, job, topo, mode):
+ log.info(f"Thread {idx} id: {get_tid()} started {mode} job {job.__name__}")
+ rc0 = None
+ rc = None
+ try:
+ rc = job(topo, mode)
+ rc0 = rc
+ if mode == ONLINE:
+ if rc == 0:
+ rc = OK
+ else:
+ rc = KO
+ else:
+ if rc:
+ rc = OK
+ else:
+ rc = KO
+ except Exception as err:
+ log.info(f"Thread {idx} ended {mode} job {job.__name__} with exception {err}")
+ log.info(err, exc_info=True)
+ rc = KO
+ _result[idx] = rc
+ log.info(f"Thread {idx} ended {mode} job {job.__name__} with result {rc} (was {rc0})")
+
+"""
+ Create a new thread to run a job
+"""
+def _start_work(*args):
+ idx = args[0]
+ _threads[idx] = Thread(target=_worker, args=args)
+ log.info(f"created Thread {idx} id: {_threads[idx].ident}")
+ _result[idx] = None
+ _threads[idx].start()
+
+
+"""
+ Wait until thread worker has finished then return the result
+"""
+def _wait4work(idx):
+ _threads[idx].join()
+ log.info(f"completed wait on thread {idx} id: {_threads[idx].ident} result is {_result[idx]}")
+ return _result[idx]
+
+
+"""
+ Tests all pairs of jobs and check that we got the expected result
+ (first job is running in mode1 (ONLINE/OFFLINE)mode)
+ (second job is running in mode2 (ONLINE/OFFLINE)mode)
+"""
+def _check_all_job_pairs(topo, state, mode1, mode2, result):
+ """
+ Checks all couple of jobs with mode1 online/offline for first job and mode2 for second job
+ """
+ for idx1, job1 in enumerate(job_list):
+ for idx2, job2 in enumerate(job_list):
+ log.info(f"Testing {mode1} {job1} + {mode2} {job2}")
+ _start_work("job1", job1, topo, mode1)
+ # Wait enough to insure job1 is started
+ time.sleep(0.5)
+ _start_work("job2", job2, topo, mode2)
+ res1 = _wait4work("job1")
+ res2 = _wait4work("job2")
+ key = f"Instance {state} {mode1} {job1.__name__} + {mode2} {job2.__name__}"
+ val = f"{res1} + {res2}"
+ result[key] = val
+ log.info(f"{key} ==> {val}")
+
+
+"""
+ ********* JOBS DEFINITION **********
+"""
+
+def _run_ldif2db(topo, mode, suffix_info):
+ if mode == OFFLINE:
+ return topo.standalone.ldif2db(suffix_info['name'], None, None, None, suffix_info['rldif'])
+ else:
+ return topo.standalone.tasks.importLDIF(benamebase=suffix_info['name'], input_file=suffix_info['rldif'], args={TASK_WAIT: True})
+
+def _job_ldif2dbSuffix1(topo, mode):
+ return _run_ldif2db(topo, mode, _suffix1_info)
+
+def _job_ldif2dbSuffix2(topo, mode):
+ return _run_ldif2db(topo, mode, _suffix2_info)
+
+
+def _run_db2ldif(topo, mode, suffix_info):
+ if os.path.exists(suffix_info['wldif']):
+ os.remove(suffix_info['wldif'])
+ if mode == OFFLINE:
+ return topo.standalone.db2ldif(suffix_info['name'], None, None, False, False, suffix_info['wldif'])
+ else:
+ return topo.standalone.tasks.exportLDIF(benamebase=suffix_info['name'], output_file=suffix_info['wldif'], args={TASK_WAIT: True})
+
+def _job_db2ldifSuffix1(topo, mode):
+ return _run_db2ldif(topo, mode, _suffix1_info)
+
+def _job_db2ldifSuffix2(topo, mode):
+ return _run_db2ldif(topo, mode, _suffix2_info)
+
+
+def _run_db2index(topo, mode, suffix_info):
+ if mode == OFFLINE:
+ return topo.standalone.db2index(bename=suffix_info['name'], attrs=['cn'])
+ else:
+ return topo.standalone.tasks.reindex(topo.standalone, benamebase=suffix_info['name'], attrname='cn', args={TASK_WAIT: True})
+
+def _job_db2indexSuffix1(topo, mode):
+ return _run_db2index(topo, mode, _suffix1_info)
+
+def _job_db2indexSuffix2(topo, mode):
+ return _run_db2index(topo, mode, _suffix2_info)
+
+
+def _job_db2archive(topo, mode, backup_dir=None):
+ # backup is quite fast solets do it several time to increase chance of having concurrent task
+ if backup_dir is None:
+ backup_dir = _suffix1_info['wbak']
+ shutil.rmtree(backup_dir, ignore_errors=True)
+ if mode == OFFLINE:
+ for i in range(3):
+ rc = topo.standalone.db2bak(backup_dir)
+ if not rc:
+ return False
+ return True
+ else:
+ for i in range(3):
+ rc = topo.standalone.tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True})
+ if (rc != 0):
+ return rc
+ return 0
+
+def _job_archive2db(topo, mode, backup_dir=None):
+ # restore is quite fast solets do it several time to increase chance of having concurrent task
+ if backup_dir is None:
+ backup_dir = _suffix1_info['rbak']
+ if mode == OFFLINE:
+ for i in range(3):
+ rc = topo.standalone.bak2db(backup_dir)
+ if not rc:
+ return False
+ return True
+ else:
+ for i in range(3):
+ rc = topo.standalone.tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True})
+ if (rc != 0):
+ return rc
+ return 0
+
+def _job_nothing(topo, mode):
+ if mode == OFFLINE:
+ return True
+ return 0
+
+"""
+ ********* END OF JOBS DEFINITION **********
+"""
+
+
+# job_list must be defined after the job get defined
+job_list = [ _job_nothing, _job_db2ldifSuffix1, _job_db2ldifSuffix2, _job_ldif2dbSuffix1, _job_ldif2dbSuffix2,
+ _job_db2indexSuffix1, _job_db2indexSuffix2, _job_db2archive, _job_archive2db ]
+
+
+
+"""
+ Beware this test is very long (several hours)
+ it checks the results when two task (like import/export/reindex/backup/archive are run at the same time)
+ and store the result in a json file
+ the compare with a reference
+"""
+
+def test_db_protect(topo):
+ """
+ Add an index, then import via cn=tasks
+
+ :id: 462bc550-87d6-11eb-9310-482ae39447e5
+ :setup: Standalone Instance
+ :steps:
+ 1. Initialize suffixes
+ 2. Stop server instance
+ 3. Compute results for all couples of jobs in OFFLINE,OFFLINE mode
+ 4. Start server instance
+ 5. Compute results for all couples of jobs in OFFLINE,OFFLINE mode
+ 6. Compute results for all couples of jobs in ONLINE,OFFLINE mode
+ 7. Compute results for all couples of jobs in OFFLINE,ONLINE mode
+ 8. Compute results for all couples of jobs in ONLINE,ONLINE mode
+ 9. Store results in log file and json file
+ 10. Read json reference file
+ 11. Compute the difference between result and reference
+ 12. Logs the differences
+ 13. Assert if differences is not empty
+
+ :expected results:
+ 1. Operation successful
+ 2. Operation successful
+ 3. Operation successful
+ 4. Operation successful
+ 5. Operation successful
+ 6. Operation successful
+ 7. Operation successful
+ 8. Operation successful
+ 9. Operation successful
+ 10. Operation successful
+ 11. Operation successful
+ 12. Operation successful
+ 13. Operation successful
+ """
+ # Step 1: Initialize suffixes
+ _init_suffix(topo, _suffix1_info)
+ _init_suffix(topo, _suffix2_info)
+ result={}
+ # Step 2: Stop server instance
+ topo.standalone.stop()
+ log.info("Server instance is now stopped.")
+ # Step 3: Compute results for all couples of jobs in OFFLINE,OFFLINE mode
+ _check_all_job_pairs(topo, OFFLINE, OFFLINE, OFFLINE, result)
+ # Step 4: Start server instance
+ topo.standalone.start()
+ log.info("Server instance is now started.")
+ # Step 5: Compute results for all couples of jobs in OFFLINE,OFFLINE mode
+ _check_all_job_pairs(topo, ONLINE, OFFLINE, OFFLINE, result)
+ # Step 6: Compute results for all couples of jobs in ONLINE,OFFLINE mode
+ _check_all_job_pairs(topo, ONLINE, ONLINE, OFFLINE, result)
+ # Step 7: Compute results for all couples of jobs in OFFLINE,ONLINE mode
+ _check_all_job_pairs(topo, ONLINE, OFFLINE, ONLINE, result)
+ # Step 8: Compute results for all couples of jobs in ONLINE,ONLINE mode
+ _check_all_job_pairs(topo, ONLINE, ONLINE, ONLINE, result)
+ # Step 9: Logs the results and store the json file
+ for key,val in result.items():
+ log.info(f"{key} ==> {val}")
+ with open(_get_json_filename(topo), "w") as jfile:
+ json.dump(result, jfile)
+ # Step 10: read json reference file
+ with open(JSONREFNAME, "r") as jfile:
+ ref = json.load(jfile)
+ # Step 11: Compute the differences
+ differences={}
+ for key, value in result.items():
+ if key in ref:
+ if not is_same_result(value, ref[key]):
+ differences[key] = ( value, ref[key] )
+ else:
+ differences[key] = ( value, None )
+ for key, value in ref.items():
+ if not key in result:
+ differences[key] = ( None, value )
+ # Step 12: Log the differences
+ log.info(f"difference between result an 1.4.2.12 reference are:")
+ log.info(f" key: (result, reference)")
+ for key, value in differences.items():
+ log.info(f"{key}: {value}")
+ # Step 13: assert if there are differences
+ assert not differences
+
diff --git a/ldap/servers/slapd/back-ldbm/archive.c b/ldap/servers/slapd/back-ldbm/archive.c
index e51a5631d..e129d9672 100644
--- a/ldap/servers/slapd/back-ldbm/archive.c
+++ b/ldap/servers/slapd/back-ldbm/archive.c
@@ -358,7 +358,7 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
}
}
if (0 != MKDIR(directory, SLAPD_DEFAULT_DIR_MODE) && EEXIST != errno) {
- char *msg = dblayer_strerror(errno);
+ const char *msg = dblayer_strerror(errno);
slapi_log_err(SLAPI_LOG_ERR,
"ldbm_back_ldbm2archive", "mkdir(%s) failed; errno %i (%s)\n",
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
index f2a62ac24..186c11cc3 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
@@ -2017,7 +2017,7 @@ bdb_pre_close(struct ldbminfo *li)
conf = (bdb_config *)li->li_dblayer_config;
bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
- if (conf->bdb_stop_threads) /* already stopped. do nothing... */
+ if (conf->bdb_stop_threads || !pEnv) /* already stopped. do nothing... */
return;
/* first, see if there are any housekeeping threads running */
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index d970d6bba..d2c9a9184 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -298,33 +298,30 @@ int dblayer_set_dup_cmp_fn(Slapi_Backend *be, struct attrinfo *a, dbi_dup_cmp_t
return priv->dblayer_set_dup_cmp_fn(a, idx);
}
-char *
+const char *
dblayer_strerror(int error)
{
- switch (error)
- {
- case DBI_RC_SUCCESS:
- return "No error.";
- case DBI_RC_UNSUPPORTED:
- return "Database operation error: Operation not supported.";
- case DBI_RC_BUFFER_SMALL:
- return "Database operation error: Buffer is too small to store the result.";
- case DBI_RC_KEYEXIST:
- return "Database operation error: Key already exists.";
- case DBI_RC_NOTFOUND:
- return "Database operation error: Key not found (or no more keys).";
- case DBI_RC_RUNRECOVERY:
- return "Database operation error: Database recovery is needed.";
- case DBI_RC_RETRY:
- return "Database operation error: Transient error. transaction should be retried.";
- case DBI_RC_OTHER:
- return "Database operation error: Unhandled code. See details in previous error messages.";
- default:
- return "Unexpected error code.";
- }
+ /*
+ * Cannot use slapi_pr_strerror() because dbscan is not linked with
+ * libslapd so lets use dberrstrs.h
+ * And table is small enough that it is not worth to use bsearch;
+ */
+ static const struct {
+ int errcode;
+ const char *errmsg;
+ } errtab[] = {
+#include "dberrstrs.h"
+ { DBI_RC_SUCCESS, "No error." }
+ }, *errpt = errtab;
+
+ do {
+ if (errpt->errcode == error) {
+ return errpt->errmsg;
+ }
+ } while (errpt++ ->errcode);
+ return "Unexpected dbimpl error code";
}
-
int dblayer_cursor_get_count(dbi_cursor_t *cursor, dbi_recno_t *count)
{
dblayer_private *priv;
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.h b/ldap/servers/slapd/back-ldbm/dbimpl.h
index 6aa5e1059..9c6841fda 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.h
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.h
@@ -10,22 +10,27 @@
#define _DBIMPL_H
#include "../slapi-plugin.h"
-/* Temporary wrapup toward libdb */
-#include <db.h>
#define MEM_FOR_DB_PLUGINS (8*(sizeof (long)))
-/* WARNING! dblayer_strerror() should be modified when modifying dbi_error_t */
+/* Note: DBI_RC_ definition are parsed by ../mkDBErrStrs.py to generate
+ * the errormap sorted error message table used by slapi_pr_strerror().
+ * So:
+ * - The error code is important. value should be sorted in increasing order
+ * and should be lesser than SSL_ERROR_BASE (i.e -0x3000)
+ * - The comment format is important as it is used by ../mkDBErrStrs.py (beware to preserve the
+ enum value and the comment value in sync when adding/removing error codes)
+ */
typedef enum {
DBI_RC_SUCCESS,
- DBI_RC_UNSUPPORTED = 389000, /* db plugin does not support the operation */
- DBI_RC_BUFFER_SMALL,
- DBI_RC_KEYEXIST,
- DBI_RC_NOTFOUND,
- DBI_RC_RUNRECOVERY,
- DBI_RC_RETRY,
- DBI_RC_INVALID,
- DBI_RC_OTHER
+ DBI_RC_UNSUPPORTED = -0x3200, /* -12800, Database operation error: Operation not supported. */
+ DBI_RC_BUFFER_SMALL, /* -12799, Database operation error: Buffer is too small to store the result. */
+ DBI_RC_KEYEXIST, /* -12798, Database operation error: Key already exists. */
+ DBI_RC_NOTFOUND, /* -12797, Database operation error: Key not found (or no more keys). */
+ DBI_RC_RUNRECOVERY, /* -12796, Database operation error: Database recovery is needed. */
+ DBI_RC_RETRY, /* -12795, Database operation error: Transient error. transaction should be retried. */
+ DBI_RC_INVALID, /* -12794, Database operation error: Invalid parameter or invalid state. */
+ DBI_RC_OTHER /* -12793, Database operation error: Unhandled Database operation error. See details in previous error messages. */
} dbi_error_t;
@@ -140,7 +145,7 @@ int dblayer_dbi_txn_abort(Slapi_Backend *be, dbi_txn_t *txn);
int dblayer_get_entries_count(Slapi_Backend *be, dbi_db_t *db, int *count);
int dblayer_cursor_get_count(dbi_cursor_t *cursor, dbi_recno_t *count);
char *dblayer_get_db_filename(Slapi_Backend *be, dbi_db_t *db);
-char *dblayer_strerror(int error);
+const char *dblayer_strerror(int error);
const char *dblayer_op2str(dbi_op_t op);
int dblayer_cursor_get_count(dbi_cursor_t *cursor, dbi_recno_t *count);
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.h b/ldap/servers/slapd/back-ldbm/dblayer.h
index e4b8e2819..99acfd1ab 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.h
+++ b/ldap/servers/slapd/back-ldbm/dblayer.h
@@ -57,17 +57,6 @@
#define DBLAYER_LIB_VERSION_PRE_24 1
#define DBLAYER_LIB_VERSION_POST_24 2
-/* Define constants from DB2.4 when using DB2.3 header file */
-#ifndef DB_TSL_SPINS
-#define DB_TSL_SPINS 21 /* DB: initialize spin count. */
-#endif
-#ifndef DB_REGION_INIT
-#define DB_REGION_INIT 24 /* DB: page-fault regions in create. */
-#endif
-#ifndef DB_REGION_NAME
-#define DB_REGION_NAME 25 /* DB: named regions, no backing file. */
-#endif
-
typedef int dblayer_start_fn_t(struct ldbminfo *li, int flags);
typedef int dblayer_close_fn_t(struct ldbminfo *li, int flags);
typedef int dblayer_instance_start_fn_t(backend *be, int flags);
diff --git a/ldap/servers/slapd/back-ldbm/idl.c b/ldap/servers/slapd/back-ldbm/idl.c
index d45bc23d3..5d38a3372 100644
--- a/ldap/servers/slapd/back-ldbm/idl.c
+++ b/ldap/servers/slapd/back-ldbm/idl.c
@@ -234,7 +234,7 @@ idl_fetch_one(
do {
*err = dblayer_db_op(be, db, txn, DBI_OP_GET, key, &data);
if (0 != *err && DBI_RC_NOTFOUND != *err && DBI_RC_RETRY != *err) {
- char *msg;
+ const char *msg;
if (EPERM == *err && *err != errno) {
slapi_log_err(SLAPI_LOG_ERR,
"idl_fetch_one", "(%s) Database failed to run, "
@@ -414,7 +414,7 @@ idl_store(
rc = dblayer_db_op(be, db, txn, DBI_OP_PUT, key, &data);
if (0 != rc) {
- char *msg;
+ const char *msg;
if (EPERM == rc && rc != errno) {
slapi_log_err(SLAPI_LOG_ERR,
"idl_store", "(%s) Database failed to run, "
@@ -495,7 +495,7 @@ idl_change_first(
dbi_txn_t *txn)
{
int rc;
- char *msg;
+ const char *msg;
/* delete old key block */
rc = dblayer_db_op(be, db, txn, DBI_OP_DEL, bkey, 0);
@@ -591,7 +591,7 @@ idl_old_insert_key(
int *disposition)
{
int i, j, rc = 0;
- char *msg;
+ const char *msg;
IDList *idl, *tmp, *tmp2, *tmp3;
char *kstr;
dbi_val_t k2 = {0};
@@ -1358,7 +1358,7 @@ idl_old_delete_key(
struct attrinfo *a __attribute__((unused)))
{
int i, j, rc;
- char *msg;
+ const char *msg;
IDList *idl, *didl;
dbi_val_t contkey = {0};
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index 557390d8c..4582f9334 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -63,7 +63,7 @@ ldbm_back_add(Slapi_PBlock *pb)
back_txn txn = {0};
back_txnid parent_txn;
int retval = -1;
- char *msg;
+ const char *msg;
int managedsait;
int ldap_result_code = LDAP_SUCCESS;
char *ldap_result_message = NULL;
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 1ff6f5770..24c0147f0 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -37,7 +37,7 @@ ldbm_back_delete(Slapi_PBlock *pb)
back_txn txn;
back_txnid parent_txn;
int retval = -1;
- char *msg;
+ const char *msg;
char *errbuf = NULL;
int retry_count = 0;
int disk_full = 0;
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 175ccd89a..55535a266 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -500,7 +500,7 @@ ldbm_back_modify(Slapi_PBlock *pb)
modify_context ruv_c = {0};
int ruv_c_init = 0;
int retval = -1;
- char *msg;
+ const char *msg;
char *errbuf = NULL;
int retry_count = 0;
int disk_full = 0;
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 0f52ead4d..88085db27 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -41,7 +41,7 @@ ldbm_back_modrdn(Slapi_PBlock *pb)
back_txn txn;
back_txnid parent_txn;
int retval = -1;
- char *msg;
+ const char *msg;
Slapi_Entry *postentry = NULL;
char *errbuf = NULL;
int disk_full = 0;
@@ -1794,7 +1794,7 @@ modrdn_rename_entry_update_indexes(back_txn *ptxn, Slapi_PBlock *pb, struct ldbm
backend *be;
ldbm_instance *inst;
int retval = 0;
- char *msg;
+ const char *msg;
Slapi_Operation *operation;
int is_ruv = 0; /* True if the current entry is RUV */
int cache_rc = 0;
diff --git a/ldap/servers/slapd/back-ldbm/misc.c b/ldap/servers/slapd/back-ldbm/misc.c
index 9845425e3..4ff08dd1e 100644
--- a/ldap/servers/slapd/back-ldbm/misc.c
+++ b/ldap/servers/slapd/back-ldbm/misc.c
@@ -39,7 +39,7 @@ ldbm_set_error(Slapi_PBlock *pb, int retval, int *ldap_result_code, char **ldap_
void
ldbm_nasty(char *func, const char *str, int c, int err)
{
- char *msg = NULL;
+ const char *msg = NULL;
char buffer[200];
if (err == DBI_RC_RETRY) {
PR_snprintf(buffer, 200, "%s WARNING %d", str, c);
diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c
index 8e903b383..71037561d 100644
--- a/ldap/servers/slapd/back-ldbm/start.c
+++ b/ldap/servers/slapd/back-ldbm/start.c
@@ -129,7 +129,7 @@ ldbm_back_start(Slapi_PBlock *pb)
retval = dblayer_start(li, DBLAYER_NORMAL_MODE);
}
if (0 != retval) {
- char *msg;
+ const char *msg;
slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_start", "Failed to init database, err=%d %s\n",
retval, (msg = dblayer_strerror(retval)) ? msg : "");
if (LDBM_OS_ERR_IS_DISKFULL(retval))
@@ -141,7 +141,7 @@ ldbm_back_start(Slapi_PBlock *pb)
/* Walk down the instance list, starting all the instances. */
retval = ldbm_instance_startall(li);
if (0 != retval) {
- char *msg;
+ const char *msg;
slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_start", "Failed to start databases, err=%d %s\n",
retval, (msg = dblayer_strerror(retval)) ? msg : "");
if (LDBM_OS_ERR_IS_DISKFULL(retval))
diff --git a/ldap/servers/slapd/libmakefile b/ldap/servers/slapd/libmakefile
index 916d1f4b9..65b5d6ab2 100644
--- a/ldap/servers/slapd/libmakefile
+++ b/ldap/servers/slapd/libmakefile
@@ -118,8 +118,8 @@ clean:
$(ERRORMAP.O): $(DIRVERDIR)/dberrstrs.h
-$(DIRVERDIR)/dberrstrs.h: $(DB_INCLUDE)/db.h
-mkDBErrStrs.py -i $(DB_INCLUDE) -o $(DIRVERDIR)
+$(DIRVERDIR)/dberrstrs.h: mkDBErrStrs.py back-ldbm/dbimpl.h
+ mkDBErrStrs.py -i back-ldbm -o $(DIRVERDIR)
# Target to push the built binary to an installed server
LIBSLAPD_PUSH = $(addprefix $(INSTDIR)/, bin/slapd/server/libslapd.dll)
diff --git a/ldap/servers/slapd/mkDBErrStrs.py b/ldap/servers/slapd/mkDBErrStrs.py
index 5dffbcbfc..61e907f6b 100755
--- a/ldap/servers/slapd/mkDBErrStrs.py
+++ b/ldap/servers/slapd/mkDBErrStrs.py
@@ -8,33 +8,24 @@ def build_header(args):
"""Dynamically build the all the bdb errors codes and strings into a header
file used by the server.
"""
- re_dberr = re.compile(r'^#define[ ][_A-Z]*[ ]*\(-[0-9]*')
+
+ re_dberr = re.compile(r'^ *DBI_RC_')
if args.output_dir is not None:
new_header = f"{args.output_dir}/dberrstrs.h"
else:
new_header = "dberrstrs.h"
- with open(f"{args.bdb_dir}/db.h") as bdb_file:
+ with open(f"{args.dbi_dir}/dbimpl.h") as bdb_file:
err_list = []
line = bdb_file.readline()
while line:
mo = re_dberr.search(line)
if mo is not None:
# Get the error code and error string from lines like this:
- ignore, err_start = line.split('(', 1)
- err_code, r = err_start.split(')', 1)
- if '/* ' in r:
- ignore, err_msg = r.split('/* ', 1)
- if '*/' not in err_msg:
- # continuation line, get the next line
- line = bdb_file.readline()
- err_str = err_msg.rstrip() + " " + line.replace("*/", "").strip()
- else:
- err_str, ignore = err_msg.split('*/', 1)
- err_str = err_str.replace('"', '\\"').rstrip()
- else:
- err_str = ""
- err_list.append((err_code, err_str))
+ err_split = line.split('*', 2)
+ if len(err_split) == 3:
+ err_code, err_str = err_split[1].split(',', 1)
+ err_list.append((err_code.strip(), err_str.strip()))
line = bdb_file.readline()
# Sort the dict
@@ -54,8 +45,8 @@ def main():
bdb_parser.add_argument('-o', '--output', help='The output file location',
dest='output_dir', default=None)
bdb_parser.add_argument('-i', '--include-dir',
- help='The location of the libdb header file',
- dest='bdb_dir', required=True)
+ help='The location of the dbimpl header file',
+ dest='dbi_dir', required=True)
args = bdb_parser.parse_args()
# Do it!
| 0 |
8d4000b79f2b7e7801df32cf64f0c84321c2adae
|
389ds/389-ds-base
|
Ticket 48921 - Adding replication and reliability tests
Description: Add two replication stress tests, one with just 4 masters, and
the other with 4 masters, 2 hubs, and 4 consumers.
Add a connection stress test (for nunc-stans)
Also made some small changes to the create_test.py script
https://fedorahosted.org/389/ticket/48921
Reviewed by: ?
|
commit 8d4000b79f2b7e7801df32cf64f0c84321c2adae
Author: Mark Reynolds <[email protected]>
Date: Mon Aug 1 09:52:49 2016 -0400
Ticket 48921 - Adding replication and reliability tests
Description: Add two replication stress tests, one with just 4 masters, and
the other with 4 masters, 2 hubs, and 4 consumers.
Add a connection stress test (for nunc-stans)
Also made some small changes to the create_test.py script
https://fedorahosted.org/389/ticket/48921
Reviewed by: ?
diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py
index 19b838fa9..3898279f6 100755
--- a/dirsrvtests/create_test.py
+++ b/dirsrvtests/create_test.py
@@ -67,13 +67,13 @@ def writeFinalizer():
TEST.write(' def fin():\n')
TEST.write(' """')
TEST.write('If we are debugging just stop the instances, ' +
- 'otherwise remove\n them\n')
+ 'otherwise remove them\n')
TEST.write(' """\n')
TEST.write(' if DEBUGGING:\n')
writeInstanceOp('stop')
TEST.write(' else:\n')
writeInstanceOp('delete')
- TEST.write('\n request.addfinalizer(fin)')
+ TEST.write(' request.addfinalizer(fin)')
TEST.write('\n\n')
@@ -191,7 +191,7 @@ if len(sys.argv) > 0:
TEST.write('if DEBUGGING:\n')
TEST.write(' logging.getLogger(__name__).setLevel(logging.DEBUG)\n')
TEST.write('else:\n')
- TEST.write(' logging.getLogger(__name__).setLevel(logging.INFO)\n\n\n')
+ TEST.write(' logging.getLogger(__name__).setLevel(logging.INFO)\n')
TEST.write('log = logging.getLogger(__name__)\n\n\n')
#
@@ -649,7 +649,6 @@ if len(sys.argv) > 0:
writeFinalizer()
- TEST.write('\n')
TEST.write(' return TopologyStandalone(standalone')
for idx in range(instances):
idx += 1
diff --git a/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py
new file mode 100644
index 000000000..d3ee77386
--- /dev/null
+++ b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py
@@ -0,0 +1,289 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+import signal
+import threading
+from lib389 import DirSrv
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+DEBUGGING = False
+
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+MAX_CONNS = 10000000
+MAX_THREADS = 20
+STOP = False
+HOSTNAME = DirSrvTools.getLocalhost()
+PORT = 389
+
+
+class TopologyStandalone(object):
+ """The DS Topology Class"""
+ def __init__(self, standalone):
+ """Init"""
+ standalone.open()
+ self.standalone = standalone
+
+
[email protected](scope="module")
+def topology(request):
+ """Create DS Deployment"""
+
+ # Creating standalone instance ...
+ if DEBUGGING:
+ standalone = DirSrv(verbose=True)
+ else:
+ standalone = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ standalone.delete()
+ standalone.create()
+ standalone.open()
+
+ def fin():
+ """If we are debugging just stop the instances, otherwise remove them
+ """
+ if DEBUGGING:
+ standalone.stop()
+ else:
+ standalone.delete()
+ request.addfinalizer(fin)
+
+ return TopologyStandalone(standalone)
+
+
+def signalHandler(signal, frame):
+ """
+ handle control-C cleanly
+ """
+ global STOP
+ STOP = True
+ sys.exit(0)
+
+
+def init(inst):
+ """Set the idle timeout, and add sample entries
+ """
+
+ try:
+ inst.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-idletimeout',
+ '5')])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to set idletimeout: ' + str(e))
+ assert False
+
+ try:
+ inst.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-enable-nunc-stans',
+ 'on')])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to enable nunc-stans: ' + str(e))
+ assert False
+
+ for idx in range(0, 9):
+ user_dn = 'uid=entry%d,%s' % (idx, DEFAULT_SUFFIX)
+ try:
+ inst.add_s(Entry((user_dn,
+ {'objectclass': ['top', 'extensibleObject'],
+ 'uid': 'entry%d' % idx,
+ 'cn': 'entry%d' % idx,
+ 'userpassword': 'password'})))
+ except ldap.LDAPError as e:
+ log.fatal('Failed to add user entry (%s): %s' % (user_dn, str(e)))
+ assert False
+
+ inst.restart()
+
+
+class BindOnlyConn(threading.Thread):
+ """This class opens and closes connections
+ """
+ def __init__(self, inst):
+ """Initialize the thread class with the server instance info"""
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.inst = inst
+
+ def run(self):
+ """Keep opening and closing connections"""
+ idx = 0
+ err_count = 0
+ global STOP
+ while idx < MAX_CONNS and not STOP:
+ try:
+ conn = self.inst.openConnection()
+ conn.unbind_s()
+ time.sleep(.2)
+ err_count = 0
+ except ldap.LDAPError as e:
+ err_count += 1
+ if err_count > 3:
+ log.error('BindOnlyConn exiting thread: %s' %
+ (str(e)))
+ return
+ time.sleep(.4)
+ idx += 1
+
+
+class IdleConn(threading.Thread):
+ """This class opens and closes connections
+ """
+ def __init__(self, inst):
+ """Initialize the thread class withte server isntance info"""
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.inst = inst
+
+ def run(self):
+ """Assume idleTimeout is set to less than 10 seconds
+ """
+ idx = 0
+ err_count = 0
+ global STOP
+ while idx < (MAX_CONNS / 10) and not STOP:
+ try:
+ conn = self.inst.openConnection()
+ conn.simple_bind_s('uid=entry0,dc=example,dc=com', 'password')
+ conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+ 'uid=*')
+ time.sleep(10)
+ conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+ 'cn=*')
+ conn.unbind_s()
+ time.sleep(.2)
+ err_count = 0
+ except ldap.LDAPError as e:
+ err_count += 1
+ if err_count > 3:
+ log.error('IdleConn exiting thread: %s' %
+ (str(e)))
+ return
+ time.sleep(.4)
+ idx += 1
+
+
+class LongConn(threading.Thread):
+ """This class opens and closes connections to a specified server
+ """
+ def __init__(self, inst):
+ """Initialize the thread class with the server instance info"""
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.inst = inst
+
+ def run(self):
+ """Assume idleTimeout is set to less than 10 seconds
+ """
+ idx = 0
+ err_count = 0
+ global STOP
+ while idx < MAX_CONNS and not STOP:
+ try:
+ conn = self.inst.openConnection()
+ conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+ 'objectclass=*')
+ conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+ 'uid=mark')
+ conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE,
+ 'cn=*')
+ conn.search_s('', ldap.SCOPE_BASE, 'objectclass=*')
+ conn.unbind_s()
+ time.sleep(.2)
+ err_count = 0
+ except ldap.LDAPError as e:
+ err_count += 1
+ if err_count > 3:
+ log.error('LongConn search exiting thread: %s' %
+ (str(e)))
+ return
+ time.sleep(.4)
+ idx += 1
+
+
+def test_connection_load(topology):
+ """Send the server a variety of connections using many threads:
+ - Open, Bind, Close
+ - Open, Bind, Search, wait to trigger idletimeout, Search, Close
+ - Open, Bind, Search, Search, Search, Close
+ """
+
+ # setup the control-C signal handler
+ signal.signal(signal.SIGINT, signalHandler)
+
+ # Set the config and add sample entries
+ log.info('Initializing setup...')
+ init(topology.standalone)
+
+ #
+ # Bind/Unbind Conn Threads
+ #
+ log.info('Launching Bind-Only Connection threads...')
+ threads = []
+ idx = 0
+ while idx < MAX_THREADS:
+ threads.append(BindOnlyConn(topology.standalone))
+ idx += 1
+ for thread in threads:
+ thread.start()
+ time.sleep(0.1)
+
+ #
+ # Idle Conn Threads
+ #
+ log.info('Launching Idle Connection threads...')
+ idx = 0
+ idle_threads = []
+ while idx < MAX_THREADS:
+ idle_threads.append(IdleConn(topology.standalone))
+ idx += 1
+ for thread in idle_threads:
+ thread.start()
+ time.sleep(0.1)
+
+ #
+ # Long Conn Threads
+ #
+ log.info('Launching Long Connection threads...')
+ idx = 0
+ long_threads = []
+ while idx < MAX_THREADS:
+ long_threads.append(LongConn(topology.standalone))
+ idx += 1
+ for thread in long_threads:
+ thread.start()
+ time.sleep(0.1)
+
+ #
+ # Now wait for all the threads to complete
+ #
+ log.info('Waiting for threads to finish...')
+ while threading.active_count() > 0:
+ time.sleep(1)
+
+ log.info('Done')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py
new file mode 100644
index 000000000..95c0b71fc
--- /dev/null
+++ b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py
@@ -0,0 +1,969 @@
+import os
+import sys
+import time
+import datetime
+import ldap
+import logging
+import pytest
+import threading
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.repltools import ReplTools
+
+logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+DEBUGGING = False
+ADD_DEL_COUNT = 500
+MAX_LOOPS = 5
+TEST_CONVERGE_LATENCY = True
+CONVERGENCE_TIMEOUT = '60'
+master_list = []
+hub_list = []
+con_list = []
+TEST_START = time.time()
+
+LAST_DN_IDX = ADD_DEL_COUNT - 1
+LAST_DN_M1 = 'DEL dn="uid=master_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+LAST_DN_M2 = 'DEL dn="uid=master_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+LAST_DN_M3 = 'DEL dn="uid=master_3-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+LAST_DN_M4 = 'DEL dn="uid=master_4-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+
+
+class TopologyReplication(object):
+ """The Replication Topology Class"""
+ def __init__(self, master1, master2, master3, master4, hub1, hub2,
+ consumer1, consumer2, consumer3, consumer4):
+ """Init"""
+ master1.open()
+ self.master1 = master1
+ master2.open()
+ self.master2 = master2
+ master3.open()
+ self.master3 = master3
+ master4.open()
+ self.master4 = master4
+ hub1.open()
+ self.hub1 = hub1
+ hub2.open()
+ self.hub2 = hub2
+ consumer1.open()
+ self.consumer1 = consumer1
+ consumer2.open()
+ self.consumer2 = consumer2
+ consumer3.open()
+ self.consumer3 = consumer3
+ consumer4.open()
+ self.consumer4 = consumer4
+ master_list.append(master1.serverid)
+ master_list.append(master2.serverid)
+ master_list.append(master3.serverid)
+ master_list.append(master4.serverid)
+ hub_list.append(hub1.serverid)
+ hub_list.append(hub2.serverid)
+ con_list.append(consumer1.serverid)
+ con_list.append(consumer2.serverid)
+ con_list.append(consumer3.serverid)
+ con_list.append(consumer4.serverid)
+
+
[email protected](scope="module")
+def topology(request):
+ """Create Replication Deployment"""
+
+ # Creating master 1...
+ if DEBUGGING:
+ master1 = DirSrv(verbose=True)
+ else:
+ master1 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_1
+ args_instance[SER_PORT] = PORT_MASTER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master1.allocate(args_master)
+ instance_master1 = master1.exists()
+ if instance_master1:
+ master1.delete()
+ master1.create()
+ master1.open()
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_1)
+
+ # Creating master 2...
+ if DEBUGGING:
+ master2 = DirSrv(verbose=True)
+ else:
+ master2 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_2
+ args_instance[SER_PORT] = PORT_MASTER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master2.allocate(args_master)
+ instance_master2 = master2.exists()
+ if instance_master2:
+ master2.delete()
+ master2.create()
+ master2.open()
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_2)
+
+ # Creating master 3...
+ if DEBUGGING:
+ master3 = DirSrv(verbose=True)
+ else:
+ master3 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_3
+ args_instance[SER_PORT] = PORT_MASTER_3
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master3.allocate(args_master)
+ instance_master3 = master3.exists()
+ if instance_master3:
+ master3.delete()
+ master3.create()
+ master3.open()
+ master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_3)
+
+ # Creating master 4...
+ if DEBUGGING:
+ master4 = DirSrv(verbose=True)
+ else:
+ master4 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_4
+ args_instance[SER_PORT] = PORT_MASTER_4
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master4.allocate(args_master)
+ instance_master4 = master4.exists()
+ if instance_master4:
+ master4.delete()
+ master4.create()
+ master4.open()
+ master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_4)
+
+ # Creating hub 1...
+ if DEBUGGING:
+ hub1 = DirSrv(verbose=True)
+ else:
+ hub1 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_HUB_1
+ args_instance[SER_PORT] = PORT_HUB_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_hub = args_instance.copy()
+ hub1.allocate(args_hub)
+ instance_hub1 = hub1.exists()
+ if instance_hub1:
+ hub1.delete()
+ hub1.create()
+ hub1.open()
+ hub1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_HUB,
+ replicaId=REPLICAID_HUB_1)
+
+ # Creating hub 2...
+ if DEBUGGING:
+ hub2 = DirSrv(verbose=True)
+ else:
+ hub2 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_HUB_2
+ args_instance[SER_PORT] = PORT_HUB_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_HUB_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_hub = args_instance.copy()
+ hub2.allocate(args_hub)
+ instance_hub2 = hub2.exists()
+ if instance_hub2:
+ hub2.delete()
+ hub2.create()
+ hub2.open()
+ hub2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_HUB,
+ replicaId=REPLICAID_HUB_2)
+
+ # Creating consumer 1...
+ if DEBUGGING:
+ consumer1 = DirSrv(verbose=True)
+ else:
+ consumer1 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_CONSUMER_1
+ args_instance[SER_PORT] = PORT_CONSUMER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_consumer = args_instance.copy()
+ consumer1.allocate(args_consumer)
+ instance_consumer1 = consumer1.exists()
+ if instance_consumer1:
+ consumer1.delete()
+ consumer1.create()
+ consumer1.open()
+ consumer1.replica.enableReplication(suffix=SUFFIX,
+ role=REPLICAROLE_CONSUMER,
+ replicaId=CONSUMER_REPLICAID)
+
+ # Creating consumer 2...
+ if DEBUGGING:
+ consumer2 = DirSrv(verbose=True)
+ else:
+ consumer2 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_CONSUMER_2
+ args_instance[SER_PORT] = PORT_CONSUMER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_consumer = args_instance.copy()
+ consumer2.allocate(args_consumer)
+ instance_consumer2 = consumer2.exists()
+ if instance_consumer2:
+ consumer2.delete()
+ consumer2.create()
+ consumer2.open()
+ consumer2.replica.enableReplication(suffix=SUFFIX,
+ role=REPLICAROLE_CONSUMER,
+ replicaId=CONSUMER_REPLICAID)
+
+ # Creating consumer 3...
+ if DEBUGGING:
+ consumer3 = DirSrv(verbose=True)
+ else:
+ consumer3 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_CONSUMER_3
+ args_instance[SER_PORT] = PORT_CONSUMER_3
+ args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_3
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_consumer = args_instance.copy()
+ consumer3.allocate(args_consumer)
+ instance_consumer3 = consumer3.exists()
+ if instance_consumer3:
+ consumer3.delete()
+ consumer3.create()
+ consumer3.open()
+ consumer3.replica.enableReplication(suffix=SUFFIX,
+ role=REPLICAROLE_CONSUMER,
+ replicaId=CONSUMER_REPLICAID)
+
+ # Creating consumer 4...
+ if DEBUGGING:
+ consumer4 = DirSrv(verbose=True)
+ else:
+ consumer4 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_CONSUMER_4
+ args_instance[SER_PORT] = PORT_CONSUMER_4
+ args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_4
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_consumer = args_instance.copy()
+ consumer4.allocate(args_consumer)
+ instance_consumer4 = consumer4.exists()
+ if instance_consumer4:
+ consumer4.delete()
+ consumer4.create()
+ consumer4.open()
+ consumer4.replica.enableReplication(suffix=SUFFIX,
+ role=REPLICAROLE_CONSUMER,
+ replicaId=CONSUMER_REPLICAID)
+
+ #
+ # Create all the agreements
+ #
+
+ # Creating agreement from master 1 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
+ port=master2.port,
+ properties=properties)
+ if not m1_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m2_agmt)
+
+ # Creating agreement from master 1 to master 3
+ properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
+ port=master3.port,
+ properties=properties)
+ if not m1_m3_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m3_agmt)
+
+ # Creating agreement from master 1 to master 4
+ properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
+ port=master4.port,
+ properties=properties)
+ if not m1_m4_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m4_agmt)
+
+ # Creating agreement from master 1 to hub 1
+ properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_h1_agmt = master1.agreement.create(suffix=SUFFIX, host=hub1.host,
+ port=hub1.port,
+ properties=properties)
+ if not m1_h1_agmt:
+ log.fatal("Fail to create a master -> hub replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_h1_agmt)
+
+ # Creating agreement from master 1 to hub 2
+ properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_h2_agmt = master1.agreement.create(suffix=SUFFIX, host=hub2.host,
+ port=hub2.port,
+ properties=properties)
+ if not m1_h2_agmt:
+ log.fatal("Fail to create a master -> hub replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_h2_agmt)
+
+ # Creating agreement from master 2 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
+ port=master1.port,
+ properties=properties)
+ if not m2_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m1_agmt)
+
+ # Creating agreement from master 2 to master 3
+ properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host,
+ port=master3.port,
+ properties=properties)
+ if not m2_m3_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m3_agmt)
+
+ # Creating agreement from master 2 to master 4
+ properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m4_agmt = master2.agreement.create(suffix=SUFFIX, host=master4.host,
+ port=master4.port,
+ properties=properties)
+ if not m2_m4_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m4_agmt)
+
+ # Creating agreement from master 2 to hub 1
+ properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_h1_agmt = master2.agreement.create(suffix=SUFFIX, host=hub1.host,
+ port=hub1.port,
+ properties=properties)
+ if not m2_h1_agmt:
+ log.fatal("Fail to create a master -> hub replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_h1_agmt)
+
+ # Creating agreement from master 2 to hub 2
+ properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_h2_agmt = master2.agreement.create(suffix=SUFFIX, host=hub2.host,
+ port=hub2.port,
+ properties=properties)
+ if not m2_h2_agmt:
+ log.fatal("Fail to create a master -> hub replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_h2_agmt)
+
+ # Creating agreement from master 3 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host,
+ port=master1.port,
+ properties=properties)
+ if not m3_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_m1_agmt)
+
+ # Creating agreement from master 3 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host,
+ port=master2.port,
+ properties=properties)
+ if not m3_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_m2_agmt)
+
+ # Creating agreement from master 3 to master 4
+ properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_m4_agmt = master3.agreement.create(suffix=SUFFIX, host=master4.host,
+ port=master4.port,
+ properties=properties)
+ if not m3_m4_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_m4_agmt)
+
+ # Creating agreement from master 3 to hub 1
+ properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_h1_agmt = master3.agreement.create(suffix=SUFFIX, host=hub1.host,
+ port=hub1.port,
+ properties=properties)
+ if not m3_h1_agmt:
+ log.fatal("Fail to create a master -> hub replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_h1_agmt)
+
+ # Creating agreement from master 3 to hub 2
+ properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_h2_agmt = master3.agreement.create(suffix=SUFFIX, host=hub2.host,
+ port=hub2.port,
+ properties=properties)
+ if not m3_h2_agmt:
+ log.fatal("Fail to create a master -> hub replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_h2_agmt)
+
+ # Creating agreement from master 4 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m4_m1_agmt = master4.agreement.create(suffix=SUFFIX, host=master1.host,
+ port=master1.port,
+ properties=properties)
+ if not m4_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m4_m1_agmt)
+
+ # Creating agreement from master 4 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m4_m2_agmt = master4.agreement.create(suffix=SUFFIX, host=master2.host,
+ port=master2.port,
+ properties=properties)
+ if not m4_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m4_m2_agmt)
+
+ # Creating agreement from master 4 to master 3
+ properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m4_m3_agmt = master4.agreement.create(suffix=SUFFIX, host=master3.host,
+ port=master3.port,
+ properties=properties)
+ if not m4_m3_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m4_m3_agmt)
+
+ # Creating agreement from master 4 to hub 1
+ properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m4_h1_agmt = master4.agreement.create(suffix=SUFFIX, host=hub1.host,
+ port=hub1.port,
+ properties=properties)
+ if not m4_h1_agmt:
+ log.fatal("Fail to create a master -> hub replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m4_h1_agmt)
+
+ # Creating agreement from master 4 to hub 2
+ properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m4_h2_agmt = master4.agreement.create(suffix=SUFFIX, host=hub2.host,
+ port=hub2.port,
+ properties=properties)
+ if not m4_h2_agmt:
+ log.fatal("Fail to create a master -> hub replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m4_h2_agmt)
+
+ # Creating agreement from hub 1 to consumer 1
+ properties = {RA_NAME: 'me2_' + consumer1.host + ':' + str(consumer1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host,
+ port=consumer1.port,
+ properties=properties)
+ if not h1_c1_agmt:
+ log.fatal("Fail to create a hub -> consumer replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % h1_c1_agmt)
+
+ # Creating agreement from hub 1 to consumer 2
+ properties = {RA_NAME: 'me2_' + consumer2.host + ':' + str(consumer2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ h1_c2_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer2.host,
+ port=consumer2.port,
+ properties=properties)
+ if not h1_c2_agmt:
+ log.fatal("Fail to create a hub -> consumer replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % h1_c2_agmt)
+
+ # Creating agreement from hub 1 to consumer 3
+ properties = {RA_NAME: 'me2_' + consumer3.host + ':' + str(consumer3.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ h1_c3_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer3.host,
+ port=consumer3.port,
+ properties=properties)
+ if not h1_c3_agmt:
+ log.fatal("Fail to create a hub -> consumer replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % h1_c3_agmt)
+
+ # Creating agreement from hub 1 to consumer 4
+ properties = {RA_NAME: 'me2_' + consumer4.host + ':' + str(consumer4.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ h1_c4_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer4.host,
+ port=consumer4.port,
+ properties=properties)
+ if not h1_c4_agmt:
+ log.fatal("Fail to create a hub -> consumer replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % h1_c4_agmt)
+
+ # Creating agreement from hub 2 to consumer 1
+ properties = {RA_NAME: 'me2_' + consumer1.host + ':' + str(consumer1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ h2_c1_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer1.host,
+ port=consumer1.port,
+ properties=properties)
+ if not h2_c1_agmt:
+ log.fatal("Fail to create a hub -> consumer replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % h2_c1_agmt)
+
+ # Creating agreement from hub 2 to consumer 2
+ properties = {RA_NAME: 'me2_' + consumer2.host + ':' + str(consumer2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ h2_c2_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer2.host,
+ port=consumer2.port,
+ properties=properties)
+ if not h2_c2_agmt:
+ log.fatal("Fail to create a hub -> consumer replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % h2_c2_agmt)
+
+ # Creating agreement from hub 2 to consumer 3
+ properties = {RA_NAME: 'me2_' + consumer3.host + ':' + str(consumer3.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ h2_c3_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer3.host,
+ port=consumer3.port,
+ properties=properties)
+ if not h2_c3_agmt:
+ log.fatal("Fail to create a hub -> consumer replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % h2_c3_agmt)
+
+ # Creating agreement from hub 2 to consumer 4
+ properties = {RA_NAME: 'me2_' + consumer4.host + ':' + str(consumer4.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ h2_c4_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer4.host,
+ port=consumer4.port,
+ properties=properties)
+ if not h2_c4_agmt:
+ log.fatal("Fail to create a hub -> consumer replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % h2_c4_agmt)
+
+ # Allow the replicas to get situated with the new agreements...
+ time.sleep(5)
+
+ #
+ # Initialize all the agreements
+ #
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ master1.waitForReplInit(m1_m2_agmt)
+ master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
+ master1.waitForReplInit(m1_m3_agmt)
+ master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
+ master1.waitForReplInit(m1_m4_agmt)
+ master1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1)
+ master1.waitForReplInit(m1_h1_agmt)
+ hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
+ hub1.waitForReplInit(h1_c1_agmt)
+ hub1.agreement.init(SUFFIX, HOST_CONSUMER_2, PORT_CONSUMER_2)
+ hub1.waitForReplInit(h1_c2_agmt)
+ hub1.agreement.init(SUFFIX, HOST_CONSUMER_3, PORT_CONSUMER_3)
+ hub1.waitForReplInit(h1_c3_agmt)
+ hub1.agreement.init(SUFFIX, HOST_CONSUMER_4, PORT_CONSUMER_4)
+ hub1.waitForReplInit(h1_c4_agmt)
+ master1.agreement.init(SUFFIX, HOST_HUB_2, PORT_HUB_2)
+ master1.waitForReplInit(m1_h2_agmt)
+
+ # Check replication is working...
+ if master1.testReplication(DEFAULT_SUFFIX, consumer1):
+ log.info('Replication is working.')
+ else:
+ log.fatal('Replication is not working.')
+ assert False
+
+ def fin():
+ """If we are debugging just stop the instances, otherwise remove
+ them
+ """
+ if DEBUGGING:
+ master1.stop()
+ master2.stop()
+ master3.stop()
+ master4.stop()
+ hub1.stop()
+ hub2.stop()
+ consumer1.stop()
+ consumer2.stop()
+ consumer3.stop()
+ consumer4.stop()
+ else:
+ master1.delete()
+ master2.delete()
+ master3.delete()
+ master4.delete()
+ hub1.delete()
+ hub2.delete()
+ consumer1.delete()
+ consumer2.delete()
+ consumer3.delete()
+ consumer4.delete()
+ request.addfinalizer(fin)
+
+ return TopologyReplication(master1, master2, master3, master4, hub1, hub2,
+ consumer1, consumer2, consumer3, consumer4)
+
+
+class AddDelUsers(threading.Thread):
+    """Adds and then deletes ADD_DEL_COUNT entries against one master"""
+ def __init__(self, inst):
+ """
+ Initialize the thread
+ """
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.inst = inst
+ self.name = inst.serverid
+
+ def run(self):
+ """
+ Start adding users
+ """
+ idx = 0
+
+ log.info('AddDelUsers (%s) Adding and deleting %d entries...' %
+ (self.name, ADD_DEL_COUNT))
+
+ while idx < ADD_DEL_COUNT:
+ RDN_VAL = ('uid=%s-%d' % (self.name, idx))
+ USER_DN = ('%s,%s' % (RDN_VAL, DEFAULT_SUFFIX))
+
+ try:
+ self.inst.add_s(Entry((USER_DN, {'objectclass':
+ 'top extensibleObject'.split(),
+ 'uid': RDN_VAL})))
+ except ldap.LDAPError as e:
+ log.fatal('AddDelUsers (%s): failed to add (%s) error: %s' %
+ (self.name, USER_DN, str(e)))
+ assert False
+
+ try:
+ self.inst.delete_s(USER_DN)
+ except ldap.LDAPError as e:
+ log.fatal('AddDelUsers (%s): failed to delete (%s) error: %s' %
+ (self.name, USER_DN, str(e)))
+ assert False
+
+ idx += 1
+
+ log.info('AddDelUsers (%s) - Finished at: %s' %
+ (self.name, getDateTime()))
+
+
+def measureConvergence(topology):
+    """Find and measure the convergence of entries from each master
+    """
+
+    replicas = [topology.master1, topology.master2, topology.master3,
+                topology.master4, topology.hub1, topology.hub2,
+                topology.consumer1, topology.consumer2, topology.consumer3,
+                topology.consumer4]
+
+    if ADD_DEL_COUNT > 10:
+        interval = int(ADD_DEL_COUNT / 10)
+    else:
+        interval = 1
+
+    for master in [('1', topology.master1),
+                   ('2', topology.master2),
+                   ('3', topology.master3),
+                   ('4', topology.master4)]:
+        # Start with the first entry
+        entries = ['ADD dn="uid=master_%s-0,%s' %
+                   (master[0], DEFAULT_SUFFIX)]
+
+        # Add incremental entries to the list
+        idx = interval
+        while idx < ADD_DEL_COUNT:
+            entries.append('ADD dn="uid=master_%s-%d,%s' %
+                           (master[0], idx, DEFAULT_SUFFIX))
+            idx += interval
+
+        # Add the last entry to the list (if it was not already added)
+        if (idx - interval) != (ADD_DEL_COUNT - 1):  # last index appended in the loop was idx - interval
+            entries.append('ADD dn="uid=master_%s-%d,%s' %
+                           (master[0], (ADD_DEL_COUNT - 1),
+                            DEFAULT_SUFFIX))
+
+        ReplTools.replConvReport(DEFAULT_SUFFIX, entries, master[1], replicas)
+
+
+def test_MMR_Integrity(topology):
+ """Apply load to 4 masters at the same time. Perform adds and deletes.
+ If any updates are missed we will see an error 32 in the access logs or
+ we will have entries left over once the test completes.
+ """
+ loop = 0
+
+ ALL_REPLICAS = [topology.master1, topology.master2, topology.master3,
+ topology.master4,
+ topology.hub1, topology.hub2,
+ topology.consumer1, topology.consumer2,
+ topology.consumer3, topology.consumer4]
+
+ if TEST_CONVERGE_LATENCY:
+ try:
+ for inst in ALL_REPLICAS:
+ replica = inst.replicas.get(DEFAULT_SUFFIX)
+ replica.set('nsds5ReplicaReleaseTimeout', CONVERGENCE_TIMEOUT)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to set replicas release timeout - error: %s' %
+ (str(e)))
+ assert False
+
+ if DEBUGGING:
+ # Enable Repl logging, and increase the max logs
+ try:
+ for inst in ALL_REPLICAS:
+ inst.enableReplLogging()
+ inst.modify_s("cn=config", [(ldap.MOD_REPLACE,
+ 'nsslapd-errorlog-maxlogsperdir',
+ '5')])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to set max logs - error: %s' % (str(e)))
+ assert False
+
+ while loop < MAX_LOOPS:
+ # Remove the current logs so we have a clean set of logs to check.
+ log.info('Pass %d...' % (loop + 1))
+ log.info("Removing logs...")
+ for inst in ALL_REPLICAS:
+ inst.deleteAllLogs()
+
+ # Fire off 4 threads to apply the load
+ log.info("Start adding/deleting: " + getDateTime())
+ startTime = time.time()
+ add_del_m1 = AddDelUsers(topology.master1)
+ add_del_m1.start()
+ add_del_m2 = AddDelUsers(topology.master2)
+ add_del_m2.start()
+ add_del_m3 = AddDelUsers(topology.master3)
+ add_del_m3.start()
+ add_del_m4 = AddDelUsers(topology.master4)
+ add_del_m4.start()
+
+ # Wait for threads to finish sending their updates
+ add_del_m1.join()
+ add_del_m2.join()
+ add_del_m3.join()
+ add_del_m4.join()
+ log.info("Finished adding/deleting entries: " + getDateTime())
+
+ #
+ # Loop checking for error 32's, and for convergence to complete
+ #
+ log.info("Waiting for replication to converge...")
+ while True:
+ # First check for error 32's
+ for inst in ALL_REPLICAS:
+ if inst.searchAccessLog(" err=32 "):
+ log.fatal('An add was missed on: ' + inst.serverid)
+ assert False
+
+ # Next check to see if the last update is in the access log
+ converged = True
+ for inst in ALL_REPLICAS:
+ if not inst.searchAccessLog(LAST_DN_M1) or \
+ not inst.searchAccessLog(LAST_DN_M2) or \
+ not inst.searchAccessLog(LAST_DN_M3) or \
+ not inst.searchAccessLog(LAST_DN_M4):
+ converged = False
+ break
+
+ if converged:
+ elapsed_tm = int(time.time() - startTime)
+ convtime = str(datetime.timedelta(seconds=elapsed_tm))
+ log.info('Replication converged at: ' + getDateTime() +
+ ' - Elapsed Time: ' + convtime)
+ break
+ else:
+ # Check if replication is idle
+ replicas = [topology.master1, topology.master2,
+ topology.master3, topology.master4,
+ topology.hub1, topology.hub2]
+ if ReplTools.replIdle(replicas, DEFAULT_SUFFIX):
+ # Replication is idle - wait 30 secs for access log buffer
+ time.sleep(30)
+
+ # Now check the access log again...
+ converged = True
+ for inst in ALL_REPLICAS:
+ if not inst.searchAccessLog(LAST_DN_M1) or \
+ not inst.searchAccessLog(LAST_DN_M2) or \
+ not inst.searchAccessLog(LAST_DN_M3) or \
+ not inst.searchAccessLog(LAST_DN_M4):
+ converged = False
+ break
+
+ if converged:
+ elapsed_tm = int(time.time() - startTime)
+ convtime = str(datetime.timedelta(seconds=elapsed_tm))
+ log.info('Replication converged at: ' + getDateTime() +
+ ' - Elapsed Time: ' + convtime)
+ break
+ else:
+ log.fatal('Stopping replication check: ' +
+ getDateTime())
+ log.fatal('Failure: Replication is complete, but we ' +
+ 'never converged.')
+ assert False
+
+ # Sleep a bit before the next pass
+ time.sleep(3)
+
+ #
+ # Finally check the CSN's
+ #
+ log.info("Check the CSN's...")
+ if not ReplTools.checkCSNs(ALL_REPLICAS):
+ assert False
+ log.info("All CSN's present and accounted for.")
+
+ #
+ # Print the convergence report
+ #
+ log.info('Measuring convergence...')
+ measureConvergence(topology)
+
+ #
+ # Test complete
+ #
+ log.info('No lingering entries.')
+ log.info('Pass %d complete.' % (loop + 1))
+ elapsed_tm = int(time.time() - TEST_START)
+ convtime = str(datetime.timedelta(seconds=elapsed_tm))
+ log.info('Entire test ran for: ' + convtime)
+
+ loop += 1
+
+ log.info('Test PASSED')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py
new file mode 100644
index 000000000..c48a070c1
--- /dev/null
+++ b/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py
@@ -0,0 +1,572 @@
+import os
+import sys
+import time
+import datetime
+import ldap
+import logging
+import pytest
+import threading
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.repltools import ReplTools
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+DEBUGGING = False
+ADD_DEL_COUNT = 500
+MAX_LOOPS = 5
+TEST_CONVERGE_LATENCY = True
+CONVERGENCE_TIMEOUT = '60'
+master_list = []
+hub_list = []
+con_list = []
+TEST_START = time.time()
+
+LAST_DN_IDX = ADD_DEL_COUNT - 1
+LAST_DN_M1 = 'DEL dn="uid=master_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+LAST_DN_M2 = 'DEL dn="uid=master_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+LAST_DN_M3 = 'DEL dn="uid=master_3-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+LAST_DN_M4 = 'DEL dn="uid=master_4-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX)
+
+
+class TopologyReplication(object):
+ """The Replication Topology Class"""
+ def __init__(self, master1, master2, master3, master4):
+ """Init"""
+ master1.open()
+ self.master1 = master1
+ master2.open()
+ self.master2 = master2
+ master3.open()
+ self.master3 = master3
+ master4.open()
+ self.master4 = master4
+
+
[email protected](scope="module")
+def topology(request):
+ """Create Replication Deployment"""
+
+ # Creating master 1...
+ if DEBUGGING:
+ master1 = DirSrv(verbose=True)
+ else:
+ master1 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_1
+ args_instance[SER_PORT] = PORT_MASTER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master1.allocate(args_master)
+ instance_master1 = master1.exists()
+ if instance_master1:
+ master1.delete()
+ master1.create()
+ master1.open()
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_1)
+
+ # Creating master 2...
+ if DEBUGGING:
+ master2 = DirSrv(verbose=True)
+ else:
+ master2 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_2
+ args_instance[SER_PORT] = PORT_MASTER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master2.allocate(args_master)
+ instance_master2 = master2.exists()
+ if instance_master2:
+ master2.delete()
+ master2.create()
+ master2.open()
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_2)
+
+ # Creating master 3...
+ if DEBUGGING:
+ master3 = DirSrv(verbose=True)
+ else:
+ master3 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_3
+ args_instance[SER_PORT] = PORT_MASTER_3
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master3.allocate(args_master)
+ instance_master3 = master3.exists()
+ if instance_master3:
+ master3.delete()
+ master3.create()
+ master3.open()
+ master3.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_3)
+
+ # Creating master 4...
+ if DEBUGGING:
+ master4 = DirSrv(verbose=True)
+ else:
+ master4 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_4
+ args_instance[SER_PORT] = PORT_MASTER_4
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master4.allocate(args_master)
+ instance_master4 = master4.exists()
+ if instance_master4:
+ master4.delete()
+ master4.create()
+ master4.open()
+ master4.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_4)
+
+ #
+ # Create all the agreements
+ #
+ # Creating agreement from master 1 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host,
+ port=master2.port,
+ properties=properties)
+ if not m1_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m2_agmt)
+
+ # Creating agreement from master 1 to master 3
+ properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host,
+ port=master3.port,
+ properties=properties)
+ if not m1_m3_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m3_agmt)
+
+ # Creating agreement from master 1 to master 4
+ properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host,
+ port=master4.port,
+ properties=properties)
+ if not m1_m4_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m4_agmt)
+
+ # Creating agreement from master 2 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host,
+ port=master1.port,
+ properties=properties)
+ if not m2_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m1_agmt)
+
+ # Creating agreement from master 2 to master 3
+ properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host,
+ port=master3.port,
+ properties=properties)
+ if not m2_m3_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m3_agmt)
+
+ # Creating agreement from master 2 to master 4
+ properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m4_agmt = master2.agreement.create(suffix=SUFFIX, host=master4.host,
+ port=master4.port,
+ properties=properties)
+ if not m2_m4_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m4_agmt)
+
+ # Creating agreement from master 3 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host,
+ port=master1.port,
+ properties=properties)
+ if not m3_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_m1_agmt)
+
+ # Creating agreement from master 3 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host,
+ port=master2.port,
+ properties=properties)
+ if not m3_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_m2_agmt)
+
+ # Creating agreement from master 3 to master 4
+ properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m3_m4_agmt = master3.agreement.create(suffix=SUFFIX, host=master4.host,
+ port=master4.port,
+ properties=properties)
+ if not m3_m4_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m3_m4_agmt)
+
+ # Creating agreement from master 4 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m4_m1_agmt = master4.agreement.create(suffix=SUFFIX, host=master1.host,
+ port=master1.port,
+ properties=properties)
+ if not m4_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m4_m1_agmt)
+
+ # Creating agreement from master 4 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m4_m2_agmt = master4.agreement.create(suffix=SUFFIX, host=master2.host,
+ port=master2.port,
+ properties=properties)
+ if not m4_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m4_m2_agmt)
+
+ # Creating agreement from master 4 to master 3
+ properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m4_m3_agmt = master4.agreement.create(suffix=SUFFIX, host=master3.host,
+ port=master3.port,
+ properties=properties)
+ if not m4_m3_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m4_m3_agmt)
+
+ # Allow the replicas to get situated with the new agreements...
+ time.sleep(5)
+
+ #
+ # Initialize all the agreements
+ #
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ master1.waitForReplInit(m1_m2_agmt)
+ master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3)
+ master1.waitForReplInit(m1_m3_agmt)
+ master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
+ master1.waitForReplInit(m1_m4_agmt)
+
+ # Check replication is working...
+ if master1.testReplication(DEFAULT_SUFFIX, master4):
+ log.info('Replication is working.')
+ else:
+ log.fatal('Replication is not working.')
+ assert False
+
+ def fin():
+ """If we are debugging just stop the instances, otherwise remove
+ them
+ """
+ if 1 or DEBUGGING:
+ master1.stop()
+ master2.stop()
+ master3.stop()
+ master4.stop()
+ else:
+ master1.delete()
+ master2.delete()
+ master3.delete()
+ master4.delete()
+ request.addfinalizer(fin)
+
+ return TopologyReplication(master1, master2, master3, master4)
+
+
+class AddDelUsers(threading.Thread):
+ """Add's and delets 50000 entries"""
+ def __init__(self, inst):
+ """
+ Initialize the thread
+ """
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.inst = inst
+ self.name = inst.serverid
+
+ def run(self):
+ """
+ Start adding users
+ """
+ idx = 0
+
+ log.info('AddDelUsers (%s) Adding and deleting %d entries...' %
+ (self.name, ADD_DEL_COUNT))
+
+ while idx < ADD_DEL_COUNT:
+ RDN_VAL = ('uid=%s-%d' % (self.name, idx))
+ USER_DN = ('%s,%s' % (RDN_VAL, DEFAULT_SUFFIX))
+
+ try:
+ self.inst.add_s(Entry((USER_DN, {'objectclass':
+ 'top extensibleObject'.split(),
+ 'uid': RDN_VAL})))
+ except ldap.LDAPError as e:
+ log.fatal('AddDelUsers (%s): failed to add (%s) error: %s' %
+ (self.name, USER_DN, str(e)))
+ assert False
+
+ try:
+ self.inst.delete_s(USER_DN)
+ except ldap.LDAPError as e:
+ log.fatal('AddDelUsers (%s): failed to delete (%s) error: %s' %
+ (self.name, USER_DN, str(e)))
+ assert False
+
+ idx += 1
+
+ log.info('AddDelUsers (%s) - Finished at: %s' %
+ (self.name, getDateTime()))
+
+
+def measureConvergence(topology):
+ """Find and measure the convergence of entries from each master
+ """
+
+ replicas = [topology.master1, topology.master2, topology.master3,
+ topology.master4]
+
+ if ADD_DEL_COUNT > 10:
+ interval = int(ADD_DEL_COUNT / 10)
+ else:
+ interval = 1
+
+ for master in [('1', topology.master1),
+ ('2', topology.master2),
+ ('3', topology.master3),
+ ('4', topology.master4)]:
+ # Start with the first entry
+ entries = ['ADD dn="uid=master_%s-0,%s' %
+ (master[0], DEFAULT_SUFFIX)]
+
+ # Add incremental entries to the list
+ idx = interval
+ while idx < ADD_DEL_COUNT:
+ entries.append('ADD dn="uid=master_%s-%d,%s' %
+ (master[0], idx, DEFAULT_SUFFIX))
+ idx += interval
+
+ # Add the last entry to the list (if it was not already added)
+ if idx != (ADD_DEL_COUNT - 1):
+ entries.append('ADD dn="uid=master_%s-%d,%s' %
+ (master[0], (ADD_DEL_COUNT - 1),
+ DEFAULT_SUFFIX))
+
+ ReplTools.replConvReport(DEFAULT_SUFFIX, entries, master[1], replicas)
+
+
+def test_MMR_Integrity(topology):
+ """Apply load to 4 masters at the same time. Perform adds and deletes.
+ If any updates are missed we will see an error 32 in the access logs or
+ we will have entries left over once the test completes.
+ """
+ loop = 0
+
+ ALL_REPLICAS = [topology.master1, topology.master2, topology.master3,
+ topology.master4]
+
+ if TEST_CONVERGE_LATENCY:
+ try:
+ for inst in ALL_REPLICAS:
+ replica = inst.replicas.get(DEFAULT_SUFFIX)
+ replica.set('nsds5ReplicaReleaseTimeout', CONVERGENCE_TIMEOUT)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to set replicas release timeout - error: %s' %
+ (str(e)))
+ assert False
+
+ if DEBUGGING:
+ # Enable Repl logging, and increase the max logs
+ try:
+ for inst in ALL_REPLICAS:
+ inst.enableReplLogging()
+ inst.modify_s("cn=config", [(ldap.MOD_REPLACE,
+ 'nsslapd-errorlog-maxlogsperdir',
+ '5')])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to set max logs - error: %s' % (str(e)))
+ assert False
+
+ while loop < MAX_LOOPS:
+ # Remove the current logs so we have a clean set of logs to check.
+ log.info('Pass %d...' % (loop + 1))
+ log.info("Removing logs...")
+ for inst in ALL_REPLICAS:
+ inst.deleteAllLogs()
+
+ # Fire off 4 threads to apply the load
+ log.info("Start adding/deleting: " + getDateTime())
+ startTime = time.time()
+ add_del_m1 = AddDelUsers(topology.master1)
+ add_del_m1.start()
+ add_del_m2 = AddDelUsers(topology.master2)
+ add_del_m2.start()
+ add_del_m3 = AddDelUsers(topology.master3)
+ add_del_m3.start()
+ add_del_m4 = AddDelUsers(topology.master4)
+ add_del_m4.start()
+
+ # Wait for threads to finish sending their updates
+ add_del_m1.join()
+ add_del_m2.join()
+ add_del_m3.join()
+ add_del_m4.join()
+ log.info("Finished adding/deleting entries: " + getDateTime())
+
+ #
+ # Loop checking for error 32's, and for convergence to complete
+ #
+ log.info("Waiting for replication to converge...")
+ while True:
+ # First check for error 32's
+ for inst in ALL_REPLICAS:
+ if inst.searchAccessLog(" err=32 "):
+ log.fatal('An add was missed on: ' + inst.serverid)
+ assert False
+
+ # Next check to see if the last update is in the access log
+ converged = True
+ for inst in ALL_REPLICAS:
+ if not inst.searchAccessLog(LAST_DN_M1) or \
+ not inst.searchAccessLog(LAST_DN_M2) or \
+ not inst.searchAccessLog(LAST_DN_M3) or \
+ not inst.searchAccessLog(LAST_DN_M4):
+ converged = False
+ break
+
+ if converged:
+ elapsed_tm = int(time.time() - startTime)
+ convtime = str(datetime.timedelta(seconds=elapsed_tm))
+ log.info('Replication converged at: ' + getDateTime() +
+ ' - Elapsed Time: ' + convtime)
+ break
+ else:
+ # Check if replication is idle
+ replicas = [topology.master1, topology.master2,
+ topology.master3, topology.master4]
+ if ReplTools.replIdle(replicas, DEFAULT_SUFFIX):
+ # Replication is idle - wait 30 secs for access log buffer
+ time.sleep(30)
+
+ # Now check the access log again...
+ converged = True
+ for inst in ALL_REPLICAS:
+ if not inst.searchAccessLog(LAST_DN_M1) or \
+ not inst.searchAccessLog(LAST_DN_M2) or \
+ not inst.searchAccessLog(LAST_DN_M3) or \
+ not inst.searchAccessLog(LAST_DN_M4):
+ converged = False
+ break
+
+ if converged:
+ elapsed_tm = int(time.time() - startTime)
+ convtime = str(datetime.timedelta(seconds=elapsed_tm))
+ log.info('Replication converged at: ' + getDateTime() +
+ ' - Elapsed Time: ' + convtime)
+ break
+ else:
+ log.fatal('Stopping replication check: ' +
+ getDateTime())
+ log.fatal('Failure: Replication is complete, but we ' +
+ 'never converged.')
+ assert False
+
+ # Sleep a bit before the next pass
+ time.sleep(3)
+
+ #
+ # Finally check the CSN's
+ #
+ log.info("Check the CSN's...")
+ if not ReplTools.checkCSNs(ALL_REPLICAS):
+ assert False
+ log.info("All CSN's present and accounted for.")
+
+ #
+ # Print the convergence report
+ #
+ log.info('Measuring convergence...')
+ measureConvergence(topology)
+
+ #
+ # Test complete
+ #
+ log.info('No lingering entries.')
+ log.info('Pass %d complete.' % (loop + 1))
+ elapsed_tm = int(time.time() - TEST_START)
+ convtime = str(datetime.timedelta(seconds=elapsed_tm))
+ log.info('Entire test ran for: ' + convtime)
+
+ loop += 1
+
+ log.info('Test PASSED')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
| 0 |
c0ca290ffe9ac825db25c46f0c098e491be71e35
|
389ds/389-ds-base
|
Bump version to 2.0.6
|
commit c0ca290ffe9ac825db25c46f0c098e491be71e35
Author: Thierry Bordaz <[email protected]>
Date: Wed Jun 23 15:58:27 2021 +0200
Bump version to 2.0.6
diff --git a/VERSION.sh b/VERSION.sh
index aa7c0f70b..28aec3d8a 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,7 +10,7 @@ vendor="389 Project"
# PACKAGE_VERSION is constructed from these
VERSION_MAJOR=2
VERSION_MINOR=0
-VERSION_MAINT=5
+VERSION_MAINT=6
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
VERSION_DATE=$(date -u +%Y%m%d)
| 0 |
6d6630452f742f46519ec16aac4d6943e62ae286
|
389ds/389-ds-base
|
507460 Access log could mistakenly report notes=U for VLV searches
Summary: Access log reports 'notes=U' for VLV indexed searches if there are no records to be found
Fix Description: VLV creates an empty IDL if no matched entries are found. To do so, VLV code was calling idl_alloc with argument 0, which generated ALLID. It's changed to call idl_alloc with 1. It creates a normal empty IDL.
|
commit 6d6630452f742f46519ec16aac4d6943e62ae286
Author: Noriko Hosoi <[email protected]>
Date: Tue Jun 23 16:46:29 2009 -0700
507460 Access log could mistakenly report notes=U for VLV searches
Summary: Access log reports 'notes=U' for VLV indexed searches if there are no records to be found
Fix Description: VLV creates an empty IDL if no matched entries are found. To do so, VLV code was calling idl_alloc with argument 0, which generated ALLID. It's changed to call idl_alloc with 1. It creates a normal empty IDL.
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 7d91d47ab..8d57ae490 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -637,7 +637,7 @@ ldbm_back_search( Slapi_PBlock *pb )
* this is not an internal operation.
* We hope the plugins know what they are doing!
*/
- if (!operation_is_flag_set(operation, OP_FLAG_INTERNAL)) {
+ if (!operation_is_flag_set(operation, OP_FLAG_INTERNAL)) {
PR_Lock(inst->inst_config_mutex);
ri = inst->require_index;
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
index f1e061679..f0ce9e6ac 100644
--- a/ldap/servers/slapd/back-ldbm/vlv.c
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
@@ -1185,7 +1185,8 @@ vlv_build_candidate_list( backend *be, struct vlvIndex* p, const struct vlv_requ
vlv_request_control);
if (si==length) {
do_trim = 0;
- *candidates = idl_alloc(0);
+ /* minimum idl_alloc size should be 1; 0 is considered ALLID */
+ *candidates = idl_alloc(1);
}
break;
default:
@@ -1356,7 +1357,8 @@ vlv_trim_candidates(backend *be, const IDList *candidates, const sort_spec* sort
if(si==candidates->b_nids)
{
do_trim= 0;
- resultIdl= idl_alloc(0);
+ /* minimum idl_alloc size should be 1; 0 is considered ALLID */
+ resultIdl= idl_alloc(1);
}
break;
default:
| 0 |
a81146c980306a6bfcf6945f7bedf1038a4edf24
|
389ds/389-ds-base
|
Bug 613056 - fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939
https://bugzilla.redhat.com/show_bug.cgi?id=613056
Resolves: bug 613056
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939
Fix description: Catch possible NULL pointer in windows_acquire_replica().
|
commit a81146c980306a6bfcf6945f7bedf1038a4edf24
Author: Endi S. Dewata <[email protected]>
Date: Fri Jul 9 20:17:57 2010 -0500
Bug 613056 - fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939
https://bugzilla.redhat.com/show_bug.cgi?id=613056
Resolves: bug 613056
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11892 - 11939
Fix description: Catch possible NULL pointer in windows_acquire_replica().
diff --git a/ldap/servers/plugins/replication/windows_protocol_util.c b/ldap/servers/plugins/replication/windows_protocol_util.c
index a3d007728..ba7559cdb 100644
--- a/ldap/servers/plugins/replication/windows_protocol_util.c
+++ b/ldap/servers/plugins/replication/windows_protocol_util.c
@@ -601,6 +601,13 @@ windows_acquire_replica(Private_Repl_Protocol *prp, RUV **ruv, int check_ruv)
LDAPDebug( LDAP_DEBUG_TRACE, "=> windows_acquire_replica\n", 0, 0, 0 );
+ if (NULL == ruv)
+ {
+ slapi_log_error(SLAPI_LOG_FATAL, windows_repl_plugin_name, "NULL ruv\n");
+ return_value = ACQUIRE_FATAL_ERROR;
+ goto done;
+ }
+
PR_ASSERT(prp);
if (prp->replica_acquired) /* we already acquire replica */
@@ -613,7 +620,7 @@ windows_acquire_replica(Private_Repl_Protocol *prp, RUV **ruv, int check_ruv)
return ACQUIRE_SUCCESS;
}
- if (NULL != ruv)
+ if (NULL != *ruv)
{
ruv_destroy ( ruv );
}
@@ -716,7 +723,7 @@ windows_acquire_replica(Private_Repl_Protocol *prp, RUV **ruv, int check_ruv)
/* replica successfully acquired */
prp->replica_acquired = PR_TRUE;
}
-
+done:
LDAPDebug( LDAP_DEBUG_TRACE, "<= windows_acquire_replica\n", 0, 0, 0 );
return return_value;
| 0 |
a7de5f2a4323d224c63f722172eee8b63752fb45
|
389ds/389-ds-base
|
Bug 635987 - Incorrect sub scope search result with ACL containing ldap:///self
https://bugzilla.redhat.com/show_bug.cgi?id=635987
Description:
This commit made for the bug 635987 introduced a bug to replication.
commit 8ac525e5ac997378f4f2a386e9b96568c8d66db5
Author: Noriko Hosoi <[email protected]>
Date: Tue Sep 21 15:12:07 2010 -0700
subtree_candidates (ldbm_search.c)
If you do have a tombstone filter, descendants will be NULL,
and idl_intersection of candidates and descendents will wipe
out all of the candidates, leaving just the one entry, e->ep_id.
Changed to call idl_intersection only when the filter is not
for tombstone or entryrdn_get_noancestorid (false, by default).
|
commit a7de5f2a4323d224c63f722172eee8b63752fb45
Author: Noriko Hosoi <[email protected]>
Date: Fri Sep 24 11:42:31 2010 -0700
Bug 635987 - Incorrect sub scope search result with ACL containing ldap:///self
https://bugzilla.redhat.com/show_bug.cgi?id=635987
Description:
This commit made for the bug 635987 introduced a bug to replication.
commit 8ac525e5ac997378f4f2a386e9b96568c8d66db5
Author: Noriko Hosoi <[email protected]>
Date: Tue Sep 21 15:12:07 2010 -0700
subtree_candidates (ldbm_search.c)
If you do have a tombstone filter, descendants will be NULL,
and idl_intersection of candidates and descendents will wipe
out all of the candidates, leaving just the one entry, e->ep_id.
Changed to call idl_intersection only when the filter is not
for tombstone or entryrdn_get_noancestorid (false, by default).
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 30674960b..568d32c66 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -957,13 +957,17 @@ subtree_candidates(
*err = entryrdn_get_subordinates(be,
slapi_entry_get_sdn_const(e->ep_entry),
e->ep_id, &descendants, NULL);
+ idl_insert(&descendants, e->ep_id);
+ candidates = idl_intersection(be, candidates, descendants);
+ idl_free(tmp);
+ idl_free(descendants);
} else if (!has_tombstone_filter) {
*err = ldbm_ancestorid_read(be, NULL, e->ep_id, &descendants);
- }
- idl_insert(&descendants, e->ep_id);
- candidates = idl_intersection(be, candidates, descendants);
- idl_free(tmp);
- idl_free(descendants);
+ idl_insert(&descendants, e->ep_id);
+ candidates = idl_intersection(be, candidates, descendants);
+ idl_free(tmp);
+ idl_free(descendants);
+ } /* else == has_tombstone_filter: do nothing */
}
return( candidates );
| 0 |
2dab9224d575596c30492c6db6bb31dc662df42c
|
389ds/389-ds-base
|
Issue 5872 - part 2 - fix is_dbi regression (#5887)
A one liner fix to handle a regression in nightly tests about is_dbi function (need to convert dbscan output back into string):
Issue: 5272 part 2
Reviewed by: @mreynolds389 Thanks!
|
commit 2dab9224d575596c30492c6db6bb31dc662df42c
Author: progier389 <[email protected]>
Date: Tue Aug 8 14:18:58 2023 +0200
Issue 5872 - part 2 - fix is_dbi regression (#5887)
A one liner fix to handle a regression in nightly tests about is_dbi function (need to convert dbscan output back into string):
Issue: 5272 part 2
Reviewed by: @mreynolds389 Thanks!
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index e7653c24a..7590ec442 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -3006,7 +3006,7 @@ class DirSrv(SimpleLDAPObject, object):
def is_dbi(self, dbipattern):
if self.is_dbi_supported():
# Use dbscan to determine whether the database instance exists.
- output = self.dbscan(args=['-L', self.ds_paths.db_dir], stopping=False)
+ output = self.dbscan(args=['-L', self.ds_paths.db_dir], stopping=False).decode()
self.log.debug("is_dbi output is: " + output)
return dbipattern.lower() in output.lower()
else:
| 0 |
94b265fb509ac194dec8e51b6d02f7fd88673aac
|
389ds/389-ds-base
|
Bug 630097 - (cov#15462) NULL dereference in mep_modrdn_post_op()
If we fail to fetch the postop entry for a modrdn operation in the
Managed Entry Plug-in, we end up passing a NULL pointer to
slapi_entry_attr_get_charptr(). This function dereferences the
entry without checking if it is NULL first. The mep_modrdn_post_op()
function should just return if we are unable to fetch the postop
entry.
I believe that this issue could trigger a crash when chain-on-update
is configured and a modrdn operation is chained. There is no postop
entry in this case.
|
commit 94b265fb509ac194dec8e51b6d02f7fd88673aac
Author: Nathan Kinder <[email protected]>
Date: Tue Sep 14 11:14:53 2010 -0700
Bug 630097 - (cov#15462) NULL dereference in mep_modrdn_post_op()
If we fail to fetch the postop entry for a modrdn operation in the
Managed Entry Plug-in, we end up passing a NULL pointer to
slapi_entry_attr_get_charptr(). This function dereferences the
entry without checking if it is NULL first. The mep_modrdn_post_op()
function should just return if we are unable to fetch the postop
entry.
I believe that this issue could trigger a crash when chain-on-update
is configured and a modrdn operation is chained. There is no postop
entry in this case.
diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c
index 716b39b01..c0ce01398 100644
--- a/ldap/servers/plugins/mep/mep.c
+++ b/ldap/servers/plugins/mep/mep.c
@@ -2021,6 +2021,7 @@ mep_modrdn_post_op(Slapi_PBlock *pb)
slapi_log_error(SLAPI_LOG_PLUGIN, MEP_PLUGIN_SUBSYSTEM,
"mep_modrdn_post_op: Error "
"retrieving post-op entry\n");
+ return 0;
}
if ((old_dn = mep_get_dn(pb))) {
| 0 |
db2a098b97aaca733395822990cb7574e96e9135
|
389ds/389-ds-base
|
Resolves: bug 486474
Bug Description: overriding arguments to setup causes setup to fail
Reviewed by: ulf.weltman, nkinder (Thanks!)
Fix Description: Parameters specified on the command line should override and replace (not add to) any parameters specified in a given .inf file. I refactored the code a little too - I moved the argv processing into the Inf module out of the Setup and Migration modules. The code will first process the args and store the values in a temporary hash ref. Then it will process the temp hash ref, replacing the values in the main inf with the values from the hash.
Platforms tested: RHEL4
Flag Day: no
Doc impact: no
|
commit db2a098b97aaca733395822990cb7574e96e9135
Author: Rich Megginson <[email protected]>
Date: Tue Feb 24 16:57:45 2009 +0000
Resolves: bug 486474
Bug Description: overriding arguments to setup causes setup to fail
Reviewed by: ulf.weltman, nkinder (Thanks!)
Fix Description: Parameters specified on the command line should override and replace (not add to) any parameters specified in a given .inf file. I refactored the code a little too - I moved the argv processing into the Inf module out of the Setup and Migration modules. The code will first process the args and store the values in a temporary hash ref. Then it will process the temp hash ref, replacing the values in the main inf with the values from the hash.
Platforms tested: RHEL4
Flag Day: no
Doc impact: no
diff --git a/ldap/admin/src/scripts/Inf.pm b/ldap/admin/src/scripts/Inf.pm
index 4d4136303..0ac11a41a 100644
--- a/ldap/admin/src/scripts/Inf.pm
+++ b/ldap/admin/src/scripts/Inf.pm
@@ -192,6 +192,60 @@ sub write {
close INF;
}
+sub updateFromArgs {
+ my $self = shift;
+ my $argsinf = {}; # tmp for args read in
+
+ if (!@_) {
+ return 1; # no args - just return
+ }
+
+ # read args into temp inf
+ for (@_) {
+ if (/^([\w_-]+)\.([\w_-]+)=(.*)$/) { # e.g. section.param=value
+ my $sec = $1;
+ my $parm = $2;
+ my $val = $3;
+ # a single value is just a single scalar
+ # multiple values are represented by an array ref
+ if (exists($argsinf->{$sec}->{$parm})) {
+ if (!ref($argsinf->{$sec}->{$parm})) {
+ # convert single scalar to array ref
+ my $ary = [$argsinf->{$sec}->{$parm}];
+ $argsinf->{$sec}->{$parm} = $ary;
+ }
+ # just push the new value
+ push @{$argsinf->{$sec}->{$parm}}, $val;
+ } else {
+ # single value
+ $argsinf->{$sec}->{$parm} = $val;
+ }
+ } else { # error
+ print STDERR "Error: unknown command line option $_\n";
+ return;
+ }
+ }
+
+ # no args read - just return true
+ if (!$argsinf || !%{$argsinf}) {
+ return 1;
+ }
+
+ # override inf with vals read from args
+ while (my ($name, $sec) = each %{$argsinf}) {
+ if (ref($sec) eq 'HASH') {
+ for my $key (keys %{$sec}) {
+ if (defined($sec->{$key})) {
+ my $val = $sec->{$key};
+ $self->{$name}->{$key} = $val;
+ }
+ }
+ }
+ }
+
+ return 1;
+}
+
#############################################################################
# Mandatory TRUE return value.
#
diff --git a/ldap/admin/src/scripts/Migration.pm.in b/ldap/admin/src/scripts/Migration.pm.in
index 7256d2a6a..86eb6fd12 100644
--- a/ldap/admin/src/scripts/Migration.pm.in
+++ b/ldap/admin/src/scripts/Migration.pm.in
@@ -232,14 +232,9 @@ sub init {
# see if user passed in default inf values - also, command line
# arguments override those passed in via an inf file - this
# allows the reuse of .inf files with some parameters overridden
- for (@ARGV) {
- if (/^([\w_-]+)\.([\w_-]+)=(.*)$/) { # e.g. section.param=value
- $self->{inf}->{$1}->{$2} = $3;
- } else { # error
- print STDERR "Error: unknown command line option $_\n";
- usage();
- exit 1;
- }
+ if (!$self->{inf}->updateFromArgs(@ARGV)) {
+ HelpMessage();
+ exit 1;
}
# this is the base config directory - the directory containing
diff --git a/ldap/admin/src/scripts/Setup.pm.in b/ldap/admin/src/scripts/Setup.pm.in
index 814a6a84c..7ad57c1f9 100644
--- a/ldap/admin/src/scripts/Setup.pm.in
+++ b/ldap/admin/src/scripts/Setup.pm.in
@@ -161,30 +161,9 @@ sub init {
# see if user passed in default inf values - also, command line
# arguments override those passed in via an inf file - this
# allows the reuse of .inf files with some parameters overridden
- for (@ARGV) {
- if (/^([\w_-]+)\.([\w_-]+)=(.*)$/) { # e.g. section.param=value
- my $sec = $1;
- my $parm = $2;
- my $val = $3;
- # a single value is just a single scalar
- # multiple values are represented by an array ref
- if (exists($self->{inf}->{$sec}->{$parm})) {
- if (!ref($self->{inf}->{$sec}->{$parm})) {
- # convert single scalar to array ref
- my $ary = [$self->{inf}->{$sec}->{$parm}];
- $self->{inf}->{$sec}->{$parm} = $ary;
- }
- # just push the new value
- push @{$self->{inf}->{$sec}->{$parm}}, $val;
- } else {
- # single value
- $self->{inf}->{$sec}->{$parm} = $val;
- }
- } else { # error
- print STDERR "Error: unknown command line option $_\n";
- HelpMessage();
- exit 1;
- }
+ if (!$self->{inf}->updateFromArgs(@ARGV)) {
+ HelpMessage();
+ exit 1;
}
# this is the base config directory - the directory containing
| 0 |
858abb797f6586f910b839939c0d7c0e86be73fd
|
389ds/389-ds-base
|
Resolves: bug 165761
Added rfc2307bis schema, but in the data directory. It is not compatible with the older rfc2307 schema included with the directory server.
Users will need to upgrade their database to fix posixGroup entries in order
to use this schema
|
commit 858abb797f6586f910b839939c0d7c0e86be73fd
Author: Rich Megginson <[email protected]>
Date: Thu Oct 11 16:29:49 2007 +0000
Resolves: bug 165761
Added rfc2307bis schema, but in the data directory. It is not compatible with the older rfc2307 schema included with the directory server.
Users will need to upgrade their database to fix posixGroup entries in order
to use this schema
diff --git a/Makefile.am b/Makefile.am
index e82b0f6eb..33e2cd16c 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -157,7 +157,8 @@ sampledata_DATA = $(srcdir)/ldap/ldif/Ace.ldif \
ldap/ldif/template-sasl.ldif \
$(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-FamilyNames \
$(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-GivenNames \
- $(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-OrgUnits
+ $(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-OrgUnits \
+ $(srcdir)/ldap/schema/10rfc2307bis.ldif
schema_DATA = $(srcdir)/ldap/schema/00core.ldif \
$(srcdir)/ldap/schema/01common.ldif \
diff --git a/Makefile.in b/Makefile.in
index 8a41c79f1..366e590e5 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1073,7 +1073,8 @@ sampledata_DATA = $(srcdir)/ldap/ldif/Ace.ldif \
ldap/ldif/template-sasl.ldif \
$(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-FamilyNames \
$(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-GivenNames \
- $(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-OrgUnits
+ $(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-OrgUnits \
+ $(srcdir)/ldap/schema/10rfc2307bis.ldif
schema_DATA = $(srcdir)/ldap/schema/00core.ldif \
$(srcdir)/ldap/schema/01common.ldif \
diff --git a/ldap/schema/10rfc2307bis.ldif b/ldap/schema/10rfc2307bis.ldif
new file mode 100644
index 000000000..7553fd20f
--- /dev/null
+++ b/ldap/schema/10rfc2307bis.ldif
@@ -0,0 +1,348 @@
+# New and improved RFC 2307 schema (aka RFC 2307 bis)
+# "An Approach for Using LDAP as a Network Information Service"
+# This schema has not yet been approved.
+#
+dn: cn=schema
+attributeTypes: (
+ 1.3.6.1.1.1.1.0 NAME 'uidNumber'
+ DESC 'An integer uniquely identifying a user in an administrative domain'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.1 NAME 'gidNumber'
+ DESC 'An integer uniquely identifying a group in an
+ administrative domain'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.2 NAME 'gecos'
+ DESC 'The GECOS field; the common name'
+ EQUALITY caseIgnoreIA5Match
+ SUBSTR caseIgnoreIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.3 NAME 'homeDirectory'
+ DESC 'The absolute path to the home directory'
+ EQUALITY caseExactIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.4 NAME 'loginShell'
+ DESC 'The path to the login shell'
+ EQUALITY caseExactIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.5 NAME 'shadowLastChange'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.6 NAME 'shadowMin'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.7 NAME 'shadowMax'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.8 NAME 'shadowWarning'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.9 NAME 'shadowInactive'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.10 NAME 'shadowExpire'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.11 NAME 'shadowFlag'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.12 NAME 'memberUid'
+ EQUALITY caseExactIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.13 NAME 'memberNisNetgroup'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.14 NAME 'nisNetgroupTriple'
+ DESC 'Netgroup triple'
+ EQUALITY caseIgnoreIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.15 NAME 'ipServicePort'
+ DESC 'Service port number'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.16 NAME 'ipServiceProtocol'
+ DESC 'Service protocol name'
+ SUP name
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.17 NAME 'ipProtocolNumber'
+ DESC 'IP protocol number'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.18 NAME 'oncRpcNumber'
+ DESC 'ONC RPC number'
+ EQUALITY integerMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.19 NAME 'ipHostNumber'
+ DESC 'IPv4 addresses as a dotted decimal omitting leading
+ zeros or IPv6 addresses as defined in RFC2373'
+ SUP name
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.20 NAME 'ipNetworkNumber'
+ DESC 'IP network as a dotted decimal, eg. 192.168,
+ omitting leading zeros'
+ SUP name
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.21 NAME 'ipNetmaskNumber'
+ DESC 'IP netmask as a dotted decimal, eg. 255.255.255.0,
+ omitting leading zeros'
+ EQUALITY caseIgnoreIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.22 NAME 'macAddress'
+ DESC 'MAC address in maximal, colon separated hex
+ notation, eg. 00:00:92:90:ee:e2'
+ EQUALITY caseIgnoreIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.23 NAME 'bootParameter'
+ DESC 'rpc.bootparamd parameter'
+ EQUALITY caseExactIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.24 NAME 'bootFile'
+ DESC 'Boot image name'
+ EQUALITY caseExactIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.26 NAME 'nisMapName'
+ DESC 'Name of a A generic NIS map'
+ SUP name
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.27 NAME 'nisMapEntry'
+ DESC 'A generic NIS entry'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.28 NAME 'nisPublicKey'
+ DESC 'NIS public key'
+ EQUALITY octetStringMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.29 NAME 'nisSecretKey'
+ DESC 'NIS secret key'
+ EQUALITY octetStringMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.40
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.30 NAME 'nisDomain'
+ DESC 'NIS domain'
+ EQUALITY caseIgnoreIA5Match
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.31 NAME 'automountMapName'
+ DESC 'automount Map Name'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.32 NAME 'automountKey'
+ DESC 'Automount Key value'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+attributeTypes: (
+ 1.3.6.1.1.1.1.33 NAME 'automountInformation'
+ DESC 'Automount information'
+ EQUALITY caseExactIA5Match
+ SUBSTR caseExactIA5SubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.26
+ SINGLE-VALUE
+ )
+# end of attribute types - beginning of objectclasses
+objectClasses: (
+ 1.3.6.1.1.1.2.0 NAME 'posixAccount' SUP top AUXILIARY
+ DESC 'Abstraction of an account with POSIX attributes'
+ MUST ( cn $ uid $ uidNumber $ gidNumber $ homeDirectory )
+ MAY ( userPassword $ loginShell $ gecos $
+ description )
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.1 NAME 'shadowAccount' SUP top AUXILIARY
+ DESC 'Additional attributes for shadow passwords'
+ MUST uid
+ MAY ( userPassword $ description $
+ shadowLastChange $ shadowMin $ shadowMax $
+ shadowWarning $ shadowInactive $
+ shadowExpire $ shadowFlag )
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.2 NAME 'posixGroup' SUP top AUXILIARY
+ DESC 'Abstraction of a group of accounts'
+ MUST gidNumber
+ MAY ( userPassword $ memberUid $
+ description )
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.3 NAME 'ipService' SUP top STRUCTURAL
+ DESC 'Abstraction an Internet Protocol service.
+ Maps an IP port and protocol (such as tcp or udp)
+ to one or more names; the distinguished value of
+ the cn attribute denotes the services canonical
+ name'
+ MUST ( cn $ ipServicePort $ ipServiceProtocol )
+ MAY description
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.4 NAME 'ipProtocol' SUP top STRUCTURAL
+ DESC 'Abstraction of an IP protocol. Maps a protocol number
+ to one or more names. The distinguished value of the cn
+ attribute denotes the protocols canonical name'
+ MUST ( cn $ ipProtocolNumber )
+ MAY description
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.5 NAME 'oncRpc' SUP top STRUCTURAL
+ DESC 'Abstraction of an Open Network Computing (ONC)
+ [RFC1057] Remote Procedure Call (RPC) binding.
+ This class maps an ONC RPC number to a name.
+ The distinguished value of the cn attribute denotes
+ the RPC services canonical name'
+ MUST ( cn $ oncRpcNumber )
+ MAY description
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.6 NAME 'ipHost' SUP top AUXILIARY
+ DESC 'Abstraction of a host, an IP device. The distinguished
+ value of the cn attribute denotes the hosts canonical
+ name. Device SHOULD be used as a structural class'
+ MUST ( cn $ ipHostNumber )
+ MAY ( userPassword $ l $ description $ manager )
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.7 NAME 'ipNetwork' SUP top STRUCTURAL
+ DESC 'Abstraction of a network. The distinguished value of
+ the cn attribute denotes the networks canonical name'
+ MUST ipNetworkNumber
+ MAY ( cn $ ipNetmaskNumber $ l $ description $ manager )
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' SUP top STRUCTURAL
+ DESC 'Abstraction of a netgroup. May refer to other netgroups'
+ MUST cn
+ MAY ( nisNetgroupTriple $ memberNisNetgroup $ description )
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.9 NAME 'nisMap' SUP top STRUCTURAL
+ DESC 'A generic abstraction of a NIS map'
+ MUST nisMapName
+ MAY description
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.10 NAME 'nisObject' SUP top STRUCTURAL
+ DESC 'An entry in a NIS map'
+ MUST ( cn $ nisMapEntry $ nisMapName )
+ MAY description
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.11 NAME 'ieee802Device' SUP top AUXILIARY
+ DESC 'A device with a MAC address; device SHOULD be
+ used as a structural class'
+ MAY macAddress
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.12 NAME 'bootableDevice' SUP top AUXILIARY
+ DESC 'A device with boot parameters; device SHOULD be
+ used as a structural class'
+ MAY ( bootFile $ bootParameter )
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' SUP top AUXILIARY
+ DESC 'An object with a public and secret key'
+ MUST ( cn $ nisPublicKey $ nisSecretKey )
+ MAY ( uidNumber $ description )
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' SUP top AUXILIARY
+ DESC 'Associates a NIS domain with a naming context'
+ MUST nisDomain
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURAL
+ MUST ( automountMapName )
+ MAY description
+ )
+objectClasses: (
+ 1.3.6.1.1.1.2.17 NAME 'automount' SUP top STRUCTURAL
+ DESC 'Automount information'
+ MUST ( automountKey $ automountInformation )
+ MAY description
+ )
+## namedObject is needed for groups without members
+objectClasses: (
+ 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STRUCTURAL
+ MAY cn
+ )
| 0 |
e0485033ec25c85f95129266dc9e3735b283abc7
|
389ds/389-ds-base
|
Fix compiler warning in snmp main()
Reviewed by: mreynolds(one line commit rule)
|
commit e0485033ec25c85f95129266dc9e3735b283abc7
Author: Mark Reynolds <[email protected]>
Date: Tue Jan 15 14:40:39 2019 -0500
Fix compiler warning in snmp main()
Reviewed by: mreynolds(one line commit rule)
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
index 1e6470521..ccfb9b820 100644
--- a/ldap/servers/snmp/main.c
+++ b/ldap/servers/snmp/main.c
@@ -295,7 +295,7 @@ load_config(char *conf_path)
/* set pidfile path */
if ((pidfile = malloc(strlen(LOCALSTATEDIR) + strlen("/run/") +
strlen(LDAP_AGENT_PIDFILE) + 1)) != NULL) {
- strncpy(pidfile, LOCALSTATEDIR, strlen(LOCALSTATEDIR));
+ strncpy(pidfile, LOCALSTATEDIR, strlen(LOCALSTATEDIR) + 1);
/* The above will likely not be NULL terminated, but we need to
* be sure that we're properly NULL terminated for the below
* strcat() to work properly. */
| 0 |
9e4ce5fad6f36baf8db88bf1820df28cc6e4e1a5
|
389ds/389-ds-base
|
Issue 50026 - Audit log does not capture the operation where nsslapd-lookthroughlimit is modified
Description:
Updated test case to check modification of attributes in audit log, because it wasn't logged in correct format.
Also removed function in test_internal_log_level_131076 in ds_logs_test.py that I used for debugging when making that test and forgot to delete it.
https://pagure.io/389-ds-base/issue/50026
Reviewed by: mreynolds, tbordaz, spichugi (Thanks!)
|
commit 9e4ce5fad6f36baf8db88bf1820df28cc6e4e1a5
Author: Barbora Smejkalová <[email protected]>
Date: Fri Apr 5 13:05:32 2019 +0200
Issue 50026 - Audit log does not capture the operation where nsslapd-lookthroughlimit is modified
Description:
Updated test case to check modification of attributes in audit log, because it wasn't logged in correct format.
Also removed function in test_internal_log_level_131076 in ds_logs_test.py that I used for debugging when making that test and forgot to delete it.
https://pagure.io/389-ds-base/issue/50026
Reviewed by: mreynolds, tbordaz, spichugi (Thanks!)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 46fc164bd..7e1471a3e 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -22,6 +22,9 @@ from lib389.idm.organizationalunit import OrganizationalUnits
from lib389._constants import DN_DM, PASSWORD, PW_DM
from lib389.topologies import topology_st
from lib389.paths import Paths
+from lib389.idm.directorymanager import DirectoryManager
+from lib389.config import LDBMConfig
+
default_paths = Paths()
@@ -1100,56 +1103,50 @@ def test_critical_msg_on_empty_range_idl(topology_st):
assert not topology_st.standalone.searchErrorsLog('CRIT - list_candidates - NULL idl was recieved from filter_candidates_ext.')
-def audit_pattern_found(server, log_pattern):
- file_obj = open(server.ds_paths.audit_log, "r")
-
- found = None
- # Use a while true iteration because 'for line in file: hit a
- log.info('Audit log contains')
- while True:
- line = file_obj.readline()
- log.info(line)
- found = log_pattern.search(line)
- if ((line == '') or (found)):
- break
-
- return found
-
-
[email protected]
@pytest.mark.ds50026
-def test_ticketldbm_audit(topology_st):
+def test_ldbm_modification_audit_log(topology_st):
"""When updating LDBM config attributes, those attributes/values are not listed
in the audit log
:id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8
:setup: Standalone Instance
:steps:
- 1. Enable audit log
- 2. Update a set of config attrs in LDBM config
- 3. Disable audit log (to restore the default config)
- 4. Check that config attrs are listed in the audit log
+ 1. Bind as DM
+ 2. Enable audit log
+ 3. Update a set of config attrs in LDBM config
+ 4. Restart the server
+ 5. Check that config attrs are listed in the audit log
:expectedresults:
- 1. Should succeeds
- 2. Should succeeds
- 3. Should succeeds
- 4. Should succeeds
+ 1. Operation successful
+ 2. Operation successful
+ 3. Operation successful
+ 4. Operation successful
+ 5. Audit log should contain modification of attrs"
"""
- inst = topology_st[0]
- inst.config.enable_log('audit')
+ VALUE = '10001'
+
+ d_manager = DirectoryManager(topology_st.standalone)
+ conn = d_manager.bind()
+ config_ldbm = LDBMConfig(conn)
+
+ log.info("Enable audit logging")
+ conn.config.enable_log('audit')
- #inst.ds_paths.audit_log
attrs = ['nsslapd-lookthroughlimit', 'nsslapd-pagedidlistscanlimit', 'nsslapd-idlistscanlimit', 'nsslapd-db-locks']
- mods = []
+
for attr in attrs:
- mods.append((ldap.MOD_REPLACE, attr, b'10001'))
- inst.modify_s(DN_CONFIG_LDBM, mods)
- inst.config.enable_log('audit')
+ log.info("Set attribute %s to value %s" % (attr, VALUE))
+ config_ldbm.set(attr, VALUE)
+
+ log.info('Restart the server to flush the logs')
+ conn.restart()
for attr in attrs:
- log.info("Check %s is replaced in the audit log" % attr)
- regex = re.compile("^replace: %s" % attr)
- assert audit_pattern_found(inst, regex)
+ log.info("Check if attribute %s is replaced in the audit log" % attr)
+ assert conn.searchAuditLog('replace: %s' % attr)
+ assert conn.searchAuditLog('%s: %s' % (attr, VALUE))
@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled,
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index a45dd46fb..d54d79375 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -17,6 +17,7 @@ from lib389.idm.user import UserAccounts
from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
+
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
@@ -474,10 +475,6 @@ def test_internal_log_level_131076(topology_st, add_user_log_level_131076):
log.info('Restart the server to flush the logs')
topo.restart()
- path = topo.ds_access_log._get_log_path()
- with open(path) as f:
- print(f.read())
-
# These comments contain lines we are trying to find without regex
log.info("Check the access logs for ADD operation of the user")
# op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com"
| 0 |
2619204d067594cd9bf9371e95d8f9a41fbd9c42
|
389ds/389-ds-base
|
Bug 614511 - fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
https://bugzilla.redhat.com/show_bug.cgi?id=614511
Resolves: bug 614511
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
description: Catch possible NULL pointer in collation_indexer_create().
coverity ID: 11873
|
commit 2619204d067594cd9bf9371e95d8f9a41fbd9c42
Author: Noriko Hosoi <[email protected]>
Date: Fri Aug 20 13:18:06 2010 -0700
Bug 614511 - fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
https://bugzilla.redhat.com/show_bug.cgi?id=614511
Resolves: bug 614511
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
description: Catch possible NULL pointer in collation_indexer_create().
coverity ID: 11873
diff --git a/ldap/servers/plugins/collation/collate.c b/ldap/servers/plugins/collation/collate.c
index 0314469d4..be2e37203 100644
--- a/ldap/servers/plugins/collation/collate.c
+++ b/ldap/servers/plugins/collation/collate.c
@@ -426,6 +426,9 @@ collation_indexer_create (const char* oid)
indexer_t* ix = NULL;
const coll_id_t** id = collation_id;
char* locale = NULL; /* NULL == default locale */
+ UCollator* coll = NULL;
+ collation_indexer_t* etc = NULL;
+
if (id) for (; *id; ++id) {
if (!strcasecmp (oid, (*id)->oid)) {
const coll_profile_t* profile = (*id)->profile;
@@ -444,7 +447,7 @@ collation_indexer_create (const char* oid)
profile->variant);
}
if (err == U_ZERO_ERROR) {
- UCollator* coll = ucol_open(locale, &err);
+ coll = ucol_open(locale, &err);
/*
* If we found exactly the right collator for this locale,
* or if we found a fallback one, or if we are happy with
@@ -452,8 +455,7 @@ collation_indexer_create (const char* oid)
*/
if (err == U_ZERO_ERROR || err == U_USING_FALLBACK_WARNING ||
(err == U_USING_DEFAULT_WARNING && is_default)) {
- collation_indexer_t* etc = (collation_indexer_t*)
- slapi_ch_calloc (1, sizeof (collation_indexer_t));
+ etc = (collation_indexer_t*) slapi_ch_calloc (1, sizeof (collation_indexer_t));
ix = (indexer_t*) slapi_ch_calloc (1, sizeof (indexer_t));
ucol_setAttribute (coll, UCOL_STRENGTH, profile->strength, &err);
if (err != U_ZERO_ERROR && err != U_USING_FALLBACK_WARNING
@@ -475,6 +477,11 @@ collation_indexer_create (const char* oid)
break; /* found the 'official' id */
}
}
+ if (!*id) {
+ LDAPDebug (LDAP_DEBUG_ANY, "collation_indexer_create: id not found\n", 0, 0, 0);
+ goto error;
+ }
+
ix->ix_etc = etc;
ix->ix_oid = (*id)->oid;
ix->ix_index = collation_index;
@@ -500,6 +507,15 @@ collation_indexer_create (const char* oid)
break; /* failed to create the specified collator */
}
}
+ goto done;
+error:
+ slapi_ch_free((void **)&etc);
+ slapi_ch_free((void **)&ix);
+ if (coll) {
+ ucol_close (coll);
+ coll = NULL;
+ }
+done:
if (locale) {
PR_smprintf_free(locale);
locale = NULL;
| 0 |
087b11f23cbb1f17c75c66302886c671216543f9
|
389ds/389-ds-base
|
Ticket 48951 - dsadm dsconfig status and plugin
Fix Description: Add support for dsadm and dsconf to be able to check the
server status. Additionally, we add plugin support, and move fixtures to a
shared fixture module.
For example:
> dsadm instance status
Directory Server instance name : localhost
INFO:dsadm:Instance is running
> dsconf -D 'cn=Directory Manager' -H ldap://localhost plugin list
Enter password for cn=Directory Manager on ldap://localhost :
INFO:dsconf.plugin_list:7-bit check
INFO:dsconf.plugin_list:Account Policy Plugin
INFO:dsconf.plugin_list:Account Usability Plugin
...
INFO:dsconf.plugin_list:SSHA384
INFO:dsconf.plugin_list:SSHA512
https://fedorahosted.org/389/ticket/48951
Author: wibrown
Review by: mreynolds (Thanks!)
|
commit 087b11f23cbb1f17c75c66302886c671216543f9
Author: William Brown <[email protected]>
Date: Wed Aug 31 15:30:09 2016 +1000
Ticket 48951 - dsadm dsconfig status and plugin
Fix Description: Add support for dsadm and dsconf to be able to check the
server status. Additionally, we add plugin support, and move fixtures to a
shared fixture module.
For example:
> dsadm instance status
Directory Server instance name : localhost
INFO:dsadm:Instance is running
> dsconf -D 'cn=Directory Manager' -H ldap://localhost plugin list
Enter password for cn=Directory Manager on ldap://localhost :
INFO:dsconf.plugin_list:7-bit check
INFO:dsconf.plugin_list:Account Policy Plugin
INFO:dsconf.plugin_list:Account Usability Plugin
...
INFO:dsconf.plugin_list:SSHA384
INFO:dsconf.plugin_list:SSHA512
https://fedorahosted.org/389/ticket/48951
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/src/lib389/cli/dsadm b/src/lib389/cli/dsadm
index 00ea54df9..77262f057 100755
--- a/src/lib389/cli/dsadm
+++ b/src/lib389/cli/dsadm
@@ -12,6 +12,7 @@ import argparse
import logging
import sys
+from lib389.cli_base import _get_arg
from lib389 import DirSrv
from lib389.cli_adm import instance as cli_instance
from lib389.cli_base import disconnect_instance
@@ -66,12 +67,24 @@ if __name__ == '__main__':
inst = DirSrv(verbose=args.verbose)
result = True
+
+ # we allocate an instance in all cases unless we are CREATING the server.
+ if not (hasattr(args, 'noinst') and args.noinst is True):
+ # Get the instance name if needed.
+ inst_id = _get_arg( args.instance, msg="Directory Server instance name")
+
+ # Allocate the instance based on name
+ insts = inst.list(serverid=inst_id)
+ if len(insts) != 1:
+ raise ValueError("No such instance %s" % inst_id)
+
+ inst.allocate(insts[0])
+ log.debug('Instance allocated')
+
if args.verbose:
- # inst = connect_instance(args.ldapurl, args.binddn, args.verbose, args.starttls)
result = args.func(inst, log, args)
else:
try:
- # inst = connect_instance(args.ldapurl, args.binddn, args.verbose, args.starttls)
result = args.func(inst, log, args)
except Exception as e:
log.debug(e, exc_info=True)
diff --git a/src/lib389/cli/dsconf b/src/lib389/cli/dsconf
index 84507bac8..264622410 100755
--- a/src/lib389/cli/dsconf
+++ b/src/lib389/cli/dsconf
@@ -15,6 +15,7 @@ import sys
from lib389 import DirSrv
from lib389.cli_conf import backend as cli_backend
+from lib389.cli_conf import plugin as cli_plugin
from lib389.cli_base import disconnect_instance, connect_instance
logging.basicConfig()
@@ -51,6 +52,7 @@ if __name__ == '__main__':
subparsers = parser.add_subparsers(help="resources to act upon")
cli_backend.create_parser(subparsers)
+ cli_plugin.create_parser(subparsers)
args = parser.parse_args()
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index 59a564e8b..e16db6d3c 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -1119,6 +1119,19 @@ class DirSrv(SimpleLDAPObject):
# whatever the initial state, the instance is now Offline
self.state = DIRSRV_STATE_OFFLINE
+ def status(self):
+ """
+ Determine if an instance is running or not.
+
+ Will update the self.state parameter.
+ """
+ status_prog = os.path.join(self.prefix, 'sbin', 'status-dirsrv')
+ try:
+ subprocess.check_call([status_prog, self.serverid])
+ return True
+ except subprocess.CalledProcessError:
+ return False
+
def restart(self, timeout=120):
'''
It restarts an instance and rebind it. Its final state after rebind
diff --git a/src/lib389/lib389/cli_adm/instance.py b/src/lib389/lib389/cli_adm/instance.py
index 057418e89..7c1ac75ca 100644
--- a/src/lib389/lib389/cli_adm/instance.py
+++ b/src/lib389/lib389/cli_adm/instance.py
@@ -7,7 +7,6 @@
# --- END COPYRIGHT BLOCK ---
from lib389._constants import *
-from lib389.cli_base import _get_arg
from lib389.tools import DirSrvTools
from lib389.instance.setup import SetupDs
@@ -31,25 +30,19 @@ def instance_list(inst, log, args):
log.info(e)
log.info("Perhaps you need to be a different user?")
-def instance_action(inst, log, args, act):
- # Get the instance name if needed.
- inst_id = _get_arg( args.instance, msg="Directory Server instance name to %s" % act)
-
- # Allocate the instance based on name
- insts = inst.list(serverid=inst_id)
- if len(insts) != 1:
- raise ValueError("No such instance %s" % inst_id)
-
- inst.allocate(insts[0])
-
- # Start it!
- DirSrvTools.serverCmd(inst, act, True)
-
def instance_start(inst, log, args):
- instance_action(inst, log, args, "start")
+ if inst.status() is False:
+ inst.start()
def instance_stop(inst, log, args):
- instance_action(inst, log, args, "stop")
+ if inst.status() is True:
+ inst.stop()
+
+def instance_status(inst, log, args):
+ if inst.status() is True:
+ log.info("Instance is running")
+ else:
+ log.info("Instance is not running")
def instance_create(inst, log, args):
if not args.ack:
@@ -124,13 +117,17 @@ def create_parser(subparsers):
list_parser.set_defaults(func=instance_list)
start_parser = subcommands.add_parser('start', help="Start an instance of Directory Server, if it is not currently running")
- start_parser.add_argument('instance', nargs=1, help="The name of the instance to start.")
+ # start_parser.add_argument('instance', nargs=1, help="The name of the instance to start.")
start_parser.set_defaults(func=instance_start)
stop_parser = subcommands.add_parser('stop', help="Stop an instance of Directory Server, if it is currently running")
- stop_parser.add_argument('instance', nargs=1, help="The name of the instance to stop.")
+ # stop_parser.add_argument('instance', nargs=1, help="The name of the instance to stop.")
stop_parser.set_defaults(func=instance_stop)
+ status_parser = subcommands.add_parser('status', help="Check running status of an instance of Directory Server")
+ # status_parser.add_argument('instance', nargs=1, help="The name of the instance to check.")
+ status_parser.set_defaults(func=instance_status)
+
create_parser = subcommands.add_parser('create', help="Create an instance of Directory Server. Can be interactive or silent with an inf answer file")
create_parser.add_argument('-n', '--dryrun', help="Validate system and configurations only. Do not alter the system.", action='store_true', default=False)
create_parser.add_argument('-f', '--file', help="Inf file to use with prepared answers")
@@ -140,7 +137,9 @@ By setting this value you acknowledge and take responsibility for the fact this
""",
action='store_true', default=False)
create_parser.set_defaults(func=instance_create)
+ create_parser.set_defaults(noinst=True)
example_parser = subcommands.add_parser('example', help="Display an example ini answer file, with comments")
example_parser.set_defaults(func=instance_example)
+ create_parser.set_defaults(noinst=True)
diff --git a/src/lib389/lib389/cli_conf/plugin.py b/src/lib389/lib389/cli_conf/plugin.py
new file mode 100644
index 000000000..8736c3eb1
--- /dev/null
+++ b/src/lib389/lib389/cli_conf/plugin.py
@@ -0,0 +1,88 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+from lib389.plugins import Plugin, Plugins
+import argparse
+
+from lib389.cli_base import (
+ _generic_list,
+ _generic_get,
+ _generic_get_dn,
+ _generic_create,
+ _generic_delete,
+ _get_arg,
+ _get_args,
+ _get_attributes,
+ _warn,
+ )
+
+SINGULAR = Plugin
+MANY = Plugins
+RDN = 'cn'
+
+
+def plugin_list(inst, basedn, log, args):
+ _generic_list(inst, basedn, log.getChild('plugin_list'), MANY)
+
+def plugin_get(inst, basedn, log, args):
+ rdn = _get_arg( args.selector, msg="Enter %s to retrieve" % RDN)
+ _generic_get(inst, basedn, log.getChild('plugin_get'), MANY, rdn)
+
+def plugin_get_dn(inst, basedn, log, args):
+ dn = _get_arg( args.dn, msg="Enter dn to retrieve")
+ _generic_get_dn(inst, basedn, log.getChild('plugin_get_dn'), MANY, dn)
+
+# Plugin enable
+def plugin_enable(inst, basedn, log, args):
+ dn = _get_arg( args.dn, msg="Enter plugin dn to enable")
+ mc = MANY(inst, basedn)
+ o = mc.get(dn=dn)
+ o.enable()
+ o_str = o.display()
+ log.info('Enabled %s', o_str)
+
+# Plugin disable
+def plugin_disable(inst, basedn, log, args, warn=True):
+ dn = _get_arg( args.dn, msg="Enter plugin dn to disable")
+ if warn:
+ _warn(dn, msg="Disabling %s %s" % (SINGULAR.__name__, dn))
+ mc = MANY(inst, basedn)
+ o = mc.get(dn=dn)
+ o.disable()
+ o_str = o.display()
+ log.info('Disabled %s', o_str)
+
+# Plugin configure?
+def plugin_configure(inst, basedn, log, args):
+ pass
+
+def create_parser(subparsers):
+ plugin_parser = subparsers.add_parser('plugin', help="Manage plugins available on the server")
+
+ subcommands = plugin_parser.add_subparsers(help="action")
+
+ list_parser = subcommands.add_parser('list', help="List current configured (enabled and disabled) plugins")
+ list_parser.set_defaults(func=plugin_list)
+
+ get_parser = subcommands.add_parser('get', help='get')
+ get_parser.set_defaults(func=plugin_get)
+ get_parser.add_argument('selector', nargs='?', help='The plugin to search for')
+
+ get_dn_parser = subcommands.add_parser('get_dn', help='get_dn')
+ get_dn_parser.set_defaults(func=plugin_get_dn)
+ get_dn_parser.add_argument('dn', nargs='?', help='The plugin dn to get')
+
+ enable_parser = subcommands.add_parser('enable', help='enable a plugin in the server')
+ enable_parser.set_defaults(func=plugin_enable)
+ enable_parser.add_argument('dn', nargs='?', help='The dn to enable')
+
+ disable_parser = subcommands.add_parser('disable', help='disable the plugin configuration')
+ disable_parser.set_defaults(func=plugin_disable)
+ disable_parser.add_argument('dn', nargs='?', help='The dn to disable')
+
+
diff --git a/src/lib389/lib389/tests/cli/__init__.py b/src/lib389/lib389/tests/cli/__init__.py
index d57ac3325..9a57230c6 100644
--- a/src/lib389/lib389/tests/cli/__init__.py
+++ b/src/lib389/lib389/tests/cli/__init__.py
@@ -6,3 +6,70 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
+import pytest
+
+from lib389 import DirSrv
+from lib389.cli_base import LogCapture, FakeArgs
+
+from lib389.instance.setup import SetupDs
+from lib389.instance.options import General2Base, Slapd2Base
+from lib389._constants import *
+
+INSTANCE_PORT = 54321
+INSTANCE_SERVERID = 'standalone'
+
+DEBUGGING = True
+
+class TopologyInstance(object):
+ def __init__(self, standalone, logcap):
+ # For these tests, we don't want to open the instance.
+ # instance.open()
+ self.standalone = standalone
+ self.logcap = logcap
+
+# Need a teardown to destroy the instance.
[email protected]
+def topology(request):
+
+ lc = LogCapture()
+ instance = DirSrv(verbose=DEBUGGING)
+ instance.log.debug("Instance allocated")
+ args = {SER_PORT: INSTANCE_PORT,
+ SER_SERVERID_PROP: INSTANCE_SERVERID}
+ instance.allocate(args)
+ if instance.exists():
+ instance.delete()
+
+ # This will need to change to instance.create in the future
+ # when it's linked up!
+ sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log)
+
+ # Get the dicts from Type2Base, as though they were from _validate_ds_2_config
+ # IE get the defaults back just from Slapd2Base.collect
+ # Override instance name, root password, port and secure port.
+
+ general_options = General2Base(lc.log)
+ general_options.verify()
+ general = general_options.collect()
+
+ # Need an args -> options2 ...
+ slapd_options = Slapd2Base(lc.log)
+ slapd_options.set('instance_name', INSTANCE_SERVERID)
+ slapd_options.set('port', INSTANCE_PORT)
+ slapd_options.set('root_password', PW_DM)
+ slapd_options.verify()
+ slapd = slapd_options.collect()
+
+ sds.create_from_args(general, slapd, {}, None)
+ insts = instance.list(serverid=INSTANCE_SERVERID)
+ # Assert we did change the system.
+ assert(len(insts) == 1)
+ # Make sure we can connect
+ instance.open(connOnly=True)
+
+ def fin():
+ if instance.exists() and not DEBUGGING:
+ instance.delete()
+ request.addfinalizer(fin)
+
+ return TopologyInstance(instance, lc)
diff --git a/src/lib389/lib389/tests/cli/conf_backend.py b/src/lib389/lib389/tests/cli/conf_backend.py
index 33f9b28ac..da8ec7370 100644
--- a/src/lib389/lib389/tests/cli/conf_backend.py
+++ b/src/lib389/lib389/tests/cli/conf_backend.py
@@ -9,73 +9,11 @@
import pytest
from lib389.cli_conf.backend import backend_list, backend_get, backend_get_dn, backend_create, backend_delete
-from lib389 import DirSrv
-from lib389.cli_base import LogCapture, FakeArgs
-
-from lib389.instance.setup import SetupDs
-from lib389.instance.options import General2Base, Slapd2Base
-from lib389._constants import *
-
-INSTANCE_PORT = 54321
-INSTANCE_SERVERID = 'standalone'
-
-DEBUGGING = True
-
-class TopologyInstance(object):
- def __init__(self, standalone, logcap):
- # For these tests, we don't want to open the instance.
- # instance.open()
- self.standalone = standalone
- self.logcap = logcap
-
-# Need a teardown to destroy the instance.
[email protected]
-def topology(request):
-
- lc = LogCapture()
- instance = DirSrv(verbose=DEBUGGING)
- instance.log.debug("Instance allocated")
- args = {SER_PORT: INSTANCE_PORT,
- SER_SERVERID_PROP: INSTANCE_SERVERID}
- instance.allocate(args)
- if instance.exists():
- instance.delete()
-
- # This will need to change to instance.create in the future
- # when it's linked up!
- sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log)
-
- # Get the dicts from Type2Base, as though they were from _validate_ds_2_config
- # IE get the defaults back just from Slapd2Base.collect
- # Override instance name, root password, port and secure port.
-
- general_options = General2Base(lc.log)
- general_options.verify()
- general = general_options.collect()
-
- # Need an args -> options2 ...
- slapd_options = Slapd2Base(lc.log)
- slapd_options.set('instance_name', INSTANCE_SERVERID)
- slapd_options.set('port', INSTANCE_PORT)
- slapd_options.set('root_password', PW_DM)
- slapd_options.verify()
- slapd = slapd_options.collect()
-
- sds.create_from_args(general, slapd, {}, None)
- insts = instance.list(serverid=INSTANCE_SERVERID)
- # Assert we did change the system.
- assert(len(insts) == 1)
- # Make sure we can connect
- instance.open(connOnly=True)
-
- def fin():
- if instance.exists() and not DEBUGGING:
- instance.delete()
- request.addfinalizer(fin)
-
- return TopologyInstance(instance, lc)
+from lib389.cli_base import LogCapture, FakeArgs
+from lib389.tests.cli import topology
+# Topology is pulled from __init__.py
def test_backend_cli(topology):
#
args = FakeArgs()
diff --git a/src/lib389/lib389/tests/cli/conf_plugin.py b/src/lib389/lib389/tests/cli/conf_plugin.py
new file mode 100644
index 000000000..3f1955fc6
--- /dev/null
+++ b/src/lib389/lib389/tests/cli/conf_plugin.py
@@ -0,0 +1,53 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+import pytest
+
+from lib389.cli_conf.plugin import plugin_list, plugin_get, plugin_get_dn, plugin_enable, plugin_disable
+
+from lib389.cli_base import LogCapture, FakeArgs
+
+from lib389.tests.cli import topology
+
+plugins = [
+ 'Class of Service',
+ 'ldbm database',
+ 'Roles Plugin',
+ 'USN',
+ 'SSHA512',
+]
+
+# Topology is pulled from __init__.py
+def test_plugin_cli(topology):
+ args = FakeArgs()
+
+ plugin_list(topology.standalone, None, topology.logcap.log, None)
+ for p in plugins:
+ assert(topology.logcap.contains(p))
+ topology.logcap.flush()
+
+ # print(topology.logcap.outputs)
+ # Need to delete something, then re-add it.
+ args.selector = 'USN'
+ plugin_get(topology.standalone, None, topology.logcap.log, args)
+ assert(topology.logcap.contains('USN'))
+ topology.logcap.flush()
+
+ args.dn = 'cn=USN,cn=plugins,cn=config'
+ plugin_get_dn(topology.standalone, None, topology.logcap.log, args)
+ assert(topology.logcap.contains('USN'))
+ topology.logcap.flush()
+
+ plugin_disable(topology.standalone, None, topology.logcap.log, args, warn=False)
+ assert(topology.logcap.contains('Disabled'))
+ topology.logcap.flush()
+
+ plugin_enable(topology.standalone, None, topology.logcap.log, args)
+ assert(topology.logcap.contains('Enabled'))
+ topology.logcap.flush()
+
diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py
index 58ab4caa0..4223774bb 100644
--- a/src/lib389/lib389/tools.py
+++ b/src/lib389/lib389/tools.py
@@ -250,9 +250,11 @@ class DirSrvTools(object):
if cmd == 'start':
fullCmd = os.path.join(sbinDir, 'start-dirsrv %s' % self.serverid)
cmdPat = 'slapd started.'
- else:
+ elif cmd == 'stop':
fullCmd = os.path.join(sbinDir, 'stop-dirsrv %s' % self.serverid)
cmdPat = 'slapd stopped.'
+ else:
+ raise Exception('Invalid cmd passed!')
if "USE_GDB" in os.environ or "USE_VALGRIND" in os.environ:
timeout = timeout * 3
@@ -284,7 +286,7 @@ class DirSrvTools(object):
else:
done = True
- log.warn("Running command: %r - timeout(%d)" % (fullCmd, timeout))
+ log.info("Running command: %r - timeout(%d)" % (fullCmd, timeout))
rc = runCmd("%s" % fullCmd, timeout)
while rc == 0 and not done and int(time.time()) < full_timeout:
line = logfp.readline()
@@ -292,7 +294,7 @@ class DirSrvTools(object):
lastLine = line
if verbose:
log.debug("current line: %r" % line.strip())
- if line.find(cmdPat) >= 0:
+ if cmdPat is not None and line.find(cmdPat) >= 0:
started += 1
if started == 2:
done = True
| 0 |
f132cf41805a9ff525f611967b88a6d85f520def
|
389ds/389-ds-base
|
Ticket 48377 - Bundle jemalloc with Directory Server
Description: Updated spec files to download the latest supported
version of jemalloc source code, and build it. Then
include the library in the server's library directory
(/usr/lib64/dirsrv). Also, the bundled jemalloc library
can coexist with any existing jemalloc package. This
allows us to control the supported version that we ship.
Then in /etc/sysconfig/dirsrv added a commented LD_PRELOAD
line that is pre-set for the server.
Also fixed rpm.mk so that it looked at the proper spec file
for nunc-stans and jemalloc configuraton.
https://fedorahosted.org/389/ticket/48377
Reviewed by: rmeggins, wibrown, and nhosoi(Thanks!!!)
|
commit f132cf41805a9ff525f611967b88a6d85f520def
Author: Mark Reynolds <[email protected]>
Date: Wed Dec 16 15:19:24 2015 -0500
Ticket 48377 - Bundle jemalloc with Directory Server
Description: Updated spec files to download the latest supported
version of jemalloc source code, and build it. Then
include the library in the server's library directory
(/usr/lib64/dirsrv). Also, the bundled jemalloc library
can coexist with any existing jemalloc package. This
allows us to control the supported version that we ship.
Then in /etc/sysconfig/dirsrv added a commented LD_PRELOAD
line that is pre-set for the server.
Also fixed rpm.mk so that it looked at the proper spec file
for nunc-stans and jemalloc configuraton.
https://fedorahosted.org/389/ticket/48377
Reviewed by: rmeggins, wibrown, and nhosoi(Thanks!!!)
diff --git a/ldap/admin/src/base-initconfig.in b/ldap/admin/src/base-initconfig.in
index 2d47eb785..e803a360f 100644
--- a/ldap/admin/src/base-initconfig.in
+++ b/ldap/admin/src/base-initconfig.in
@@ -42,3 +42,9 @@
# up before we assume there is a problem and fail to start
# if using systemd, omit the "; export VARNAME" at the end
#PID_TIME=600 ; export PID_TIME
+
+# jemalloc is a general purpose malloc implementation that emphasizes
+# fragmentation avoidance and scalable concurrency support. jemalloc
+# has been shown to have a significant positive impact on the Directory
+# Server's process size/growth.
+#LD_PRELOAD=@libdir@/@package_name@/libjemalloc.so.1 ; export LD_PRELOAD
diff --git a/rpm.mk b/rpm.mk
index aa397b79e..4d89777ac 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -4,8 +4,12 @@ RPM_RELEASE ?= $(shell $(PWD)/rpm/rpmverrel.sh release)
PACKAGE = 389-ds-base
RPM_NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)
TARBALL = $(RPM_NAME_VERSION).tar.bz2
-NUNC_STANS_URL ?= $(shell rpmspec -P -D 'use_nunc_stans 1' $(PWD)/rpm/389-ds-base.spec.in | awk '/^Source3:/ {print $$2}')
+NUNC_STANS_URL ?= $(shell rpmspec -P -D 'use_nunc_stans 1' $(RPMBUILD)/SPECS/389-ds-base.spec | awk '/^Source4:/ {print $$2}')
NUNC_STANS_TARBALL ?= $(shell basename "$(NUNC_STANS_URL)")
+JEMALLOC_URL ?= $(shell rpmspec -P $(RPMBUILD)/SPECS/389-ds-base.spec | awk '/^Source3:/ {print $$2}')
+JEMALLOC_TARBALL ?= $(shell basename "$(JEMALLOC_URL)")
+NUNC_STANS_ON = 1
+BUNDLE_JEMALLOC = 1
clean:
rm -rf dist
@@ -19,7 +23,13 @@ tarballs: local-archive
-mkdir -p dist/sources
cd dist; tar cfj sources/$(TARBALL) $(RPM_NAME_VERSION)
rm -rf dist/$(RPM_NAME_VERSION)
- cd dist/sources; wget $(NUNC_STANS_URL)
+ cd dist/sources ; \
+ if [ $(NUNC_STANS_ON) -eq 1 ]; then \
+ wget $(NUNC_STANS_URL) ; \
+ fi ; \
+ if [ $(BUNDLE_JEMALLOC) -eq 1 ]; then \
+ wget $(JEMALLOC_URL) ; \
+ fi
rpmroot:
rm -rf $(RPMBUILD)
@@ -28,6 +38,10 @@ rpmroot:
mkdir -p $(RPMBUILD)/SOURCES
mkdir -p $(RPMBUILD)/SPECS
mkdir -p $(RPMBUILD)/SRPMS
+ sed -e s/__VERSION__/$(RPM_VERSION)/ -e s/__RELEASE__/$(RPM_RELEASE)/ \
+ -e s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/ \
+ -e s/__BUNDLE_JEMALLOC__/$(BUNDLE_JEMALLOC)/ \
+ rpm/$(PACKAGE).spec.in > $(RPMBUILD)/SPECS/$(PACKAGE).spec
rpmdistdir:
mkdir -p dist/rpms
@@ -37,10 +51,14 @@ srpmdistdir:
rpmbuildprep:
cp dist/sources/$(TARBALL) $(RPMBUILD)/SOURCES/
- cp dist/sources/$(NUNC_STANS_TARBALL) $(RPMBUILD)/SOURCES/
+ if [ $(NUNC_STANS_ON) -eq 1 ]; then \
+ cp dist/sources/$(NUNC_STANS_TARBALL) $(RPMBUILD)/SOURCES/ ; \
+ fi
+ if [ $(BUNDLE_JEMALLOC) -eq 1 ]; then \
+ cp dist/sources/$(JEMALLOC_TARBALL) $(RPMBUILD)/SOURCES/ ; \
+ fi
cp rpm/$(PACKAGE)-* $(RPMBUILD)/SOURCES/
- sed -e s/__VERSION__/$(RPM_VERSION)/ -e s/__RELEASE__/$(RPM_RELEASE)/ \
- rpm/$(PACKAGE).spec.in > $(RPMBUILD)/SPECS/$(PACKAGE).spec
+
srpms: rpmroot srpmdistdir tarballs rpmbuildprep
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index ff8586397..6e4bc4818 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -12,13 +12,19 @@
# If perl-Socket-2.000 or newer is available, set 0 to use_Socket6.
%global use_Socket6 0
# nunc-stans only builds on x86_64 for now
-# To build without nunc-stans, set 0 to use_nunc_stans.
+# To build without nunc-stans, set use_nunc_stans to 0.
%global use_nunc_stans __NUNC_STANS_ON__
-
%if %{use_nunc_stans}
%global nunc_stans_ver 0.1.7
%endif
+# Are we bundling jemalloc?
+%global bundle_jemalloc __BUNDLE_JEMALLOC__
+%if %{bundle_jemalloc}
+# The version used in the source tarball
+%global jemalloc_ver 3.6.0
+%endif
+
# fedora 15 and later uses tmpfiles.d
# otherwise, comment this out
%{!?with_tmpfiles_d: %global with_tmpfiles_d %{_sysconfdir}/tmpfiles.d}
@@ -32,7 +38,6 @@
Summary: 389 Directory Server (base)
Name: 389-ds-base
Version: __VERSION__
-#Release: %{?relprefix}1%{?prerel}%{?dist}
Release: __RELEASE__%{?dist}
License: GPLv2 with exceptions
URL: http://port389.org/
@@ -122,8 +127,12 @@ Source0: http://port389.org/sources/%{name}-%{version}%{?prerel}.tar.bz
# 389-ds-git.sh should be used to generate the source tarball from git
Source1: %{name}-git.sh
Source2: %{name}-devel.README
+
+%if %{bundle_jemalloc}
+Source3: http://www.port389.org/binaries/jemalloc-%{jemalloc_ver}.tar.bz2
+%endif
%if %{use_nunc_stans}
-Source3: https://git.fedorahosted.org/cgit/nunc-stans.git/snapshot/nunc-stans-%{nunc_stans_ver}.tar.xz
+Source4: https://git.fedorahosted.org/cgit/nunc-stans.git/snapshot/nunc-stans-%{nunc_stans_ver}.tar.xz
%endif
%description
@@ -154,6 +163,12 @@ BuildRequires: libtalloc-devel
BuildRequires: libevent-devel
BuildRequires: libtevent-devel
%endif
+%if %{bundle_jemalloc}
+BuildRequires: /usr/bin/xsltproc
+%ifnarch s390
+BuildRequires: valgrind-devel
+%endif
+%endif
%description libs
Core libraries for the 389 Directory Server base package. These libraries
@@ -184,9 +199,13 @@ Development Libraries and headers for the 389 Directory Server base package.
%prep
%setup -q -n %{name}-%{version}%{?prerel}
-%if %{use_nunc_stans}
+
+%if %{bundle_jemalloc}
%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 3
%endif
+%if %{use_nunc_stans}
+%setup -q -n %{name}-%{version}%{?prerel} -T -D -b 4
+%endif
cp %{SOURCE2} README.devel
%build
@@ -201,6 +220,13 @@ cp nunc-stans.h include/nunc-stans/nunc-stans.h
popd
%endif
+%if %{bundle_jemalloc}
+pushd ../jemalloc-%{jemalloc_ver}
+%configure CFLAGS='%{optflags} -msse2' --libdir=%{_libdir}/%{pkgname}
+make %{?_smp_mflags}
+popd
+%endif
+
%if %{use_openldap}
OPENLDAP_FLAG="--with-openldap"
%endif
@@ -236,6 +262,12 @@ rm -rf $RPM_BUILD_ROOT%{_includedir} $RPM_BUILD_ROOT%{_datadir} \
popd
%endif
+%if %{bundle_jemalloc}
+pushd ../jemalloc-%{jemalloc_ver}
+cp --preserve=links lib/libjemalloc.so* $RPM_BUILD_ROOT%{_libdir}/%{pkgname}
+popd
+%endif
+
make DESTDIR="$RPM_BUILD_ROOT" install
mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname}
@@ -390,6 +422,9 @@ fi
%if %{use_nunc_stans}
%{_libdir}/%{pkgname}/libnunc-stans.so
%endif
+%if %{bundle_jemalloc}
+%{_libdir}/%{pkgname}/libjemalloc.so
+%endif
%{_libdir}/pkgconfig/*
%files libs
@@ -401,8 +436,14 @@ fi
%if %{use_nunc_stans}
%{_libdir}/%{pkgname}/libnunc-stans.so*
%endif
+%if %{bundle_jemalloc}
+%{_libdir}/%{pkgname}/libjemalloc.so*
+%endif
%changelog
+* Mon Dec 14 2015 Mark Reynolds <[email protected]> - 1.3.4.1-2
+- Ticket 48377 - Include the jemalloc library
+
* Tue Jun 23 2015 Noriko Hosoi <[email protected]> - 1.3.4.1-1
- Release 1.3.4.1-1
| 0 |
8fc67410fd491c72e5b52a7e0f2d49b5c721e896
|
389ds/389-ds-base
|
Resolves: bug 224291
Bug Description: Move script-templates from sysconfdir to datadir
Reviewed by: nkinder, dennis (Thanks!)
Files: see diff
Branch: HEAD
Fix Description: Just change sysconfdir to datadir in both the Makefile.am (which controls where the files are put during installation) and in create_instance.c (which controls where instance creation looks for the templates to use them to create the real scripts).
Platforms tested: RHEL4
Flag Day: no
Doc impact: no
|
commit 8fc67410fd491c72e5b52a7e0f2d49b5c721e896
Author: Rich Megginson <[email protected]>
Date: Thu Jan 25 16:47:45 2007 +0000
Resolves: bug 224291
Bug Description: Move script-templates from sysconfdir to datadir
Reviewed by: nkinder, dennis (Thanks!)
Files: see diff
Branch: HEAD
Fix Description: Just change sysconfdir to datadir in both the Makefile.am (which controls where the files are put during installation) and in create_instance.c (which controls where instance creation looks for the templates to use them to create the real scripts).
Platforms tested: RHEL4
Flag Day: no
Doc impact: no
diff --git a/Makefile.am b/Makefile.am
index 4de0cfdf1..9feb53e58 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -60,7 +60,7 @@ propertydir = $(sysconfdir)@propertydir@
schemadir = $(sysconfdir)@schemadir@
serverdir = $(libdir)@serverdir@
serverplugindir = $(libdir)@serverplugindir@
-taskdir = $(sysconfdir)@scripttemplatedir@
+taskdir = $(datadir)@scripttemplatedir@
#------------------------
# Build Products
diff --git a/Makefile.in b/Makefile.in
index 01618bccb..57f4fed8e 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -951,7 +951,7 @@ PAM_LINK = -lpam
#------------------------
BUILT_SOURCES = dirver.h dberrstrs.h
CLEANFILES = dirver.h dberrstrs.h ns-slapd.properties
-taskdir = $(sysconfdir)@scripttemplatedir@
+taskdir = $(datadir)@scripttemplatedir@
server_LTLIBRARIES = libslapd.la libback-ldbm.la libds_admin.la libns-dshttpd.la
serverplugin_LTLIBRARIES = libacl-plugin.la libattr-unique-plugin.la libchainingdb-plugin.la \
libcos-plugin.la libdes-plugin.la libdistrib-plugin.la \
diff --git a/configure b/configure
index 504b19e91..682eaeda0 100755
--- a/configure
+++ b/configure
@@ -24602,7 +24602,7 @@ schemadir=/fedora-ds/schema
serverdir=/fedora-ds
# relative to libdir
serverplugindir=/fedora-ds/plugins
-# relative to sysconfdir
+# relative to datadir
scripttemplatedir=/fedora-ds/script-templates
diff --git a/configure.ac b/configure.ac
index fa3f0a06e..b4b35c35c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -130,7 +130,7 @@ schemadir=/fedora-ds/schema
serverdir=/fedora-ds
# relative to libdir
serverplugindir=/fedora-ds/plugins
-# relative to sysconfdir
+# relative to datadir
scripttemplatedir=/fedora-ds/script-templates
AC_SUBST(configdir)
AC_SUBST(sampledatadir)
diff --git a/ldap/admin/src/create_instance.c b/ldap/admin/src/create_instance.c
index 86ca3fa0e..be353c2f3 100644
--- a/ldap/admin/src/create_instance.c
+++ b/ldap/admin/src/create_instance.c
@@ -708,7 +708,7 @@ char *gen_script_auto(char *s_root, char *cs_path,
}
PR_snprintf(ofn, sizeof(ofn), "%s%c%s%cscript-templates%ctemplate-%s",
- cf->sysconfdir, FILE_PATHSEP, cf->brand_ds,
+ cf->datadir, FILE_PATHSEP, cf->brand_ds,
FILE_PATHSEP, FILE_PATHSEP, name);
PR_snprintf(fn, sizeof(fn), "%s%c%s", cs_path, FILE_PATHSEP, name);
create_instance_mkdir(cs_path, NEWDIR_MODE);
@@ -774,7 +774,7 @@ char *gen_perl_script_auto_for_migration(char *s_root, char *cs_path, char *name
int fnlen = 0;
PR_snprintf(ofn, sizeof(ofn), "%s%c%s%cscript-templates%ctemplate-%s",
- cf->sysconfdir, FILE_PATHSEP, cf->brand_ds,
+ cf->datadir, FILE_PATHSEP, cf->brand_ds,
FILE_PATHSEP, FILE_PATHSEP, name);
PR_snprintf(fn, sizeof(fn), "%s%c%s%cbin",
cf->sysconfdir, FILE_PATHSEP, cf->brand_ds, FILE_PATHSEP);
| 0 |
60b2d126a4220b1ecd02fe8be6442c6198223728
|
389ds/389-ds-base
|
Fixing compiler warnings
They were introduced by the commit add880accaa28de8304da1c2c2f58fe8af002ebb.
|
commit 60b2d126a4220b1ecd02fe8be6442c6198223728
Author: Noriko Hosoi <[email protected]>
Date: Wed Feb 29 17:02:29 2012 -0800
Fixing compiler warnings
They were introduced by the commit add880accaa28de8304da1c2c2f58fe8af002ebb.
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index 1690e889e..ea64b5cb0 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -267,8 +267,8 @@ pagedresults_free_one_msgid( Connection *conn, ber_int_t msgid )
int i;
LDAPDebug1Arg(LDAP_DEBUG_TRACE,
- "--> pagedresults_free_one: idx=%d\n", index);
- if (conn && (index > -1)) {
+ "--> pagedresults_free_one: msgid=%d\n", msgid);
+ if (conn && (msgid > -1)) {
PR_Lock(conn->c_mutex);
if (conn->c_pagedresults.prl_count <= 0) {
LDAPDebug2Args(LDAP_DEBUG_TRACE, "pagedresults_free_one_msgid: "
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index ad665955b..a62418777 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1408,6 +1408,7 @@ int pagedresults_is_timedout(Connection *conn);
int pagedresults_reset_timedout(Connection *conn);
int pagedresults_in_use(Connection *conn);
int pagedresults_free_one(Connection *conn, int index);
+int pagedresults_free_one_msgid( Connection *conn, ber_int_t msgid );
int op_is_pagedresults(Operation *op);
int pagedresults_cleanup_all(Connection *conn, int needlock);
void op_set_pagedresults(Operation *op);
| 0 |
0cfdea7abcacfca6686a6cf84dbf7ae1167f3022
|
389ds/389-ds-base
|
Issue 4764 - replicated operation sometime checks ACI (#4783)
|
commit 0cfdea7abcacfca6686a6cf84dbf7ae1167f3022
Author: progier389 <[email protected]>
Date: Wed May 26 16:07:43 2021 +0200
Issue 4764 - replicated operation sometime checks ACI (#4783)
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index c7a15e775..e0c1a52d2 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -1771,6 +1771,14 @@ connection_threadmain()
}
}
+ /*
+ * Fix bz 1931820 issue (the check to set OP_FLAG_REPLICATED may be done
+ * before replication session is properly set).
+ */
+ if (replication_connection) {
+ operation_set_flag(op, OP_FLAG_REPLICATED);
+ }
+
/*
* Call the do_<operation> function to process this request.
*/
| 0 |
4eb1cb606f23a9e88834785f330007e4402246ce
|
389ds/389-ds-base
|
Issue 6117 - Fix the UTC offset print (#6118)
Bug Description: UTC offset is mistakenly displayed as <sign><hour><seconds>
-03:30 was displayed as -031800
Fix Description: UTC offset is now displayed as <sign><hour><minutes>
-03.30 is displayed as -0330
Fixes: https://github.com/389ds/389-ds-base/issues/6117
Author: Ding-Yi Chen <[email protected]>
Reviewed by: Simon Pichugin
|
commit 4eb1cb606f23a9e88834785f330007e4402246ce
Author: Ding-Yi Chen <[email protected]>
Date: Thu Mar 7 12:36:19 2024 +1000
Issue 6117 - Fix the UTC offset print (#6118)
Bug Description: UTC offset is mistakenly displayed as <sign><hour><seconds>
-03:30 was displayed as -031800
Fix Description: UTC offset is now displayed as <sign><hour><minutes>
-03.30 is displayed as -0330
Fixes: https://github.com/389ds/389-ds-base/issues/6117
Author: Ding-Yi Chen <[email protected]>
Reviewed by: Simon Pichugin
diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c
index 2db0c0b20..5bd9279db 100644
--- a/ldap/servers/slapd/time.c
+++ b/ldap/servers/slapd/time.c
@@ -194,7 +194,7 @@ format_localTime_log(time_t t, int initsize __attribute__((unused)), char *buf,
return 1;
}
if (PR_snprintf(buf, *bufsize, "[%s %c%02d%02d] ", tbuf, sign,
- (int)(tz / 3600), (int)(tz % 3600)) == (PRUint32)-1) {
+ (int)(tz / 3600), (int)(tz % 3600 / 60)) == (PRUint32)-1) {
return 1;
}
*bufsize = strlen(buf);
@@ -245,7 +245,7 @@ format_localTime_hr_log(time_t t, long nsec, int initsize __attribute__((unused)
return 1;
}
if (PR_snprintf(buf, *bufsize, "[%s.%09ld %c%02d%02d] ", tbuf, nsec, sign,
- (int)(tz / 3600), (int)(tz % 3600)) == (PRUint32)-1) {
+ (int)(tz / 3600), (int)(tz % 3600 / 60)) == (PRUint32)-1) {
return 1;
}
*bufsize = strlen(buf);
| 0 |
5eb19778f7939967e8ca714c4d4cb03ffa11064d
|
389ds/389-ds-base
|
Ticket 48925 - slapd crash with SIGILL: Dsktune should detect lack of CMPXCHG16B
Bug Description: On older AMD the CMPXCHG16B is not present. This is critical
to the correct operation of lfds. Without out it we are unable to use nunc-stans
Fix Description: dsktune should warn if CMPXCHG16B (flag cx16) is not present.
In a future release we will NOT allow installation upon a platform that lacks
this instruction.
https://fedorahosted.org/389/ticket/48925
Author: wibrown
Review by: nhosoi (Thank you!)
|
commit 5eb19778f7939967e8ca714c4d4cb03ffa11064d
Author: William Brown <[email protected]>
Date: Thu Jul 14 13:47:11 2016 +1000
Ticket 48925 - slapd crash with SIGILL: Dsktune should detect lack of CMPXCHG16B
Bug Description: On older AMD the CMPXCHG16B is not present. This is critical
to the correct operation of lfds. Without out it we are unable to use nunc-stans
Fix Description: dsktune should warn if CMPXCHG16B (flag cx16) is not present.
In a future release we will NOT allow installation upon a platform that lacks
this instruction.
https://fedorahosted.org/389/ticket/48925
Author: wibrown
Review by: nhosoi (Thank you!)
diff --git a/ldap/systools/idsktune.c b/ldap/systools/idsktune.c
index c7e76e73c..b6c352aa7 100644
--- a/ldap/systools/idsktune.c
+++ b/ldap/systools/idsktune.c
@@ -11,11 +11,12 @@
# include <config.h>
#endif
+#define _GNU_SOURCE
/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
* Don't forget to update build_date when the patch sets are updated.
* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
-static char *build_date = "23-FEBRUARY-2012";
+static char *build_date = "14-JULY-2016";
#if defined(linux) || defined(__linux) || defined(__linux__)
#define IDDS_LINUX_INCLUDE 1
@@ -32,10 +33,12 @@ static char *build_date = "23-FEBRUARY-2012";
#include <sys/resource.h>
#include <unistd.h>
#endif
+
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
+
#if !defined(__VMS) && !defined(IDDS_LINUX_INCLUDE)
#if defined(__hpux) && defined(f_type)
#undef f_type
@@ -864,6 +867,39 @@ done:
free(cmd);
}
}
+
+
+static void
+linux_check_cpu_features(void)
+{
+ FILE *cpuinfo = fopen("/proc/cpuinfo", "rb");
+ char *arg = 0;
+ char *token = NULL;
+ size_t size = 0;
+ int found = 0;
+ while(getline(&arg, &size, cpuinfo) != -1)
+ {
+ if (strncmp("flags", arg, 5) == 0) {
+ token = strtok(arg, " ");
+ while (token != NULL) {
+ if (strncmp(token, "cx16", 4) == 0) {
+ found += 1;
+ }
+ token = strtok(NULL, " ");
+ }
+ }
+ }
+ free(arg);
+ fclose(cpuinfo);
+
+ if (found == 0) {
+ flag_os_bad = 1;
+ printf("ERROR: This system does not support CMPXCHG16B instruction (cpuflag cx16).\n");
+ printf(" nsslapd-enable-nunc-stans must be set to "off" on this system. \n");
+ printf(" In a future release of Directory Server this platform will NOT be supported.\n\n");
+ }
+
+}
#endif /* IDDS_LINUX_INCLUDE */
@@ -976,6 +1012,8 @@ static void gen_tests (void)
#if defined(IDDS_LINUX_INCLUDE)
linux_check_release();
+
+ linux_check_cpu_features();
#endif
| 0 |
4c5a4316b7933e9d65639430044ecdf325ef995a
|
389ds/389-ds-base
|
Ticket 50581 - ns-slapd crashes during ldapi search
Bug Description:
Using ldapi, if the length of the socket file path exceeds
46 bytes it triggers a buffer overflow while reseting a connection.
Reset happens at open/close/error.
Fix Description:
Use a buffer sized for a PRNetAddr.local.path (~100bytes)
Use of MAXPATHLEN (4kb) is too much.
https://pagure.io/389-ds-base/issue/50581
Reviewed by: William Brown, Alexander Bokovoy, Mark Reynolds, Simon Pichugi
Platforms tested: F30 (thanks !!)
Flag Day: no
Doc impact: no
|
commit 4c5a4316b7933e9d65639430044ecdf325ef995a
Author: Thierry Bordaz <[email protected]>
Date: Mon Sep 2 16:48:45 2019 +0200
Ticket 50581 - ns-slapd crashes during ldapi search
Bug Description:
Using ldapi, if the length of the socket file path exceeds
46 bytes it triggers a buffer overflow while reseting a connection.
Reset happens at open/close/error.
Fix Description:
Use a buffer sized for a PRNetAddr.local.path (~100bytes)
Use of MAXPATHLEN (4kb) is too much.
https://pagure.io/389-ds-base/issue/50581
Reviewed by: William Brown, Alexander Bokovoy, Mark Reynolds, Simon Pichugi
Platforms tested: F30 (thanks !!)
Flag Day: no
Doc impact: no
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 8a51f9c42..662357f44 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -12,6 +12,7 @@
"""
from subprocess import check_output, Popen
+from lib389 import DirSrv
from lib389.idm.user import UserAccounts
import pytest
from lib389.tasks import *
@@ -24,6 +25,9 @@ from lib389.topologies import topology_st
from lib389.paths import Paths
from lib389.idm.directorymanager import DirectoryManager
from lib389.config import LDBMConfig
+from lib389.dseldif import DSEldif
+from lib389.rootdse import RootDSE
+
pytestmark = pytest.mark.tier0
@@ -1270,6 +1274,109 @@ sample_entries = yes
request.addfinalizer(fin)
[email protected](scope="module")
+def dscreate_ldapi_instance(request):
+ template_file = "/tmp/dssetup.inf"
+ longname_serverid = "test_longname_deadbeef_deadbeef_deadbeef_deadbeef_deadbeef"
+ template_text = """[general]
+config_version = 2
+# This invalid hostname ...
+full_machine_name = localhost.localdomain
+# Means we absolutely require this.
+strict_host_checking = False
+# In tests, we can be run in containers, NEVER trust
+# that systemd is there, or functional in any capacity
+systemd = False
+
+[slapd]
+instance_name = %s
+root_dn = cn=directory manager
+root_password = someLongPassword_123
+# We do not have access to high ports in containers,
+# so default to something higher.
+port = 38999
+secure_port = 63699
+
+
+[backend-userroot]
+suffix = dc=example,dc=com
+sample_entries = yes
+""" % longname_serverid
+
+ with open(template_file, "w") as template_fd:
+ template_fd.write(template_text)
+
+ # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389
+ tmp_env = os.environ
+ if "PYTHONPATH" in tmp_env:
+ del tmp_env["PYTHONPATH"]
+ try:
+ subprocess.check_call([
+ 'dscreate',
+ 'from-file',
+ template_file
+ ], env=tmp_env)
+ except subprocess.CalledProcessError as e:
+ log.fatal("dscreate failed! Error ({}) {}".format(e.returncode, e.output))
+ assert False
+
+ inst = DirSrv(verbose=True, external_log=log)
+ dse_ldif = DSEldif(inst,
+ serverid=longname_serverid)
+
+ socket_path = dse_ldif.get("cn=config", "nsslapd-ldapifilepath")
+ inst.local_simple_allocate(
+ serverid=longname_serverid,
+ ldapuri=f"ldapi://{socket_path[0].replace('/', '%2f')}",
+ password="someLongPassword_123"
+ )
+ inst.ldapi_enabled = 'on'
+ inst.ldapi_socket = socket_path
+ inst.ldapi_autobind = 'off'
+ try:
+ inst.open()
+ except:
+ log.fatal("Failed to connect via ldapi to %s instance" % longname_serverid)
+ os.remove(template_file)
+ try:
+ subprocess.check_call(['dsctl', longname_serverid, 'remove', '--do-it'])
+ except subprocess.CalledProcessError as e:
+ log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output))
+
+ def fin():
+ os.remove(template_file)
+ try:
+ subprocess.check_call(['dsctl', longname_serverid, 'remove', '--do-it'])
+ except subprocess.CalledProcessError as e:
+ log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output))
+
+ request.addfinalizer(fin)
+
+ return inst
+
+
[email protected](not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.0.0'),
+ reason="This test is only required with new admin cli, and requires root.")
[email protected]
[email protected]
+def test_dscreate_longname(dscreate_ldapi_instance):
+ """Test that an instance with a long name can
+ handle ldapi connection using a long socket name
+
+ :id: 5d72d955-aff8-4741-8c9a-32c1c707cf1f
+ :setup: None
+ :steps:
+ 1. create an instance with a long serverId name, that open a ldapi connection
+ 2. Connect with ldapi, that hit 50581 and crash the instance
+ :expectedresults:
+ 1. Should succeeds
+ 2. Should succeeds
+ """
+
+ root_dse = RootDSE(dscreate_ldapi_instance)
+ log.info(root_dse.get_supported_ctrls())
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index e6ce0f012..3600d3dc3 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -275,6 +275,8 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib
{
char *pTmp = is_SSL ? "SSL " : "";
char *str_ip = NULL, *str_destip;
+ char buf_ldapi[sizeof(from->local.path) + 1] = {0};
+ char buf_destldapi[sizeof(from->local.path) + 1] = {0};
char buf_ip[INET6_ADDRSTRLEN + 1] = {0};
char buf_destip[INET6_ADDRSTRLEN + 1] = {0};
char *str_unknown = "unknown";
@@ -296,18 +298,18 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib
slapi_ch_free((void **)&conn->cin_addr); /* just to be conservative */
if (from->raw.family == PR_AF_LOCAL) { /* ldapi */
conn->cin_addr = (PRNetAddr *)slapi_ch_malloc(sizeof(PRNetAddr));
- PL_strncpyz(buf_ip, from->local.path, sizeof(from->local.path));
+ PL_strncpyz(buf_ldapi, from->local.path, sizeof(from->local.path));
memcpy(conn->cin_addr, from, sizeof(PRNetAddr));
- if (!buf_ip[0]) {
+ if (!buf_ldapi[0]) {
PR_GetPeerName(conn->c_prfd, from);
- PL_strncpyz(buf_ip, from->local.path, sizeof(from->local.path));
+ PL_strncpyz(buf_ldapi, from->local.path, sizeof(from->local.path));
memcpy(conn->cin_addr, from, sizeof(PRNetAddr));
- if (!buf_ip[0]) {
+ if (!buf_ldapi[0]) {
/* Cannot derive local address, need something for logging */
- PL_strncpyz(buf_ip, "local", sizeof(buf_ip));
+ PL_strncpyz(buf_ldapi, "local", sizeof(buf_ldapi));
}
}
- str_ip = buf_ip;
+ str_ip = buf_ldapi;
} else if (((from->ipv6.ip.pr_s6_addr32[0] != 0) || /* from contains non zeros */
(from->ipv6.ip.pr_s6_addr32[1] != 0) ||
(from->ipv6.ip.pr_s6_addr32[2] != 0) ||
@@ -362,21 +364,24 @@ connection_reset(Connection *conn, int ns, PRNetAddr *from, int fromLen __attrib
memset(conn->cin_destaddr, 0, sizeof(PRNetAddr));
if (PR_GetSockName(conn->c_prfd, conn->cin_destaddr) == 0) {
if (conn->cin_destaddr->raw.family == PR_AF_LOCAL) { /* ldapi */
- PL_strncpyz(buf_destip, conn->cin_destaddr->local.path,
+ PL_strncpyz(buf_destldapi, conn->cin_destaddr->local.path,
sizeof(conn->cin_destaddr->local.path));
- if (!buf_destip[0]) {
- PL_strncpyz(buf_destip, "unknown local file", sizeof(buf_destip));
+ if (!buf_destldapi[0]) {
+ PL_strncpyz(buf_destldapi, "unknown local file", sizeof(buf_destldapi));
}
- } else if (PR_IsNetAddrType(conn->cin_destaddr, PR_IpAddrV4Mapped)) {
- PRNetAddr v4destaddr = {{0}};
- v4destaddr.inet.family = PR_AF_INET;
- v4destaddr.inet.ip = conn->cin_destaddr->ipv6.ip.pr_s6_addr32[3];
- PR_NetAddrToString(&v4destaddr, buf_destip, sizeof(buf_destip));
+ str_destip = buf_destldapi;
} else {
- PR_NetAddrToString(conn->cin_destaddr, buf_destip, sizeof(buf_destip));
+ if (PR_IsNetAddrType(conn->cin_destaddr, PR_IpAddrV4Mapped)) {
+ PRNetAddr v4destaddr = {{0}};
+ v4destaddr.inet.family = PR_AF_INET;
+ v4destaddr.inet.ip = conn->cin_destaddr->ipv6.ip.pr_s6_addr32[3];
+ PR_NetAddrToString(&v4destaddr, buf_destip, sizeof (buf_destip));
+ } else {
+ PR_NetAddrToString(conn->cin_destaddr, buf_destip, sizeof (buf_destip));
+ }
+ buf_destip[sizeof (buf_destip) - 1] = '\0';
+ str_destip = buf_destip;
}
- buf_destip[sizeof(buf_destip) - 1] = '\0';
- str_destip = buf_destip;
} else {
str_destip = str_unknown;
}
| 0 |
3d73e3877f162514f0c495c4adab8e6932edd808
|
389ds/389-ds-base
|
Ticket 49154 - Nunc Stans stress should assert it has 95% success rate
Bug Description: We should assert that the nunc-stans stress test
is able to pass 95% of it's connections during an overload scenario.
Fix Description: Assert we pass 95% of connections. Additionally, we
were not actually running the tests properly, so fix that. Improve
the the work thread function to be slightly faster by better using our
atomic shutdown check.
https://pagure.io/389-ds-base/issue/49154
Author: wibrown
Review by: vashirov (Thanks!)
|
commit 3d73e3877f162514f0c495c4adab8e6932edd808
Author: William Brown <[email protected]>
Date: Fri Mar 10 13:46:57 2017 +1000
Ticket 49154 - Nunc Stans stress should assert it has 95% success rate
Bug Description: We should assert that the nunc-stans stress test
is able to pass 95% of it's connections during an overload scenario.
Fix Description: Assert we pass 95% of connections. Additionally, we
were not actually running the tests properly, so fix that. Improve
the the work thread function to be slightly faster by better using our
atomic shutdown check.
https://pagure.io/389-ds-base/issue/49154
Author: wibrown
Review by: vashirov (Thanks!)
diff --git a/Makefile.am b/Makefile.am
index ccbb5303f..1cf61cd68 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -553,7 +553,8 @@ dist_noinst_HEADERS += \
test/test_slapd.h \
src/libsds/test/test_sds.h \
src/libsds/test/benchmark.h \
- src/libsds/test/benchmark_par.h
+ src/libsds/test/benchmark_par.h \
+ src/nunc-stans/test/test_nuncstans_stress.h
endif
dist_noinst_DATA = \
@@ -1985,12 +1986,13 @@ check_PROGRAMS = test_slapd \
benchmark_sds \
benchmark_par_sds \
test_nuncstans \
- test_nuncstans_stress
+ test_nuncstans_stress_small \
+ test_nuncstans_stress_large
# Mark all check programs for testing
TESTS = test_slapd \
test_libsds \
test_nuncstans \
- test_nuncstans_stress
+ test_nuncstans_stress_small
test_slapd_SOURCES = test/main.c \
test/libslapd/test.c \
@@ -2034,10 +2036,15 @@ test_nuncstans_CPPFLAGS = $(AM_CPPFLAGS) $(CMOCKA_INCLUDES) $(NUNCSTANS_CPPFLAGS
test_nuncstans_LDADD = libnunc-stans.la libsds.la
test_nuncstans_LDFLAGS = $(ASAN_DEFINES) $(PROFILING_LINKS) $(CMOCKA_LINKS) $(EVENT_LINK)
-test_nuncstans_stress_SOURCES = src/nunc-stans/test/test_nuncstans.c
-test_nuncstans_stress_CPPFLAGS = $(AM_CPPFLAGS) $(CMOCKA_INCLUDES) $(NUNCSTANS_CPPFLAGS)
-test_nuncstans_stress_LDADD = libnunc-stans.la libsds.la
-test_nuncstans_stress_LDFLAGS = $(ASAN_DEFINES) $(PROFILING_LINKS) $(CMOCKA_LINKS) $(EVENT_LINK)
+test_nuncstans_stress_large_SOURCES = src/nunc-stans/test/test_nuncstans_stress_large.c src/nunc-stans/test/test_nuncstans_stress_core.c
+test_nuncstans_stress_large_CPPFLAGS = $(AM_CPPFLAGS) $(CMOCKA_INCLUDES) $(NUNCSTANS_CPPFLAGS)
+test_nuncstans_stress_large_LDADD = libnunc-stans.la libsds.la
+test_nuncstans_stress_large_LDFLAGS = $(ASAN_DEFINES) $(PROFILING_LINKS) $(CMOCKA_LINKS) $(EVENT_LINK)
+
+test_nuncstans_stress_small_SOURCES = src/nunc-stans/test/test_nuncstans_stress_small.c src/nunc-stans/test/test_nuncstans_stress_core.c
+test_nuncstans_stress_small_CPPFLAGS = $(AM_CPPFLAGS) $(CMOCKA_INCLUDES) $(NUNCSTANS_CPPFLAGS)
+test_nuncstans_stress_small_LDADD = libnunc-stans.la libsds.la
+test_nuncstans_stress_small_LDFLAGS = $(ASAN_DEFINES) $(PROFILING_LINKS) $(CMOCKA_LINKS) $(EVENT_LINK)
endif
diff --git a/src/nunc-stans/ns/ns_thrpool.c b/src/nunc-stans/ns/ns_thrpool.c
index 744749b15..10791eeab 100644
--- a/src/nunc-stans/ns/ns_thrpool.c
+++ b/src/nunc-stans/ns/ns_thrpool.c
@@ -165,7 +165,6 @@ os_free(void *ptr)
int32_t
ns_thrpool_is_shutdown(struct ns_thrpool_t *tp)
{
- /* We need to barrier this somehow? */
int32_t result = 0;
__atomic_load(&(tp->shutdown), &result, __ATOMIC_SEQ_CST);
return result;
@@ -174,7 +173,6 @@ ns_thrpool_is_shutdown(struct ns_thrpool_t *tp)
int32_t
ns_thrpool_is_event_shutdown(struct ns_thrpool_t *tp)
{
- /* We need to barrier this somehow? */
int32_t result = 0;
__atomic_load(&(tp->shutdown_event_loop), &result, __ATOMIC_SEQ_CST);
return result;
@@ -239,6 +237,8 @@ internal_ns_job_rearm(ns_job_t *job)
#endif
job->state = NS_JOB_ARMED;
+ /* I think we need to check about is_shutdown here? */
+
if (NS_JOB_IS_IO(job->job_type) || NS_JOB_IS_TIMER(job->job_type) || NS_JOB_IS_SIGNAL(job->job_type)) {
event_q_notify(job);
} else {
@@ -274,21 +274,7 @@ work_job_execute(ns_job_t *job)
#ifdef DEBUG
ns_log(LOG_DEBUG, "work_job_execute PERSIST and RUNNING, remarking %x as NS_JOB_NEEDS_ARM\n", job);
#endif
- /*
- * So at this point, if this is an IO or a SIGNAL job then, we are
- * still in the event framework's io event queue. So we actually
- * are already rearmed!!!
- *
- * This is *exactly* why it's impossible to disarm a persist IO job
- * once we start it from external threads! Too many dangers abound!
- */
- if (NS_JOB_IS_IO(job->job_type) || NS_JOB_IS_SIGNAL(job->job_type)) {
- job->state = NS_JOB_ARMED;
- pthread_mutex_unlock(job->monitor);
- return;
- } else {
- job->state = NS_JOB_NEEDS_ARM;
- }
+ job->state = NS_JOB_NEEDS_ARM;
}
if (job->state == NS_JOB_NEEDS_DELETE) {
@@ -360,6 +346,8 @@ worker_thread_func(void *arg)
{
ns_thread_t *thr = (ns_thread_t *)arg;
ns_thrpool_t *tp = thr->tp;
+ sds_result result = SDS_SUCCESS;
+ int32_t is_shutdown = ns_thrpool_is_shutdown(tp);
/* Get ready to use lock free ds */
sds_lqueue_tprep(tp->work_q);
@@ -367,18 +355,23 @@ worker_thread_func(void *arg)
/*
* Execute jobs until shutdown is set and the queues are empty.
*/
- while (!ns_thrpool_is_shutdown(tp)) {
+ while (!is_shutdown) {
ns_job_t *job = NULL;
+ result = sds_lqueue_dequeue(tp->work_q, (void **)&job);
/* Don't need monitor here, job_dequeue barriers the memory for us. Job will be valid */
- while(sds_lqueue_dequeue(tp->work_q, (void **)&job) == SDS_LIST_EXHAUSTED && !ns_thrpool_is_shutdown(tp))
- {
+ /* Is it possible for a worker thread to get stuck here during shutdown? */
+ if (result == SDS_LIST_EXHAUSTED && !is_shutdown) {
work_q_wait(tp);
- }
-
- if (job) {
+ } else if (result == SDS_SUCCESS && job != NULL) {
+ /* Even if we are shutdown here, we can process a job. */
+ /* Should we just keep dequeing until we exhaust the list? */
work_job_execute(job);
/* MUST NOT ACCESS JOB FROM THIS POINT */
+ } else {
+ ns_log(LOG_ERR, "worker_thread_func encountered a recoverable issue during processing of the queue\n");
}
+
+ is_shutdown = ns_thrpool_is_shutdown(tp);
}
/* With sds, it cleans the thread on join automatically. */
@@ -480,8 +473,7 @@ event_q_wake(ns_thrpool_t *tp)
}
static void
-event_q_notify(ns_job_t *job)
-{
+event_q_notify(ns_job_t *job) {
ns_thrpool_t *tp = job->tp;
/* if we are being called from a thread other than the
event loop thread, we have to notify that thread to
@@ -1133,6 +1125,11 @@ ns_job_rearm(ns_job_t *job)
PR_ASSERT(job);
pthread_mutex_lock(job->monitor);
PR_ASSERT(job->state == NS_JOB_WAITING || job->state == NS_JOB_RUNNING);
+
+ if (ns_thrpool_is_shutdown(job->tp)) {
+ return PR_FAILURE;
+ }
+
if (job->state == NS_JOB_WAITING) {
#ifdef DEBUG
ns_log(LOG_DEBUG, "ns_rearm_job %x state %d moving to NS_JOB_NEEDS_ARM\n", job, job->state);
@@ -1494,6 +1491,7 @@ ns_thrpool_shutdown(struct ns_thrpool_t *tp)
__atomic_add_fetch(&(tp->shutdown), 1, __ATOMIC_SEQ_CST);
/* Wake up the idle worker threads so they can exit. */
+ /* Do we need this to be run in conjuction with our thread join loop incase threads are still active? */
pthread_mutex_lock(&(tp->work_q_lock));
pthread_cond_broadcast(&(tp->work_q_cv));
pthread_mutex_unlock(&(tp->work_q_lock));
@@ -1510,6 +1508,12 @@ ns_thrpool_wait(ns_thrpool_t *tp)
while (sds_queue_dequeue(tp->thread_stack, (void **)&thr) == SDS_SUCCESS)
{
+ /* LAST CHANCE! Really make sure the thread workers are ready to go! */
+ /* In theory, they could still be blocked up here, but we hope not ... */
+ pthread_mutex_lock(&(tp->work_q_lock));
+ pthread_cond_broadcast(&(tp->work_q_cv));
+ pthread_mutex_unlock(&(tp->work_q_lock));
+
void *retval = NULL;
int32_t rc = pthread_join(thr->thr, &retval);
#ifdef DEBUG
diff --git a/src/nunc-stans/test/test_nuncstans_stress.h b/src/nunc-stans/test/test_nuncstans_stress.h
new file mode 100644
index 000000000..6c198033e
--- /dev/null
+++ b/src/nunc-stans/test/test_nuncstans_stress.h
@@ -0,0 +1,42 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (c) 2017, Red Hat, Inc
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+/* For cmocka */
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <cmocka.h>
+
+#include <nunc-stans.h>
+
+#include <stdio.h>
+#include <signal.h>
+
+#include <syslog.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include <time.h>
+#include <sys/time.h>
+
+#include <assert.h>
+
+struct test_params {
+ int32_t client_thread_count;
+ int32_t server_thread_count;
+ int32_t jobs;
+ int32_t test_timeout;
+};
+
+int ns_stress_teardown(void **state);
+void ns_stress_test(void **state);
+
diff --git a/src/nunc-stans/test/test_nuncstans_stress.c b/src/nunc-stans/test/test_nuncstans_stress_core.c
similarity index 92%
rename from src/nunc-stans/test/test_nuncstans_stress.c
rename to src/nunc-stans/test/test_nuncstans_stress_core.c
index 29585d5dc..ca4e45d8d 100644
--- a/src/nunc-stans/test/test_nuncstans_stress.c
+++ b/src/nunc-stans/test/test_nuncstans_stress_core.c
@@ -35,35 +35,8 @@
* removal, timers, and more.
*/
-#ifdef HAVE_CONFIG_H
-# include <config.h>
-#endif
-
-/* For cmocka */
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include <cmocka.h>
-
-/*
-#include <nspr.h>
-#include <plstr.h>
-#include <prlog.h>
-*/
-
-#include <nunc-stans.h>
-
-#include <stdio.h>
-#include <signal.h>
-
-#include <syslog.h>
-#include <string.h>
-#include <inttypes.h>
-
-#include <time.h>
-#include <sys/time.h>
-
-#include <assert.h>
+/* Our local stress test header */
+#include "test_nuncstans_stress.h"
struct conn_ctx {
size_t offset; /* current offset into buffer for reading or writing */
@@ -84,11 +57,13 @@ int32_t server_success_count = 0;
int32_t client_fail_count = 0;
int32_t client_timeout_count = 0;
int32_t server_fail_count = 0;
-int32_t job_count = 0;
-int32_t client_thread_count = 80;
-int32_t server_thread_count = 20;
-int32_t jobs = 200;
-int32_t test_timeout = 70;
+
+int
+ns_stress_teardown(void **state) {
+ struct test_params *tparams = (struct test_params *)*state;
+ free(tparams);
+ return 0;
+}
#define PR_WOULD_BLOCK(iii) (iii == PR_PENDING_INTERRUPT_ERROR) || (iii == PR_WOULD_BLOCK_ERROR)
@@ -320,7 +295,7 @@ static void
test_client_shutdown(struct ns_job_t *job)
{
do_logging(LOG_DEBUG, "Received shutdown signal\n");
- do_logging(LOG_DEBUG, "status .... job_count: %d fail_count: %d success_count: %d\n", job_count, client_fail_count, client_success_count);
+ do_logging(LOG_DEBUG, "status .... fail_count: %d success_count: %d\n", client_fail_count, client_success_count);
ns_thrpool_shutdown(ns_job_get_tp(job));
/* This also needs to start the thrpool shutdown for the server. */
ns_thrpool_shutdown(ns_job_get_data(job));
@@ -377,7 +352,9 @@ client_initiate_connection_cb(struct ns_job_t *job)
sock = PR_OpenTCPSocket(PR_AF_INET6);
if (sock == NULL) {
- do_logging(LOG_ERR, "Socket failed\n");
+ char *err = NULL;
+ PR_GetErrorText(err);
+ do_logging(LOG_ERR, "FAIL: Socket failed, %d -> %s\n", PR_GetError(), err);
PR_AtomicAdd(&client_fail_count, 1);
goto done;
}
@@ -411,11 +388,13 @@ done:
static void
client_create_work(struct ns_job_t *job)
{
+ struct test_params *tparams = ns_job_get_data(job);
+
struct timespec ts;
PR_Sleep(PR_SecondsToInterval(1));
clock_gettime(CLOCK_MONOTONIC, &ts);
printf("BEGIN: %ld.%ld\n", ts.tv_sec, ts.tv_nsec);
- for (int32_t i = 0; i < jobs; i++) {
+ for (int32_t i = 0; i < tparams->jobs; i++) {
assert_int_equal(ns_add_job(ns_job_get_tp(job), NS_JOB_NONE|NS_JOB_THREAD, client_initiate_connection_cb, NULL, NULL), 0);
}
assert_int_equal(ns_job_done(job), 0);
@@ -423,15 +402,17 @@ client_create_work(struct ns_job_t *job)
printf("Create work thread complete!\n");
}
-static void
-ns_stress_test(void **state __attribute__((unused)))
+void
+ns_stress_test(void **state)
{
+ struct test_params *tparams = *state;
+
/* Setup both thread pools. */
/* Client first */
- int32_t job_count = jobs * client_thread_count;
+ int32_t job_count = tparams->jobs * tparams->client_thread_count;
struct ns_thrpool_t *ctp;
struct ns_thrpool_config client_ns_config;
struct ns_job_t *sigterm_job = NULL;
@@ -442,12 +423,12 @@ ns_stress_test(void **state __attribute__((unused)))
struct ns_job_t *sigusr2_job = NULL;
struct ns_job_t *final_job = NULL;
- struct timeval timeout = { test_timeout, 0 };
+ struct timeval timeout = { tparams->test_timeout, 0 };
setup_logging();
ns_thrpool_config_init(&client_ns_config);
- client_ns_config.max_threads = client_thread_count;
+ client_ns_config.max_threads = tparams->client_thread_count;
client_ns_config.log_fct = do_vlogging;
ctp = ns_thrpool_new(&client_ns_config);
@@ -458,7 +439,7 @@ ns_stress_test(void **state __attribute__((unused)))
struct ns_job_t *listen_job = NULL;
ns_thrpool_config_init(&server_ns_config);
- server_ns_config.max_threads = server_thread_count;
+ server_ns_config.max_threads = tparams->server_thread_count;
server_ns_config.log_fct = do_vlogging;
stp = ns_thrpool_new(&server_ns_config);
@@ -493,8 +474,8 @@ ns_stress_test(void **state __attribute__((unused)))
assert_int_equal(ns_add_timeout_job(ctp, &timeout, NS_JOB_NONE|NS_JOB_THREAD, test_client_shutdown, stp, &final_job), 0);
/* While true, add connect / write jobs */
- for (PRInt32 i = 0; i < client_thread_count; i++) {
- assert_int_equal(ns_add_job(ctp, NS_JOB_NONE|NS_JOB_THREAD, client_create_work, NULL, NULL), 0);
+ for (PRInt32 i = 0; i < tparams->client_thread_count; i++) {
+ assert_int_equal(ns_add_job(ctp, NS_JOB_NONE|NS_JOB_THREAD, client_create_work, tparams, NULL), 0);
}
/* Wait for all the clients to be done dispatching jobs to the server */
@@ -542,17 +523,9 @@ ns_stress_test(void **state __attribute__((unused)))
assert_int_equal(client_success_count, job_count);
*/
assert_int_equal(server_success_count, client_success_count);
+ int32_t job_threshold = (tparams->jobs * tparams->client_thread_count) * 0.95;
+ assert_true(client_success_count >= job_threshold);
PR_Cleanup();
}
-
-int
-main (void)
-{
- const struct CMUnitTest tests[] = {
- cmocka_unit_test(ns_stress_test),
- };
- return cmocka_run_group_tests(tests, NULL, NULL);
-}
-
diff --git a/src/nunc-stans/test/test_nuncstans_stress_large.c b/src/nunc-stans/test/test_nuncstans_stress_large.c
new file mode 100644
index 000000000..a3e0d5e58
--- /dev/null
+++ b/src/nunc-stans/test/test_nuncstans_stress_large.c
@@ -0,0 +1,33 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (c) 2017, Red Hat, Inc
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#include "test_nuncstans_stress.h"
+
+int
+ns_stress_large_setup(void **state) {
+ struct test_params *tparams = malloc(sizeof(struct test_params));
+ tparams->client_thread_count = 80;
+ tparams->server_thread_count = 20;
+ tparams->jobs = 200;
+ tparams->test_timeout = 70;
+ *state = tparams;
+ return 0;
+}
+
+int
+main (void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test_setup_teardown(ns_stress_test,
+ ns_stress_large_setup,
+ ns_stress_teardown),
+ };
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
+
+
diff --git a/src/nunc-stans/test/test_nuncstans_stress_small.c b/src/nunc-stans/test/test_nuncstans_stress_small.c
new file mode 100644
index 000000000..da57ab0db
--- /dev/null
+++ b/src/nunc-stans/test/test_nuncstans_stress_small.c
@@ -0,0 +1,33 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (c) 2017, Red Hat, Inc
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#include "test_nuncstans_stress.h"
+
+int
+ns_stress_small_setup(void **state) {
+ struct test_params *tparams = malloc(sizeof(struct test_params));
+ tparams->client_thread_count = 4;
+ tparams->server_thread_count = 1;
+ tparams->jobs = 64;
+ tparams->test_timeout = 30;
+ *state = tparams;
+ return 0;
+}
+
+int
+main (void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test_setup_teardown(ns_stress_test,
+ ns_stress_small_setup,
+ ns_stress_teardown),
+ };
+ return cmocka_run_group_tests(tests, NULL, NULL);
+}
+
+
| 0 |
def84df10423671ce628aad2f63468efa7f57fed
|
389ds/389-ds-base
|
Issue #5113 - Increase timestamp precision for development builds
Bug Description:
We use %Y%m%d timestamp format for development and copr builds.
Copr builds are done on each commit. If multiple commits were done during
the same day, packages would have the same version.
Additionally, git is missing in the buildroot, this makes prerel version
to be empty and copr builds do not contain commit hash information in their
verison.
Fix Description:
* Change timestamp format to %Y%m%d%H%M.
* Modify rpm.mk to evaluate version information only once, instead of
calling shell script every time and get different results.
* Update copr Makefile to install git as a dependency.
Fixes: https://github.com/389ds/389-ds-base/issues/5113
Reviewed by: @mreynolds389 (Thanks!)
|
commit def84df10423671ce628aad2f63468efa7f57fed
Author: Viktor Ashirov <[email protected]>
Date: Tue Jan 18 08:57:31 2022 +0100
Issue #5113 - Increase timestamp precision for development builds
Bug Description:
We use %Y%m%d timestamp format for development and copr builds.
Copr builds are done on each commit. If multiple commits were done during
the same day, packages would have the same version.
Additionally, git is missing in the buildroot, this makes prerel version
to be empty and copr builds do not contain commit hash information in their
verison.
Fix Description:
* Change timestamp format to %Y%m%d%H%M.
* Modify rpm.mk to evaluate version information only once, instead of
calling shell script every time and get different results.
* Update copr Makefile to install git as a dependency.
Fixes: https://github.com/389ds/389-ds-base/issues/5113
Reviewed by: @mreynolds389 (Thanks!)
diff --git a/.copr/Makefile b/.copr/Makefile
index b8aba9279..2c18f2230 100644
--- a/.copr/Makefile
+++ b/.copr/Makefile
@@ -1,4 +1,6 @@
srpm:
+ # Install git in the buildroot to correctly generate commit hash
+ dnf install -y git
# Generate spec file
make -f rpm.mk rpmroot
# Install build dependencies
diff --git a/VERSION.sh b/VERSION.sh
index 017d4818d..aa0fa0509 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -13,7 +13,7 @@ VERSION_MINOR=1
VERSION_MAINT=0
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
-VERSION_DATE=$(date -u +%Y%m%d)
+VERSION_DATE=$(date -u +%Y%m%d%H%M)
# Set the version and release numbers for local developer RPM builds. We
# set these here because we do not want the git commit hash in the RPM
diff --git a/rpm.mk b/rpm.mk
index ab0da6725..901fb404c 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -1,9 +1,9 @@
PWD ?= $(shell pwd)
RPMBUILD ?= $(PWD)/rpmbuild
RPM_VERSION ?= $(shell $(PWD)/rpm/rpmverrel.sh version)
-RPM_RELEASE ?= $(shell $(PWD)/rpm/rpmverrel.sh release)
-VERSION_PREREL ?= $(shell $(PWD)/rpm/rpmverrel.sh prerel)
-RPM_VERSION_PREREL ?= $(shell $(PWD)/rpm/rpmverrel.sh prerel | sed -e 's/\./-/')
+RPM_RELEASE := $(shell $(PWD)/rpm/rpmverrel.sh release)
+VERSION_PREREL := $(shell $(PWD)/rpm/rpmverrel.sh prerel)
+RPM_VERSION_PREREL := $(subst .,-,$(VERSION_PREREL))
PACKAGE = 389-ds-base
RPM_NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)$(RPM_VERSION_PREREL)
NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)$(VERSION_PREREL)
| 0 |
e21c31b7fd30cb31cdb886a84450a5b5f395ffac
|
389ds/389-ds-base
|
Issue 6663 - RFE - Add option to write error log in JSON
Description:
Just like our other logs we should add an option to write error log in JSON
{
local_time: "",
severity: "",
subsystem: "",
msg: "",
}
https://github.com/389ds/389-ds-base/issues/6663
Reviewed by: spichugi(Thanks!)
|
commit e21c31b7fd30cb31cdb886a84450a5b5f395ffac
Author: Mark Reynolds <[email protected]>
Date: Tue Mar 11 09:40:53 2025 -0400
Issue 6663 - RFE - Add option to write error log in JSON
Description:
Just like our other logs we should add an option to write error log in JSON
{
local_time: "",
severity: "",
subsystem: "",
msg: "",
}
https://github.com/389ds/389-ds-base/issues/6663
Reviewed by: spichugi(Thanks!)
diff --git a/dirsrvtests/tests/suites/logging/error_json_logging_test.py b/dirsrvtests/tests/suites/logging/error_json_logging_test.py
new file mode 100644
index 000000000..87e1840a6
--- /dev/null
+++ b/dirsrvtests/tests/suites/logging/error_json_logging_test.py
@@ -0,0 +1,61 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import os
+import time
+import pytest
+from lib389.properties import TASK_WAIT
+from lib389.topologies import topology_st as topo
+from lib389.dirsrv_log import DirsrvErrorJSONLog
+
+log = logging.getLogger(__name__)
+
+MAIN_KEYS = [
+ "local_time",
+ "severity",
+ "subsystem",
+ "msg",
+]
+
+
[email protected]("log_format", ["json", "json-pretty"])
+def test_error_json_format(topo, log_format):
+ """Test error log is in JSON
+
+ :id: c9afb295-43de-4581-af8b-ec8f25a06d75
+ :setup: Standalone
+ :steps:
+ 1. Check error log has json and the expected data is present
+ :expectedresults:
+ 1. Success
+ """
+
+ inst = topo.standalone
+ inst.config.replace('nsslapd-errorlog-logbuffering', 'off') # Just in case
+ inst.config.set("nsslapd-errorlog-log-format", log_format)
+ inst.stop()
+ inst.deleteErrorLogs()
+ inst.start()
+ time.sleep(1)
+
+ error_log = DirsrvErrorJSONLog(inst)
+ log_lines = error_log.parse_log()
+ for event in log_lines:
+ if event is None or 'header' in event:
+ # Skip non-json or header line
+ continue
+ for key in MAIN_KEYS:
+ assert key in event and event[key] != ""
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main(["-s", CURRENT_FILE])
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 4e72a2b0c..b58f231f8 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -861,6 +861,14 @@ static struct config_get_and_set
NULL, 0,
(void **)&global_slapdFrontendConfig.errorlogbuffering,
CONFIG_ON_OFF, NULL, &init_errorlogbuffering, NULL},
+ {CONFIG_ERRORLOG_LOG_FORMAT_ATTRIBUTE, config_set_errorlog_log_format,
+ NULL, 0,
+ (void **)&global_slapdFrontendConfig.errorlog_log_format,
+ CONFIG_STRING, NULL, SLAPD_INIT_LOG_FORMAT, NULL},
+ {CONFIG_ERRORLOG_TIME_FORMAT_ATTRIBUTE, config_set_errorlog_time_format,
+ NULL, 0,
+ (void **)&global_slapdFrontendConfig.errorlog_time_format,
+ CONFIG_STRING, NULL, SLAPD_INIT_ERROR_LOG_TIME_FORMAT, NULL},
{CONFIG_CSNLOGGING_ATTRIBUTE, config_set_csnlogging,
NULL, 0,
(void **)&global_slapdFrontendConfig.csnlogging,
@@ -1928,6 +1936,8 @@ FrontendConfig_init(void)
cfg->errorlog_exptime = SLAPD_DEFAULT_LOG_EXPTIME;
cfg->errorlog_exptimeunit = slapi_ch_strdup(SLAPD_INIT_LOG_EXPTIMEUNIT);
cfg->errorloglevel = SLAPD_DEFAULT_FE_ERRORLOG_LEVEL;
+ cfg->errorlog_log_format = slapi_ch_strdup(SLAPD_INIT_LOG_FORMAT);
+ cfg->errorlog_time_format = slapi_ch_strdup(SLAPD_INIT_ERROR_LOG_TIME_FORMAT);
init_errorlog_compress_enabled = cfg->errorlog_compress = LDAP_OFF;
init_errorlogbuffering = cfg->errorlogbuffering = LDAP_OFF;
@@ -5664,7 +5674,6 @@ config_set_pw_warning(const char *attrname, char *value, char *errorbuf, int app
return retVal;
}
-
int
config_set_errorlog_level(const char *attrname, char *value, char *errorbuf, int apply)
{
@@ -6920,6 +6929,101 @@ config_get_errorlog()
return retVal;
}
+int32_t
+config_set_errorlog_log_format(const char *attrname, char *value, char *errorbuf, int apply)
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ if (config_value_is_null(attrname, value, errorbuf, 0)) {
+ return LDAP_OPERATIONS_ERROR;
+ }
+
+ if (strcasecmp(value, "default") && strcasecmp(value, "json") && strcasecmp(value, "json-pretty")) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "%s: \"%s\" is invalid, the acceptable values "
+ "are \"default\", \"json\", and \"json-pretty\"",
+ attrname, value);
+ return LDAP_UNWILLING_TO_PERFORM;
+ }
+
+ if (apply) {
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapi_ch_free_string(&slapdFrontendConfig->errorlog_log_format);
+ slapdFrontendConfig->errorlog_log_format = slapi_ch_strdup(value);
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ }
+
+ return LDAP_SUCCESS;
+}
+
+int
+config_get_errorlog_log_format()
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ char *value;
+ int retVal;
+
+ /* map string value to int to avoid excessive freeing and duping */
+ CFG_LOCK_READ(slapdFrontendConfig);
+ value = slapdFrontendConfig->errorlog_log_format;
+ if (strcasecmp(value, "default") == 0) {
+ retVal = LOG_FORMAT_DEFAULT;
+ } else if (strcasecmp(value, "json") == 0) {
+ retVal = LOG_FORMAT_JSON;
+ } else {
+ retVal = LOG_FORMAT_JSON_PRETTY;
+ }
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+
+ return retVal;
+}
+
+int32_t
+config_set_errorlog_time_format(const char *attrname, char *value, char *errorbuf, int apply)
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ time_t curtime;
+ char local_time[75] = "";
+ struct tm tms;
+ int32_t retVal = LDAP_SUCCESS;
+
+ if (config_value_is_null(attrname, value, errorbuf, 0)) {
+ retVal = LDAP_OPERATIONS_ERROR;
+ }
+
+ /* validate the value */
+ curtime = slapi_current_utc_time();
+ (void)localtime_r(&curtime, &tms);
+ if (strftime(local_time, 75, value, &tms) == 0) {
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+ "%s: \"%s\" is not a valid string format for strftime",
+ attrname, value);
+ return LDAP_UNWILLING_TO_PERFORM;
+ }
+
+ if (apply) {
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapi_ch_free_string(&slapdFrontendConfig->errorlog_time_format);
+ slapdFrontendConfig->errorlog_time_format = slapi_ch_strdup(value);
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ }
+
+ return retVal;
+}
+
+char *
+config_get_errorlog_time_format(void)
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ char *ret;
+
+ CFG_LOCK_READ(slapdFrontendConfig);
+ ret = config_copy_strval(slapdFrontendConfig->errorlog_time_format);
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+
+ return ret;
+}
+
int32_t
config_get_external_libs_debug_enabled()
{
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index f4bd62ed1..59b9a1bfd 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -2765,7 +2765,12 @@ slapd_log_error_proc_internal(
}
if (loginfo.log_error_fdes != NULL) {
if (loginfo.log_error_state & LOGGING_NEED_TITLE) {
- log_write_title(loginfo.log_error_fdes);
+ int32_t errorlog_format = config_get_errorlog_log_format();
+ if (errorlog_format != LOG_FORMAT_DEFAULT) {
+ log_write_json_title(loginfo.log_error_fdes, errorlog_format);
+ } else {
+ log_write_title(loginfo.log_error_fdes);
+ }
loginfo.log_error_state &= ~LOGGING_NEED_TITLE;
}
rc = vslapd_log_error(loginfo.log_error_fdes, sev_level, subsystem, fmt, ap_file, 1);
@@ -2890,55 +2895,91 @@ vslapd_log_error(
va_list ap,
int locked)
{
+ int32_t log_format = config_get_errorlog_log_format();
time_t tnl = slapi_current_utc_time();
char buffer[SLAPI_LOG_BUFSIZ];
+ char local_time[TBUFSIZE] = {0};
struct timespec tsnow;
char sev_name[10];
int blen = TBUFSIZE;
+ int32_t vlen = 0;
char *vbuf = NULL;
+ json_object *json_obj = NULL;
- if (vasprintf(&vbuf, fmt, ap) == -1) {
+ if ((vlen = vasprintf(&vbuf, fmt, ap)) == -1) {
log__error_emergency("vslapd_log_error, Unable to format message", 1, locked);
return -1;
}
- if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
- PR_snprintf(buffer, sizeof(buffer),
- "vslapd_log_error, Unable to determine system time for message :: %s",
- vbuf);
- log__error_emergency(buffer, 1, locked);
- return -1;
- }
- if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(buffer), buffer, &blen) != 0) {
- /* MSG may be truncated */
- PR_snprintf(buffer, sizeof(buffer),
- "vslapd_log_error, Unable to format system time for message :: %s",
- vbuf);
- log__error_emergency(buffer, 1, locked);
- return -1;
- }
+ if (log_format != LOG_FORMAT_DEFAULT) {
+ /* JSON format */
+ char *time_format = config_get_errorlog_time_format();
+ int32_t ltlen = TBUFSIZE;
+ struct timespec curr_time = slapi_current_utc_time_hr();
- /*
- * To be able to remove timestamp to not over pollute the syslog, we may
- * need to skip the timestamp part of the message.
- *
- * The size of the header is:
- * the size of the time string
- * + size of space
- * + size of one char (sign)
- * + size of 2 char
- * + size of 2 char
- * + size of [
- * + size of ]
- */
+ if (format_localTime_hr_json_log(&curr_time, local_time, <len,
+ time_format) != 0)
+ {
+ /* MSG may be truncated */
+ PR_snprintf(local_time, sizeof(local_time),
+ "build_base_obj, Unable to format system time");
+ log__error_emergency(local_time, 1, 0);
+ slapi_ch_free_string(&time_format);
+ return -1;
+ }
+ slapi_ch_free_string(&time_format);
+
+ /* strip off "\n" */
+ if (vbuf[vlen-1] == '\n') {
+ vbuf[vlen-1] = '\0';
+ }
+
+ json_obj = json_object_new_object();
+ json_object_object_add(json_obj, "local_time", json_object_new_string(local_time));
+ json_object_object_add(json_obj, "severity", json_object_new_string(get_log_sev_name(sev_level, sev_name)));
+ json_object_object_add(json_obj, "subsystem", json_object_new_string(subsystem));
+ json_object_object_add(json_obj, "msg", json_object_new_string(vbuf));
- /* This truncates again... But we have the nice smprintf from above! */
- if (subsystem == NULL) {
- snprintf(buffer + blen, sizeof(buffer) - blen, "- %s - %s",
- get_log_sev_name(sev_level, sev_name), vbuf);
+ PR_snprintf(buffer, sizeof(buffer), "%s\n",
+ json_object_to_json_string_ext(json_obj, log_format));
} else {
- snprintf(buffer + blen, sizeof(buffer) - blen, "- %s - %s - %s",
- get_log_sev_name(sev_level, sev_name), subsystem, vbuf);
+ /* Old format. This truncates again... But we have the nice smprintf
+ * from above!
+ *
+ * To be able to remove timestamp to not over pollute the syslog, we may
+ * need to skip the timestamp part of the message.
+ *
+ * The size of the header is:
+ * the size of the time string
+ * + size of space
+ * + size of one char (sign)
+ * + size of 2 char
+ * + size of 2 char
+ * + size of [
+ * + size of ]
+ */
+ if (clock_gettime(CLOCK_REALTIME, &tsnow) != 0) {
+ PR_snprintf(buffer, sizeof(buffer),
+ "vslapd_log_error, Unable to determine system time for message :: %s",
+ vbuf);
+ log__error_emergency(buffer, 1, locked);
+ return -1;
+ }
+ if (format_localTime_hr_log(tsnow.tv_sec, tsnow.tv_nsec, sizeof(buffer), buffer, &blen) != 0) {
+ /* MSG may be truncated */
+ PR_snprintf(buffer, sizeof(buffer),
+ "vslapd_log_error, Unable to format system time for message :: %s",
+ vbuf);
+ log__error_emergency(buffer, 1, locked);
+ return -1;
+ }
+ if (subsystem == NULL) {
+ snprintf(buffer + blen, sizeof(buffer) - blen, "- %s - %s",
+ get_log_sev_name(sev_level, sev_name), vbuf);
+ } else {
+ snprintf(buffer + blen, sizeof(buffer) - blen, "- %s - %s - %s",
+ get_log_sev_name(sev_level, sev_name), subsystem, vbuf);
+ }
}
buffer[sizeof(buffer) - 1] = '\0';
@@ -2951,6 +2992,9 @@ vslapd_log_error(
}
slapi_ch_free_string(&vbuf);
+ if (json_obj) {
+ json_object_put(json_obj);
+ }
return (0);
}
@@ -6903,6 +6947,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
int32_t log_state;
PRBool log_buffering = PR_FALSE;
open_log *open_log_file = NULL;
+ int32_t log_format = 0;
int rc = 0;
/*
@@ -6929,6 +6974,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
log_state = loginfo.log_access_state;
log_buffering = slapdFrontendConfig->accesslogbuffering ? PR_TRUE : PR_FALSE;
log_name = "access";
+ log_format = config_get_accesslog_log_format();
break;
case SLAPD_SECURITY_LOG:
@@ -6941,6 +6987,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
log_state = loginfo.log_security_state;
log_buffering = slapdFrontendConfig->securitylogbuffering ? PR_TRUE : PR_FALSE;
log_name = "security audit";
+ log_format = LOG_FORMAT_JSON;
break;
case SLAPD_AUDIT_LOG:
@@ -6953,6 +7000,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
log_state = loginfo.log_audit_state;
log_buffering = slapdFrontendConfig->auditlogbuffering ? PR_TRUE : PR_FALSE;
log_name = "audit";
+ log_format = config_get_auditlog_log_format();
break;
case SLAPD_AUDITFAIL_LOG:
@@ -6966,6 +7014,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
/* Audit fail log still uses the audit log buffering setting */
log_buffering = slapdFrontendConfig->auditlogbuffering ? PR_TRUE : PR_FALSE;
log_name = "audit fail";
+ log_format = config_get_auditlog_log_format();
break;
case SLAPD_ERROR_LOG:
@@ -6978,6 +7027,7 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
log_state = loginfo.log_error_state;
log_buffering = slapdFrontendConfig->errorlogbuffering ? PR_TRUE : PR_FALSE;
log_name = "error";
+ log_format = config_get_errorlog_log_format();
break;
default:
@@ -7002,34 +7052,11 @@ log_flush_buffer(LogBufferInfo *lbi, int log_type, int sync_now, int locked)
}
if (log_state & LOGGING_NEED_TITLE) {
- int32_t accesslog_format = config_get_accesslog_log_format();
- int32_t auditlog_format = config_get_auditlog_log_format();
- int32_t securitylog_format = LOG_FORMAT_JSON;
-
- switch (log_type) {
- case SLAPD_ACCESS_LOG:
- if (accesslog_format != LOG_FORMAT_DEFAULT) {
- log_write_json_title(fd, accesslog_format);
- } else {
- log_write_title(fd);
- }
- break;
- case SLAPD_AUDIT_LOG:
- case SLAPD_AUDITFAIL_LOG:
- if (auditlog_format != LOG_FORMAT_DEFAULT) {
- log_write_json_title(fd, auditlog_format);
- } else {
- log_write_title(fd);
- }
- break;
- case SLAPD_SECURITY_LOG:
- log_write_json_title(fd, securitylog_format);
- break;
- default:
- /* Error log - standard title */
+ if (log_format != LOG_FORMAT_DEFAULT) {
+ log_write_json_title(fd, log_format);
+ } else {
log_write_title(fd);
}
-
log_state_remove_need_title(log_type);
}
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 8a3424e4e..135939dd3 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -405,6 +405,8 @@ int config_set_auditfaillog_unhashed_pw(const char *attrname, char *value, char
int32_t config_set_auditlog_display_attrs(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_accesslog_log_format(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_accesslog_time_format(const char *attrname, char *value, char *errorbuf, int apply);
+int config_set_errorlog_log_format(const char *attrname, char *value, char *errorbuf, int apply);
+int config_set_errorlog_time_format(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_external_libs_debug_enabled(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_ndn_cache_enabled(const char *attrname, char *value, char *errorbuf, int apply);
int config_set_ndn_cache_max_size(const char *attrname, char *value, char *errorbuf, int apply);
@@ -529,6 +531,8 @@ int config_get_auditlog_log_format(void);
char *config_get_auditlog_time_format(void);
int config_get_accesslog_log_format(void);
char *config_get_accesslog_time_format(void);
+int config_get_errorlog_log_format(void);
+char *config_get_errorlog_time_format(void);
char *config_get_referral_mode(void);
int config_get_num_listeners(void);
int config_check_referral_mode(void);
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 98993a405..88cb2ded4 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -330,6 +330,7 @@ typedef void (*VFPV)(); /* takes undefined arguments */
#define SLAPD_INIT_LOG_EXPTIMEUNIT "month"
#define SLAPD_INIT_LOG_TIME_FORMAT "%FT%TZ"
#define SLAPD_INIT_ACCESS_LOG_TIME_FORMAT "%FT%T"
+#define SLAPD_INIT_ERROR_LOG_TIME_FORMAT "%FT%T"
#define SLAPD_INIT_LOG_FORMAT "default"
#define LOG_FORMAT_DEFAULT 1
#define LOG_FORMAT_JSON 0
@@ -2218,6 +2219,8 @@ typedef struct _slapdEntryPoints
#define CONFIG_AUDITLOG_TIME_FORMAT_ATTRIBUTE "nsslapd-auditlog-time-format"
#define CONFIG_ACCESSLOG_LOG_FORMAT_ATTRIBUTE "nsslapd-accesslog-log-format"
#define CONFIG_ACCESSLOG_TIME_FORMAT_ATTRIBUTE "nsslapd-accesslog-time-format"
+#define CONFIG_ERRORLOG_LOG_FORMAT_ATTRIBUTE "nsslapd-errorlog-log-format"
+#define CONFIG_ERRORLOG_TIME_FORMAT_ATTRIBUTE "nsslapd-errorlog-time-format"
#define CONFIG_UNHASHED_PW_SWITCH_ATTRIBUTE "nsslapd-unhashed-pw-switch"
#define CONFIG_ROOTDN_ATTRIBUTE "nsslapd-rootdn"
#define CONFIG_ROOTPW_ATTRIBUTE "nsslapd-rootpw"
@@ -2575,6 +2578,8 @@ typedef struct _slapdFrontendConfig
/* ERROR LOG */
slapi_onoff_t errorlog_logging_enabled;
+ char *errorlog_log_format;
+ char *errorlog_time_format;
char *errorlog_mode;
int errorlog_maxnumlogs;
int errorlog_maxlogsize;
diff --git a/src/cockpit/389-console/src/lib/server/errorLog.jsx b/src/cockpit/389-console/src/lib/server/errorLog.jsx
index 0ad36e594..b4193f106 100644
--- a/src/cockpit/389-console/src/lib/server/errorLog.jsx
+++ b/src/cockpit/389-console/src/lib/server/errorLog.jsx
@@ -38,6 +38,8 @@ const settings_attrs = [
'nsslapd-errorlog',
'nsslapd-errorlog-level',
'nsslapd-errorlog-logging-enabled',
+ 'nsslapd-errorlog-log-format',
+ 'nsslapd-errorlog-time-format',
];
const _ = cockpit.gettext;
@@ -349,7 +351,7 @@ export class ServerErrorLog extends React.Component {
loading: true,
loaded: false,
});
- };
+ }
const cmd = [
"dsconf", "-j", "ldapi://%2fvar%2frun%2fslapd-" + this.props.serverId + ".socket",
@@ -403,6 +405,8 @@ export class ServerErrorLog extends React.Component {
'nsslapd-errorlog-maxlogsize': attrs['nsslapd-errorlog-maxlogsize'][0],
'nsslapd-errorlog-maxlogsperdir': attrs['nsslapd-errorlog-maxlogsperdir'][0],
'nsslapd-errorlog-compress': compressed,
+ 'nsslapd-errorlog-log-format': attrs['nsslapd-errorlog-log-format'][0],
+ 'nsslapd-errorlog-time-format': attrs['nsslapd-errorlog-time-format'][0],
rows,
// Record original values
_rows: JSON.parse(JSON.stringify(rows)),
@@ -421,6 +425,8 @@ export class ServerErrorLog extends React.Component {
'_nsslapd-errorlog-maxlogsize': attrs['nsslapd-errorlog-maxlogsize'][0],
'_nsslapd-errorlog-maxlogsperdir': attrs['nsslapd-errorlog-maxlogsperdir'][0],
'_nsslapd-errorlog-compress': compressed,
+ '_nsslapd-errorlog-log-format': attrs['nsslapd-errorlog-log-format'][0],
+ '_nsslapd-errorlog-time-format': attrs['nsslapd-errorlog-time-format'][0],
})
);
})
@@ -483,6 +489,8 @@ export class ServerErrorLog extends React.Component {
'nsslapd-errorlog-maxlogsize': attrs['nsslapd-errorlog-maxlogsize'][0],
'nsslapd-errorlog-maxlogsperdir': attrs['nsslapd-errorlog-maxlogsperdir'][0],
'nsslapd-errorlog-compress': compressed,
+ 'nsslapd-errorlog-log-format': attrs['nsslapd-errorlog-log-format'][0],
+ 'nsslapd-errorlog-time-format': attrs['nsslapd-errorlog-time-format'][0],
rows,
// Record original values
_rows: JSON.parse(JSON.stringify(rows)),
@@ -501,6 +509,8 @@ export class ServerErrorLog extends React.Component {
'_nsslapd-errorlog-maxlogsize': attrs['nsslapd-errorlog-maxlogsize'][0],
'_nsslapd-errorlog-maxlogsperdir': attrs['nsslapd-errorlog-maxlogsperdir'][0],
'_nsslapd-errorlog-compress': compressed,
+ '_nsslapd-errorlog-log-format': attrs['nsslapd-errorlog-log-format'][0],
+ '_nsslapd-errorlog-time-format': attrs['nsslapd-errorlog-time-format'][0],
}, this.props.enableTree);
}
@@ -560,6 +570,12 @@ export class ServerErrorLog extends React.Component {
}
rotationTime = hour + ":" + min;
+ const time_format_title = (
+ <>
+ {_("Time Format")} <font size="1">({_("JSON only")})</font>
+ </>
+ );
+
let body = (
<div className="ds-margin-top-lg ds-left-margin">
<Tabs className="ds-margin-top-xlg" activeKey={this.state.activeTabKey} onSelect={this.handleNavSelect}>
@@ -591,6 +607,40 @@ export class ServerErrorLog extends React.Component {
}}
/>
</FormGroup>
+ <FormGroup
+ label={time_format_title}
+ fieldId="nsslapd-errorlog-time-format"
+ title="Time format using strftime formatting (nsslapd-errorlog-time-format). This only applies to the JSON log format"
+ >
+ <TextInput
+ value={this.state['nsslapd-errorlog-time-format']}
+ type="text"
+ id="nsslapd-errorlog-time-format"
+ aria-describedby="horizontal-form-name-helper"
+ name="nsslapd-errorlog-time-format"
+ onChange={(e, str) => {
+ this.handleChange(e, "settings");
+ }}
+ />
+ </FormGroup>
+ <FormGroup
+ label={_("Log Format")}
+ fieldId="nsslapd-errorlog-log-format"
+ title={_("Choose the log format (nsslapd-errorlog-log-format).")}
+ >
+ <FormSelect
+ id="nsslapd-errorlog-log-format"
+ value={this.state['nsslapd-errorlog-log-format']}
+ onChange={(e, str) => {
+ this.handleChange(e, "settings");
+ }}
+ aria-label="FormSelect Input"
+ >
+ <FormSelectOption key="0" value="default" label="Default" />
+ <FormSelectOption key="1" value="json" label="JSON" />
+ <FormSelectOption key="2" value="json-pretty" label="JSON (pretty)" />
+ </FormSelect>
+ </FormGroup>
</Form>
<ExpandableSection
diff --git a/src/lib389/lib389/dirsrv_log.py b/src/lib389/lib389/dirsrv_log.py
index 9572a65ef..e40105ad3 100644
--- a/src/lib389/lib389/dirsrv_log.py
+++ b/src/lib389/lib389/dirsrv_log.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2022 Red Hat, Inc.
+# Copyright (C) 2025 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -281,7 +281,8 @@ class DirsrvAccessJSONLog(DirsrvLog):
if len(searches) > 0:
report = copy.deepcopy(lint_report)
report['items'].append(self._get_log_path())
- report['detail'] = report['detail'].replace('NUMBER', str(count))
+ report['detail'] = report['detail'].replace('NUMBER',
+ str(count))
for srch in searches:
report['detail'] += srch
report['check'] = 'logs:notes'
@@ -315,12 +316,32 @@ class DirsrvAccessJSONLog(DirsrvLog):
self.log.info(action)
return action
- def parse_lines(self, lines):
- """Parse multiple log lines
- @param lines - a list of log lines
- @return - A dictionary of the log parts for each line
+ def parse_log(self):
"""
- return map(self.parse_line, lines)
+ Take the entire logs and parse it into a list of objects, this can
+ handle "json_pretty" format
+ """
+
+ json_objects = []
+ jobj = ""
+
+ lines = self.readlines()
+ for line in lines:
+ line = line.rstrip()
+ if line == '{':
+ jobj = "{"
+ elif line == '}':
+ jobj += "}"
+ json_objects.append(json.loads(jobj))
+ else:
+ if line[0] == '{' and line[-1] == '}':
+ # Complete json log line
+ json_objects.append(json.loads(line))
+ else:
+ # Json pretty - append the line
+ jobj += line.strip()
+
+ return json_objects
class DirsrvAccessLog(DirsrvLog):
@@ -373,9 +394,9 @@ class DirsrvAccessLog(DirsrvLog):
for line in lines:
if ' RESULT err=' in line:
# Looks like a valid notes=A/F
- conn = line.split(' conn=', 1)[1].split(' ',1)[0]
- op = line.split(' op=', 1)[1].split(' ',1)[0]
- etime = line.split(' etime=', 1)[1].split(' ',1)[0]
+ conn = line.split(' conn=', 1)[1].split(' ', 1)[0]
+ op = line.split(' op=', 1)[1].split(' ', 1)[0]
+ etime = line.split(' etime=', 1)[1].split(' ', 1)[0]
stats = self._log_get_search_stats(conn, op)
if stats is not None:
timestamp = stats['timestamp']
@@ -399,7 +420,8 @@ class DirsrvAccessLog(DirsrvLog):
if len(searches) > 0:
report = copy.deepcopy(lint_report)
report['items'].append(self._get_log_path())
- report['detail'] = report['detail'].replace('NUMBER', str(count))
+ report['detail'] = report['detail'].replace('NUMBER',
+ str(count))
for srch in searches:
report['detail'] += srch
report['check'] = 'logs:notes'
@@ -484,8 +506,91 @@ class DirsrvErrorLog(DirsrvLog):
return map(self.parse_line, lines)
+class DirsrvErrorJSONLog(DirsrvLog):
+ """Directory Server Error JSON log class"""
+ def __init__(self, dirsrv):
+ """Init the Error log class
+ @param dirsrv - A DirSrv object
+ """
+ super(DirsrvErrorJSONLog, self).__init__(dirsrv)
+ self.jsonFormat = True
+ self.lpath = ""
+
+ def _get_log_path(self):
+ """Return the current log file location"""
+ return self.dirsrv.ds_paths.error_log
+
+ def readlines(self):
+ """Returns an array of all the lines in the log.
+
+ @return - an array of all the lines in the log.
+ """
+ lines = []
+ self.lpath = self._get_log_path()
+ if self.lpath is not None:
+ # Open the log
+ with open(self.lpath, 'r', errors='ignore') as lf:
+ lines = lf.readlines()
+ return lines
+
+ def parse_line(self, line):
+ """Parse a error log line
+ @line - a text string from a error log
+ @return - A dictionary of the log parts
+ """
+ line = line.strip()
+
+ try:
+ action = json.loads(line)
+ if 'header' in action:
+ # This is the log title, return it as is
+ return action
+
+ action['datetime'] = self.parse_timestamp(action['local_time'],
+ json_format=True)
+ return action
+
+ except json.decoder.JSONDecodeError:
+ # Maybe it's json pretty, regardless we can not parse this single
+ # line
+ pass
+
+ return None
+
+ def parse_log(self):
+ """
+ Take the entire logs and parse it into a list of objects, this can
+ handle "json_pretty" format
+ """
+
+ json_objects = []
+ jobj = ""
+
+ lines = self.readlines()
+ for line in lines:
+ line = line.rstrip()
+ if line == '{':
+ jobj = "{"
+ elif line == '}':
+ jobj += "}"
+ json_objects.append(json.loads(jobj))
+ else:
+ if line[0] == '{' and line[-1] == '}':
+ # Complete json log line
+ json_objects.append(json.loads(line))
+ else:
+ # Json pretty - append the line
+ jobj += line.strip()
+
+ return json_objects
+
+
class DirsrvSecurityLog(DirsrvLog):
- """Directory Server Security log class"""
+ """
+ Directory Server Security log class
+
+ Currently this is only written in "json", not "json-pretty"
+ """
def __init__(self, dirsrv):
"""Init the Security log class
@param dirsrv - A DirSrv object
@@ -554,6 +659,7 @@ class DirsrvAuditJSONLog(DirsrvLog):
"""
super(DirsrvAuditJSONLog, self).__init__(dirsrv)
self.jsonFormat = True
+ self.lpath = ""
def _get_log_path(self):
"""Return the current log file location"""
@@ -579,16 +685,43 @@ class DirsrvAuditJSONLog(DirsrvLog):
@return - A dictionary of the log parts
"""
line = line.strip()
- action = json.loads(line)
- if 'header' in action:
- # This is the log title, return it as is
+ try:
+ action = json.loads(line)
+ if 'header' in action:
+ # This is the log title, return it as is
+ return action
+ action['datetime'] = action['gm_time']
return action
- action['datetime'] = action['gm_time']
- return action
+ except json.decoder.JSONDecodeError:
+ # Maybe it's json pretty, regardless we can not parse this single
+ # line
+ pass
- def parse_lines(self, lines):
- """Parse multiple lines from a audit log
- @param lines - a lits of strings/lines from a audit log
- @return - A dictionary of the log parts for each line
+ return None
+
+ def parse_log(self):
"""
- return map(self.parse_line, lines)
+ Take the entire logs and parse it into a list of objects, this can
+ handle "json_pretty" format
+ """
+
+ json_objects = []
+ jobj = ""
+
+ lines = self.readlines()
+ for line in lines:
+ line = line.rstrip()
+ if line == '{':
+ jobj = "{"
+ elif line == '}':
+ jobj += "}"
+ json_objects.append(json.loads(jobj))
+ else:
+ if line[0] == '{' and line[-1] == '}':
+ # Complete json log line
+ json_objects.append(json.loads(line))
+ else:
+ # Json pretty - append the line
+ jobj += line.strip()
+
+ return json_objects
| 0 |
01272f7538b1c2c4ba291e092b9a9a592c7cc810
|
389ds/389-ds-base
|
Ticket 49367 - missing braces in idsktune
Bug Description: Missing braces in case switch in dsktune.
Fix Description: Add the missing braces
https://pagure.io/389-ds-base/issue/49367
Author: cgrzemba
Review by: wibrown (Thanks)
|
commit 01272f7538b1c2c4ba291e092b9a9a592c7cc810
Author: William Brown <[email protected]>
Date: Wed Aug 30 10:37:34 2017 +1000
Ticket 49367 - missing braces in idsktune
Bug Description: Missing braces in case switch in dsktune.
Fix Description: Add the missing braces
https://pagure.io/389-ds-base/issue/49367
Author: cgrzemba
Review by: wibrown (Thanks)
diff --git a/ldap/systools/idsktune.c b/ldap/systools/idsktune.c
index 03b8b6e0a..c7aefd5bf 100644
--- a/ldap/systools/idsktune.c
+++ b/ldap/systools/idsktune.c
@@ -1475,21 +1475,22 @@ sun_check_network_device(void)
if (devfd == -1) {
switch (errno) {
- case EACCES:
- if (flag_debug) {
- printf("DEBUG : got EACCES opening %s\n", SUN_NETWORK_DEVICE);
- }
- break;
- case ENOENT:
- if (flag_debug) {
- printf("DEBUG : got ENOENT opening %s\n", SUN_NETWORK_DEVICE);
- }
- break;
- default:
- if (flag_debug) {
- printf("DEBUG : got %d opening %s\n", errno, SUN_NETWORK_DEVICE);
+ case EACCES:
+ if (flag_debug) {
+ printf("DEBUG : got EACCES opening %s\n", SUN_NETWORK_DEVICE);
+ }
+ break;
+ case ENOENT:
+ if (flag_debug) {
+ printf("DEBUG : got ENOENT opening %s\n", SUN_NETWORK_DEVICE);
+ }
+ break;
+ default:
+ if (flag_debug) {
+ printf("DEBUG : got %d opening %s\n", errno, SUN_NETWORK_DEVICE);
+ }
+ return;
}
- return;
}
else
{
@@ -1501,8 +1502,9 @@ sun_check_network_device(void)
printf("DEBUG : %s\n", buf);
}
if (iii_pio_getnum(buf, &ls) == -1) {
- if (flag_debug)
+ if (flag_debug) {
printf("DEBUG : %s link_speed variable not available\n", SUN_NETWORK_DEVICE);
+ }
} else {
/* XXX look at link speed */
if (flag_debug) {
| 0 |
590f57f5343f64e9d6927f0f09db2a5d2377d5fc
|
389ds/389-ds-base
|
Resolves: #483167
Summary: db2ldif -s "" crashes with segmentation fault
Change description: adding a check to see if there is no entries.
|
commit 590f57f5343f64e9d6927f0f09db2a5d2377d5fc
Author: Noriko Hosoi <[email protected]>
Date: Sat Jan 31 00:06:11 2009 +0000
Resolves: #483167
Summary: db2ldif -s "" crashes with segmentation fault
Change description: adding a check to see if there is no entries.
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 2f05094f7..c611b9a2d 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -1917,7 +1917,7 @@ lookup_instance_name_by_suffix(char *suffix,
}
rval = 0;
- for (ep = entries; *ep; ep++) {
+ for (ep = entries; ep && *ep; ep++) {
backend = slapi_entry_attr_get_charptr(*ep, "nsslapd-backend");
if (backend) {
charray_add(instances, backend);
| 0 |
d88253df42157df3d112122c3551b0969e9ce8a0
|
389ds/389-ds-base
|
Bug 630090 - (cov#11974) Remove unused ACL functions
Coverity flagged a memory corruption issue in an old unused
ACL function. It is best to just remove these unused functions.
The functions removed are:
ACL_ParseFile
ACL_WriteFile
ACL_WriteString
ACL_Decompose
acl_to_str_*
acl_decompose_*
|
commit d88253df42157df3d112122c3551b0969e9ce8a0
Author: Nathan Kinder <[email protected]>
Date: Thu Sep 9 10:18:59 2010 -0700
Bug 630090 - (cov#11974) Remove unused ACL functions
Coverity flagged a memory corruption issue in an old unused
ACL function. It is best to just remove these unused functions.
The functions removed are:
ACL_ParseFile
ACL_WriteFile
ACL_WriteString
ACL_Decompose
acl_to_str_*
acl_decompose_*
diff --git a/include/libaccess/aclproto.h b/include/libaccess/aclproto.h
index 5f273842b..fe6545c86 100644
--- a/include/libaccess/aclproto.h
+++ b/include/libaccess/aclproto.h
@@ -58,13 +58,7 @@ NSPR_BEGIN_EXTERN_C
/*********************************************************************
* ACL language and file interfaces
*********************************************************************/
-
-NSAPI_PUBLIC ACLListHandle_t * ACL_ParseFile(NSErr_t *errp, char *filename);
NSAPI_PUBLIC ACLListHandle_t * ACL_ParseString(NSErr_t *errp, char *buffer);
-NSAPI_PUBLIC int ACL_Decompose(NSErr_t *errp, char **acl, ACLListHandle_t *acl_list);
-NSAPI_PUBLIC int ACL_WriteString(NSErr_t *errp, char **acl, ACLListHandle_t *acllist);
-NSAPI_PUBLIC int ACL_WriteFile(NSErr_t *errp, char *filename, ACLListHandle_t *acllist);
-
/*********************************************************************
* ACL Expression construction interfaces
diff --git a/include/public/nsacl/aclapi.h b/include/public/nsacl/aclapi.h
index 197d9d38e..617cabfe9 100644
--- a/include/public/nsacl/aclapi.h
+++ b/include/public/nsacl/aclapi.h
@@ -352,14 +352,7 @@ NSAPI_PUBLIC extern ACLDispatchVector_t *__nsacl_table;
/* ACL language and file interfaces */
-#define ACL_ParseFile (*__nsacl_table->f_ACL_ParseFile)
#define ACL_ParseString (*__nsacl_table->f_ACL_ParseString)
-#define ACL_WriteString (*__nsacl_table->f_ACL_WriteString)
-#define ACL_WriteFile (*__nsacl_table->f_ACL_WriteFile)
-#define ACL_FileRenameAcl (*__nsacl_table->f_ACL_FileRenameAcl)
-#define ACL_FileDeleteAcl (*__nsacl_table->f_ACL_FileDeleteAcl)
-#define ACL_FileGetAcl (*__nsacl_table->f_ACL_FileGetAcl)
-#define ACL_FileSetAcl (*__nsacl_table->f_ACL_FileSetAcl)
/* ACL Expression construction interfaces
* These are low-level interfaces that may be useful to those who are not
diff --git a/lib/libaccess/acltools.cpp b/lib/libaccess/acltools.cpp
index 32ab5d748..28df59995 100644
--- a/lib/libaccess/acltools.cpp
+++ b/lib/libaccess/acltools.cpp
@@ -1360,74 +1360,6 @@ Symbol_t *sym;
return( result );
}
-/*
- * Function parses an input ACL file and resturns an
- * ACLListHandle_t pointer that represents the entire
- * file without the comments.
- *
- * Input:
- * filename the name of the target ACL text file
- * errp a pointer to an error stack
- *
- * Returns:
- * NULL parse failed
- *
- */
-
-NSAPI_PUBLIC ACLListHandle_t *
-ACL_ParseFile( NSErr_t *errp, char *filename )
-{
-ACLListHandle_t *handle = NULL;
-int eid = 0;
-int rv = 0;
-char *errmsg;
-
- ACL_InitAttr2Index();
-
- if ( acl_parse_crit == NULL )
- acl_parse_crit = crit_init();
-
- crit_enter( acl_parse_crit );
-
- if ( acl_InitScanner( errp, filename, NULL ) < 0 ) {
- rv = ACLERROPEN;
- eid = ACLERR1900;
- errmsg = system_errmsg();
- nserrGenerate(errp, rv, eid, ACL_Program, 2, filename, errmsg);
- } else {
-
- handle = ACL_ListNew(errp);
- if ( handle == NULL ) {
- rv = ACLERRNOMEM;
- eid = ACLERR1920;
- nserrGenerate(errp, rv, eid, ACL_Program, 0);
- } else if ( acl_PushListHandle( handle ) < 0 ) {
- rv = ACLERRNOMEM;
- eid = ACLERR1920;
- nserrGenerate(errp, rv, eid, ACL_Program, 0);
- } else if ( acl_Parse() ) {
- rv = ACLERRPARSE;
- eid = ACLERR1780;
- }
-
- if ( acl_EndScanner() < 0 ) {
- rv = ACLERROPEN;
- eid = ACLERR1500;
- errmsg = system_errmsg();
- nserrGenerate(errp, rv, eid, ACL_Program, 2, filename, errmsg);
- }
-
- }
-
- if ( rv || eid ) {
- ACL_ListDestroy(errp, handle);
- handle = NULL;
- }
-
- crit_exit( acl_parse_crit );
- return(handle);
-
-}
/*
* Function parses an input ACL string and returns an
@@ -1628,374 +1560,6 @@ char *tmp;
return(0);
}
-/*
- * LOCAL FUNCTION
- *
- * Appends str2 to str1.
- *
- * Input:
- * str1 an existing dynamically allocated string
- * str2 a text string
- * Returns:
- * 0 success
- * < 0 failure
- */
-
-static int
-acl_to_str_append(acl_string_t * p_aclstr, const char *str2)
-{
- int str2len, newlen;
-
- if (p_aclstr == NULL || str2 == NULL)
- return (ACLERRINTERNAL);
- if (p_aclstr->str == NULL) {
- p_aclstr->str = (char *) PERM_MALLOC(4096);
- if (p_aclstr->str == NULL)
- return (ACLERRNOMEM);
- p_aclstr->str_size = 4096;
- p_aclstr->str_len = 0;
- }
-
- str2len = strlen(str2);
- newlen = p_aclstr->str_len + str2len;
- if (newlen >= p_aclstr->str_size) {
- p_aclstr->str_size = str2len > 4095 ? str2len+p_aclstr->str_size+1 : 4096+p_aclstr->str_size ;
- p_aclstr->str = (char *) PERM_REALLOC(p_aclstr->str, p_aclstr->str_size);
- if (p_aclstr->str == NULL)
- return (ACLERRNOMEM);
- }
- memcpy((void *)&(p_aclstr->str[p_aclstr->str_len]), (void *) str2, str2len+1);
- p_aclstr->str_len += str2len;
- return 0;
-}
-
-/*
- * LOCAL FUNCTION
- *
- * Output Authorization Expression type either "Allow" or "Deny"
- */
-
-static int
-acl_to_str_expr_type( acl_string_t *str_t, ACLExprHandle_t *expr )
-{
- switch (expr->expr_type) {
- case ACL_EXPR_TYPE_ALLOW:
- acl_to_str_append(str_t, "allow ");
- if ( IS_ABSOLUTE(expr->expr_flags) )
- acl_to_str_append(str_t, "absolute ");
- return(0);
- case ACL_EXPR_TYPE_DENY:
- acl_to_str_append(str_t, "deny ");
- if ( IS_ABSOLUTE(expr->expr_flags) )
- acl_to_str_append(str_t, "absolute ");
- return(0);
- case ACL_EXPR_TYPE_AUTH:
- acl_to_str_append(str_t, "authenticate ");
- if ( IS_ABSOLUTE(expr->expr_flags) )
- acl_to_str_append(str_t, "absolute ");
- return(0);
- case ACL_EXPR_TYPE_RESPONSE:
- acl_to_str_append(str_t, "deny with ");
- return(0);
- default:
- return(ACLERRINTERNAL);
- }
-}
-
-/*
- * LOCAL FUNCTION
- *
- * Output Authorization Expression Rights "(right, right)"
- */
-
-static int
-acl_to_str_expr_arg( acl_string_t *str_t, ACLExprHandle_t *expr )
-{
-int ii;
-
- if ( expr->expr_argc <= 0 ) {
- return(ACLERRINTERNAL);
- }
-
- if ( expr->expr_type == ACL_EXPR_TYPE_RESPONSE ) {
- acl_to_str_append(str_t, expr->expr_argv[0]);
- acl_to_str_append(str_t, "=\"");
- acl_to_str_append(str_t, expr->expr_argv[1]);
- acl_to_str_append(str_t, "\";\n");
- return(0);
- }
-
- acl_to_str_append(str_t, "(");
- for (ii = 0; ii < expr->expr_argc; ii++) {
- acl_to_str_append(str_t, expr->expr_argv[ii]);
- if ( ii < expr->expr_argc - 1 ) {
- acl_to_str_append(str_t, ",");
- }
- }
- acl_to_str_append(str_t, ") ");
-
- return(0);
-}
-
-/*
- * LOCAL FUNCTION
- *
- * Walks through the authentication statement PList_t and
- * prints the structure to a string.
- */
-
-static void
-acl_to_str_auth_expr(char *lval, const void *rval, void *user_data)
-{
- // ###### char **str = (char **) user_data;
- acl_string_t * p_aclstr = (acl_string_t *) user_data;
-
- acl_to_str_append(p_aclstr, "\t");
- acl_to_str_append(p_aclstr, lval);
- acl_to_str_append(p_aclstr, " = \"");
- acl_to_str_append(p_aclstr, (char *) rval);
- acl_to_str_append(p_aclstr, "\";\n");
-
- return;
-}
-
-/*
- * LOCAL FUNCTION
- *
- * Output the logic part of the authencation statement to a string.
- */
-
-static int
-acl_to_str_auth_logic( acl_string_t *str_t, ACLExprHandle_t *expr)
-{
-
- if ( expr->expr_auth == NULL ) {
- acl_to_str_append(str_t, "{\n");
- acl_to_str_append(str_t, "# Authenticate statement with no body?\n");
- acl_to_str_append(str_t, "\tnull=null;\n");
- acl_to_str_append(str_t, "};\n");
- return(0);
- }
-
- acl_to_str_append(str_t, "{\n");
- PListEnumerate(expr->expr_auth, acl_to_str_auth_expr, (void *) str_t);
- acl_to_str_append(str_t, "};\n");
-
- return(0);
-}
-
-
-/*
- * LOCAL FUNCTION
- *
- * Output the logic part of the authorization statement to a string.
- */
-
-static int
-acl_to_str_expr_logic( acl_string_t *str_t, ACLExprHandle_t *expr, ACLExprStack_t *expr_stack)
-{
-int rv = 0;
-int ii;
-
- expr_stack->stack_index = 0;
- expr_stack->found_subexpression = 0;
- expr_stack->last_subexpression = -1;
-
- for (ii = 0; ii < expr->expr_raw_index; ii++) {
- rv = acl_reduce_expr_logic(expr_stack, &expr->expr_raw[ii]);
- if (rv) break;
- }
-
- if (!rv && expr_stack->expr_text[0]) {
- acl_to_str_append(str_t, "\n ");
- acl_to_str_append(str_t, expr_stack->expr_text[0]);
- acl_to_str_append(str_t, ";\n");
- PERM_FREE(expr_stack->expr_text[0]);
- }
-
- return(rv);
-}
-
-/*
- * LOCAL FUNCTION
- *
- * Output an ACL list to a string.
- */
-
-static int
-acl_to_str_create( acl_string_t *str_t, ACLListHandle_t *acl_list )
-{
-ACLWrapper_t *wrap;
-ACLHandle_t *acl;
-ACLExprHandle_t *expr;
-int rv = 0;
-ACLExprStack_t *expr_stack;
-
- expr_stack = (ACLExprStack_t *) PERM_MALLOC(sizeof(ACLExprStack_t));
- if ( expr_stack == NULL )
- return(ACLERRNOMEM);
-
- acl_to_str_append(str_t, "# File automatically written\n");
- acl_to_str_append(str_t, "#\n");
- acl_to_str_append(str_t, "# You may edit this file by hand\n");
- acl_to_str_append(str_t, "#\n\n");
- if ( acl_list->acl_list_head == NULL ) {
- PERM_FREE(expr_stack);
- return(0);
- }
-
- acl_to_str_append(str_t, "version 3.0;\n");
- for (wrap = acl_list->acl_list_head; wrap && !rv;
- wrap = wrap->wrap_next ) {
- acl = wrap->acl;
- if ( acl->tag ) {
- acl_to_str_append(str_t, "\nacl \"");
- acl_to_str_append(str_t, acl->tag);
- acl_to_str_append(str_t, "\";\n");
- } else {
- acl_to_str_append(str_t, "\nacl;\n");
- }
-
- for (expr = acl->expr_list_head; expr && rv == 0;
- expr = expr->expr_next ) {
-
- if ( (rv = acl_to_str_expr_type(str_t, expr)) < 0 )
- break;
-
- if ( (rv = acl_to_str_expr_arg(str_t, expr)) < 0)
- break;
-
- switch (expr->expr_type) {
- case ACL_EXPR_TYPE_DENY:
- case ACL_EXPR_TYPE_ALLOW:
- rv = acl_to_str_expr_logic(str_t, expr, expr_stack);
- break;
- case ACL_EXPR_TYPE_AUTH:
- rv = acl_to_str_auth_logic(str_t, expr);
- break;
- case ACL_EXPR_TYPE_RESPONSE:
- break;
- }
-
- }
- }
-
- PERM_FREE(expr_stack);
- return(rv);
-}
-
-
-/*
- * Creates an ACL text string from an ACL handle
- *
- * Input:
- * errp error stack
- * acl target text string pointer
- * acl_list Source ACL list handle
- * Ouput:
- * acl a chunk of dynamic memory pointing to ACL text
- * Returns:
- * 0 success
- * < 0 failure
- */
-
-NSAPI_PUBLIC int
-ACL_WriteString(NSErr_t *errp, char **acl, ACLListHandle_t *acl_list)
-{
- int rv;
- acl_string_t str_t = {NULL,0,0};
-
- if ( acl_list == NULL || acl == NULL )
- return(ACLERRUNDEF);
-
- rv = acl_to_str_create(&str_t, acl_list);
- *acl = str_t.str;
-
- return ( rv );
-}
-
-/*
- * Write an ACL text file from an input ACL list structure.
- *
- * Input:
- * filename name for the output text file
- * acl_list a list of ACLs to convert to text
- * Output:
- * errp an error stack, set if there are errors
- * to report
- * Returns:
- * 0 success
- * ACLERROPEN,
- * ACLERRNOMEM on failure
- */
-
-NSAPI_PUBLIC int
-ACL_WriteFile( NSErr_t *errp, char *filename, ACLListHandle_t *acl_list )
-{
-int rv;
-int eid;
-char *errmsg;
-#ifdef UTEST
-FILE *ofp;
-#else
-SYS_FILE ofp;
-#endif
-acl_string_t aclstr = {NULL,0,0};
-char *acl_text = NULL;
-
- if ( filename == NULL || acl_list == NULL ) {
- rv = ACLERROPEN;
- eid = ACLERR1900;
- errmsg = system_errmsg();
- nserrGenerate(errp, rv, eid, ACL_Program, 2, filename, errmsg);
- return(ACLERROPEN);
- }
-
-#ifdef UTEST
- ofp = fopen(filename, "w");
- if ( ofp == NULL ) {
-#else
- ofp = system_fopenWT(filename);
- if ( ofp == SYS_ERROR_FD ) {
-#endif
- rv = ACLERROPEN;
- eid = ACLERR1900;
- errmsg = system_errmsg();
- nserrGenerate(errp, rv, eid, ACL_Program, 2, filename, errmsg);
- return(ACLERROPEN);
- }
-
- rv = acl_to_str_create(&aclstr, acl_list);
- acl_text = aclstr.str;
-
- if ( rv ) {
- eid = ACLERR3000;
- rv = ACLERRNOMEM;
- nserrGenerate(errp, rv, eid, ACL_Program, 0);
- } else {
-#ifdef UTEST
- if (fputs(acl_text, ofp) == 0) {
-#else
- if (system_fwrite_atomic(ofp, acl_text, strlen(acl_text))==IO_ERROR) {
-#endif
- eid = ACLERR3200;
- rv = ACLERRIO;
- errmsg = system_errmsg();
- nserrGenerate(errp, rv, eid, ACL_Program, 2, filename, errmsg);
- }
- }
-
- if ( acl_text )
- PERM_FREE(acl_text);
-
-#ifdef UTEST
- fclose(ofp);
-#else
- system_fclose(ofp);
-#endif
-
- return(rv);
-}
/*
* Delete a named ACL from an ACL list
@@ -2456,211 +2020,6 @@ ACL_ListPostParseForAuth(NSErr_t *errp, ACLListHandle_t *acl_list )
}
-/*
- * LOCAL FUNCTION
- *
- * Output Authorization Expression Rights "right, right"
- */
-
-static int
-acl_decompose_expr_arg( acl_string_t *str_t, ACLExprHandle_t *expr )
-{
-int ii;
-
- if ( expr->expr_argc <= 0 ) {
- return(ACLERRINTERNAL);
- }
-
- if ( expr->expr_type == ACL_EXPR_TYPE_RESPONSE ) {
- acl_to_str_append(str_t, expr->expr_argv[0]);
- acl_to_str_append(str_t, " \"");
- acl_to_str_append(str_t, expr->expr_argv[1]);
- acl_to_str_append(str_t, "\";\n");
- return(0);
- }
-
- for (ii = 0; ii < expr->expr_argc; ii++) {
- acl_to_str_append(str_t, expr->expr_argv[ii]);
- if ( ii < expr->expr_argc - 1 ) {
- acl_to_str_append(str_t, ",");
- }
- }
- acl_to_str_append(str_t, ";\n");
-
- return(0);
-}
-
-/*
- * LOCAL FUNCTION
- *
- * Walks through the authentication statement PList_t and
- * prints the structure to a string.
- */
-
-static void
-acl_decompose_auth_expr(char *lval, const void *rval, void *user_data)
-{
- acl_string_t * p_aclstr = (acl_string_t *) user_data;
- // ####
-
- acl_to_str_append(p_aclstr, " ");
- acl_to_str_append(p_aclstr, lval);
- acl_to_str_append(p_aclstr, "=\"");
- acl_to_str_append(p_aclstr, (char *) rval);
- acl_to_str_append(p_aclstr, "\"");
-
- return;
-}
-
-/*
- * LOCAL FUNCTION
- *
- * Output the logic part of the authencation statement to a string.
- */
-
-static int
-acl_decompose_auth_logic( acl_string_t * str_t, ACLExprHandle_t *expr)
-{
-
- if ( expr->expr_auth == NULL )
- return(0);
-
- acl_to_str_append(str_t, "exprs");
- PListEnumerate(expr->expr_auth, acl_decompose_auth_expr, (void *) str_t);
- acl_to_str_append(str_t, ";\n");
-
- return(0);
-}
-
-/*
- * LOCAL FUNCTION
- *
- * Output the logic part of the authorization statement to a string.
- */
-
-static int
-acl_decompose_expr_logic( acl_string_t *str_t, ACLExprHandle_t *expr, ACLExprStack_t *expr_stack)
-{
-int rv = 0;
-int ii;
-
- expr_stack->stack_index = 0;
- expr_stack->found_subexpression = 0;
- expr_stack->last_subexpression = -1;
-
- for (ii = 0; ii < expr->expr_raw_index; ii++) {
- rv = acl_reduce_expr_logic(expr_stack, &expr->expr_raw[ii]);
- if (rv) break;
- }
-
- if (!rv && expr_stack->expr_text[0]) {
- acl_to_str_append(str_t, "exprs ");
- acl_to_str_append(str_t, expr_stack->expr_text[0]);
- acl_to_str_append(str_t, ";\n");
- PERM_FREE(expr_stack->expr_text[0]);
- }
-
- return(rv);
-}
-
-static int
-acl_decompose(acl_string_t *str_t, ACLListHandle_t *acl_list)
-{
-ACLWrapper_t *wrap;
-ACLHandle_t *acl;
-ACLExprHandle_t *expr;
-int rv = 0;
-ACLExprStack_t *expr_stack;
-
- expr_stack = (ACLExprStack_t *) PERM_MALLOC(sizeof(ACLExprStack_t));
- if ( expr_stack == NULL )
- return(ACLERRNOMEM);
-
- if ( acl_list->acl_list_head == NULL ) {
- PERM_FREE(expr_stack);
- return(0);
- }
-
- acl_to_str_append(str_t, "version 3.0;");
- for (wrap = acl_list->acl_list_head; wrap && !rv;
- wrap = wrap->wrap_next ) {
- acl = wrap->acl;
- if ( acl->tag ) {
- acl_to_str_append(str_t, "\nname \"");
- acl_to_str_append(str_t, acl->tag);
- acl_to_str_append(str_t, "\";\n");
- } else {
- acl_to_str_append(str_t, "\nname;\n");
- }
-
- for (expr = acl->expr_list_head; expr && rv == 0;
- expr = expr->expr_next ) {
-
- switch (expr->expr_type) {
- case ACL_EXPR_TYPE_DENY:
- acl_to_str_append(str_t, "type deny;\nrights ");
- if ( (rv = acl_decompose_expr_arg(str_t, expr)) < 0 )
- break;
- if ( IS_ABSOLUTE(expr->expr_flags) )
- acl_to_str_append(str_t, "absolute true;\n");
- rv = acl_decompose_expr_logic(str_t, expr, expr_stack);
- break;
- case ACL_EXPR_TYPE_ALLOW:
- acl_to_str_append(str_t, "type allow;\nrights ");
- if ( (rv = acl_decompose_expr_arg(str_t, expr)) < 0 )
- break;
- if ( IS_ABSOLUTE(expr->expr_flags) )
- acl_to_str_append(str_t, "absolute true;\n");
- rv = acl_decompose_expr_logic(str_t, expr, expr_stack);
- break;
- case ACL_EXPR_TYPE_AUTH:
- acl_to_str_append(str_t, "type authenticate;\nattrs ");
- if ( (rv = acl_decompose_expr_arg(str_t, expr)) < 0 )
- break;
- if ( IS_ABSOLUTE(expr->expr_flags) )
- acl_to_str_append(str_t, "absolute true;\n");
- rv = acl_decompose_auth_logic(str_t, expr);
- break;
- case ACL_EXPR_TYPE_RESPONSE:
- acl_to_str_append(str_t, "type response;\nattrs ");
- rv = acl_decompose_expr_arg(str_t, expr);
- break;
- }
- }
- }
-
- PERM_FREE(expr_stack);
- return(rv);
-}
-
-/*
- * Converts an ACLListHandle_t to a parameter list suitable for passing
- * to the ACL UI.
- *
- * Input:
- * errp error stack
- * acl a pointer to a string, holds the result of the
- * decomposition.
- * acl_list Target ACL list handle
- * Returns:
- * 0 success
- * < 0 failure
- */
-
-NSAPI_PUBLIC int
-ACL_Decompose(NSErr_t *errp, char **acl, ACLListHandle_t *acl_list)
-{
- int rv ;
- acl_string_t aclstr={NULL,0,0};
-
- if ( acl_list == NULL || acl == NULL )
- return(ACLERRUNDEF);
-
- rv = acl_decompose(&aclstr, acl_list);
- *acl = aclstr.str;
-
- return ( rv );
-}
/*
* The following routines are used to validate input parameters. They always
diff --git a/lib/libaccess/oneeval.cpp b/lib/libaccess/oneeval.cpp
index ed29ee47a..f3283b699 100644
--- a/lib/libaccess/oneeval.cpp
+++ b/lib/libaccess/oneeval.cpp
@@ -137,10 +137,10 @@ static ACLDispatchVector_t __nsacl_vector = {
/* ACL language and file interfaces */
- ACL_ParseFile,
+ NULL /* ex ACL_ParseFile*/,
ACL_ParseString,
- ACL_WriteString,
- ACL_WriteFile,
+ NULL /* ex ACL_WriteString*/,
+ NULL /* ex ACL_WriteFile */,
NULL /* ex ACL_FileRenameAcl */,
NULL /* ex ACL_FileDeleteAcl */,
NULL /* ex ACL_FileGetAcl */,
| 0 |
b377d7da2b03fcbc43abaf2195b418debde27ce4
|
389ds/389-ds-base
|
Ticket #47701 - Make retro changelog trim interval programmable
Description: Fixing 3 syntax errors introduced by commit
bb4f0c428f9e53bccb875a552f5cae1ee6f733be
|
commit b377d7da2b03fcbc43abaf2195b418debde27ce4
Author: Noriko Hosoi <[email protected]>
Date: Tue Feb 18 13:26:39 2014 -0800
Ticket #47701 - Make retro changelog trim interval programmable
Description: Fixing 3 syntax errors introduced by commit
bb4f0c428f9e53bccb875a552f5cae1ee6f733be
diff --git a/ldap/servers/plugins/retrocl/retrocl_trim.c b/ldap/servers/plugins/retrocl/retrocl_trim.c
index f8c6f5194..cb18889ae 100644
--- a/ldap/servers/plugins/retrocl/retrocl_trim.c
+++ b/ldap/servers/plugins/retrocl/retrocl_trim.c
@@ -506,19 +506,19 @@ void retrocl_init_trimming (void)
return;
}
ageval = age_str2time (cl_maxage);
- slapi_ch_free_string(&cl_maxage);
+ slapi_ch_free_string((char **)&cl_maxage);
if (cl_trim_interval) {
trim_interval = strtol(cl_trim_interval, (char **)NULL, 10);
if (0 == trim_interval) {
slapi_log_error(SLAPI_LOG_FATAL, RETROCL_PLUGIN_NAME,
"retrocl_init_trimming: ignoring invalid %s value %s; "
- "resetting the default %s\n",
+ "resetting the default %d\n",
CONFIG_CHANGELOG_TRIM_INTERVAL, cl_trim_interval,
DEFAULT_CHANGELOGDB_TRIM_INTERVAL);
trim_interval = DEFAULT_CHANGELOGDB_TRIM_INTERVAL;
}
- slapi_ch_free_string(&cl_trim_interval);
+ slapi_ch_free_string((char **)&cl_trim_interval);
}
ts.ts_c_max_age = ageval;
| 0 |
6647fba182179bde0ac3e7e6f89e146ccca180a6
|
389ds/389-ds-base
|
Issue 49312 - Added a new test case for "-D configdir"
Bug Description: "pwdhash -D configdir" uses the DS default hashing algorithm.
Fix Description: Add a test case that tests the change of default pwdhash scheme.
https://pagure.io/389-ds-base/issue/49312
Reviewed by: wibrown, spichugi
Signed-off-by: Simon Pichugin <[email protected]>
|
commit 6647fba182179bde0ac3e7e6f89e146ccca180a6
Author: Akshay Adhikari <[email protected]>
Date: Thu Jan 4 17:14:53 2018 +0530
Issue 49312 - Added a new test case for "-D configdir"
Bug Description: "pwdhash -D configdir" uses the DS default hashing algorithm.
Fix Description: Add a test case that tests the change of default pwdhash scheme.
https://pagure.io/389-ds-base/issue/49312
Reviewed by: wibrown, spichugi
Signed-off-by: Simon Pichugin <[email protected]>
diff --git a/dirsrvtests/tests/suites/clu/clu_test.py b/dirsrvtests/tests/suites/clu/clu_test.py
index cec17623f..ea2cb4ba7 100644
--- a/dirsrvtests/tests/suites/clu/clu_test.py
+++ b/dirsrvtests/tests/suites/clu/clu_test.py
@@ -6,7 +6,8 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-
+import time
+import subprocess
import pytest
from lib389.tasks import *
from lib389.utils import *
@@ -37,7 +38,6 @@ def test_clu_pwdhash(topology_st):
log.info('Running test_clu_pwdhash...')
cmd = '%s -s ssha testpassword' % os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash')
-
p = os.popen(cmd)
result = p.readline()
p.close()
@@ -49,14 +49,45 @@ def test_clu_pwdhash(topology_st):
if len(result) < 20:
log.fatal('test_clu_pwdhash: Encrypted password is too short')
assert False
-
log.info('pwdhash generated: ' + result)
log.info('test_clu_pwdhash: PASSED')
+def test_clu_pwdhash_mod(topology_st):
+ """Test the pwdhash script output with -D configdir
+
+ :id: 874ab5e2-207b-4a95-b4c0-22d97b8ab643
+
+ :setup: Standalone instance
+
+ :steps:
+ 1. Set nsslapd-rootpwstoragescheme & passwordStorageScheme to SSHA256 & SSHA384 respectively
+ 2. Execute /usr/bin/pwdhash -D /etc/dirsrv/slapd-instance_name/ <password>
+ 3. Check if there is any output
+ 4. Check if the command returns the hashed string using the algorithm set in nsslapd-rootpwstoragescheme
+
+ :expectedresults:
+ 1. nsslapd-rootpwstoragescheme & passwordStorageScheme should set to SSHA256 & SSHA384 respectively
+ 2. Execution should PASS
+ 3. There should be an output from the command
+ 4. Command should return the hashed string using the algorithm set in nsslapd-rootpwstoragescheme
+ """
+
+ log.info('Running test_clu_pwdhash_mod...')
+ topology_st.standalone.config.set('nsslapd-rootpwstoragescheme', 'SSHA256')
+ topology_st.standalone.config.set('passwordStorageScheme', 'SSHA384')
+ cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash'), '-D', '/etc/dirsrv/slapd-standalone1',
+ 'password']
+ result = subprocess.check_output(cmd)
+ stdout = ensure_str(result)
+ assert result, "Failed to run pwdhash"
+ assert 'SSHA256' in stdout
+ log.info('pwdhash generated: ' + stdout)
+ log.info('returned the hashed string using the algorithm set in nsslapd-rootpwstoragescheme')
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
CURRENT_FILE = os.path.realpath(__file__)
pytest.main("-s %s" % CURRENT_FILE)
-
| 0 |
34fe20be0a1ea40e2fb5325f690683b260eae32f
|
389ds/389-ds-base
|
Resolves: bug 447353
Summary: RFE: search optimization and single character substring searches
Fix Description: missing close comment
|
commit 34fe20be0a1ea40e2fb5325f690683b260eae32f
Author: Rich Megginson <[email protected]>
Date: Tue Aug 5 21:03:42 2008 +0000
Resolves: bug 447353
Summary: RFE: search optimization and single character substring searches
Fix Description: missing close comment
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 6dcca5b45..d589f0d6e 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -414,6 +414,7 @@ struct attrinfo {
* an index instance (dse.ldif), the substr key
* len value(s) are stored here. If not specified,
* the default length triplet is 2, 3, 2.
+ */
};
#define MAXDBCACHE 20
| 0 |
836e84b385260e72b38b10fa55b7fbc026caecb8
|
389ds/389-ds-base
|
Issue 4093 - Fix MEP test case
Bug Description: Once some compiler warnings were fixed it
accidentally fixed the modrdn behavior. Previously
the modrdn code accidentally ignored errors that the
test case was taking for granted. Once these checks
were properly inforced the teset case started to fail.
Fix Description: Revise test case to "properly" check modrdn operations
by creating the Managed Entry before assignign it to
an entry, and then check for the revise managhed entry
DN after the modrdn takes place.
Also, improved CI debugging logging settings
relates: https://github.com/389ds/389-ds-base/issues/4093
Reviewed by: spichugi(Thanks!)
|
commit 836e84b385260e72b38b10fa55b7fbc026caecb8
Author: Mark Reynolds <[email protected]>
Date: Fri Jun 11 16:29:13 2021 -0400
Issue 4093 - Fix MEP test case
Bug Description: Once some compiler warnings were fixed it
accidentally fixed the modrdn behavior. Previously
the modrdn code accidentally ignored errors that the
test case was taking for granted. Once these checks
were properly inforced the teset case started to fail.
Fix Description: Revise test case to "properly" check modrdn operations
by creating the Managed Entry before assignign it to
an entry, and then check for the revise managhed entry
DN after the modrdn takes place.
Also, improved CI debugging logging settings
relates: https://github.com/389ds/389-ds-base/issues/4093
Reviewed by: spichugi(Thanks!)
diff --git a/dirsrvtests/tests/suites/plugins/managed_entry_test.py b/dirsrvtests/tests/suites/plugins/managed_entry_test.py
index 662044ccd..8ecadb24f 100644
--- a/dirsrvtests/tests/suites/plugins/managed_entry_test.py
+++ b/dirsrvtests/tests/suites/plugins/managed_entry_test.py
@@ -9,8 +9,8 @@
import pytest
import time
from lib389.topologies import topology_st as topo
-from lib389.idm.user import UserAccount, UserAccounts
-from lib389.idm.account import Account, Accounts
+from lib389.idm.user import UserAccounts
+from lib389.idm.account import Account
from lib389._constants import DEFAULT_SUFFIX
from lib389.idm.group import Groups
from lib389.config import Config
@@ -18,7 +18,6 @@ from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUni
from lib389.plugins import MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate
from lib389.idm.nscontainer import nsContainers
from lib389.idm.domain import Domain
-from lib389.tasks import Entry
import ldap
pytestmark = pytest.mark.tier1
@@ -112,8 +111,7 @@ class WithObjectClass(Account):
self._rdn_attribute = 'uid'
self._create_objectclasses = ['top', 'person', 'inetorgperson']
-#unstable or unstatus tests, skipped for now
[email protected](max_runs=2, min_passes=1)
+
def test_mentry01(topo, _create_inital):
"""Test Managed Entries basic functionality
@@ -155,6 +153,7 @@ def test_mentry01(topo, _create_inital):
# Check the plug-in status
mana = ManagedEntriesPlugin(topo.standalone)
assert mana.status()
+
# Add Template and definition entry
org1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).create(properties={'ou': 'Users'})
org2 = OrganizationalUnit(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}')
@@ -165,29 +164,35 @@ def test_mentry01(topo, _create_inital):
'mepStaticAttr': 'objectclass: posixGroup',
'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')})
conf_mep = MEPConfigs(topo.standalone)
- mep_config = conf_mep.create(properties={
+ conf_mep.create(properties={
'cn': 'UPG Definition2',
'originScope': org1.dn,
'originFilter': 'objectclass=posixaccount',
'managedBase': org2.dn,
'managedTemplate': mep_template1.dn})
+
# Add users with PosixAccount ObjectClass and verify creation of User Private Group
user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}'
+
# Disable the plug-in and check the status
mana.disable()
user.delete()
topo.standalone.restart()
+
# Add users with PosixAccount ObjectClass when the plug-in is disabled and creation of UPG should fail
user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
assert not user.get_attr_val_utf8('mepManagedEntry')
+
# Enable the plug-in and check the status
mana.enable()
user.delete()
topo.standalone.restart()
+
# Add users with PosixAccount ObjectClass and verify creation of User Private Group
user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}'
+
# Add users, run ModRDN operation and check the User Private group
# Add users, run LDAPMODIFY to change the gidNumber and check the User Private group
user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com')
@@ -198,18 +203,21 @@ def test_mentry01(topo, _create_inital):
user.replace_many(('sn', 'new_modified_sn'), ('gidNumber', '31309'))
assert entry.get_attr_val_utf8('gidNumber') == '31309'
user.delete()
+
# Checking whether creation of User Private group fails for existing group entry
- grp = Groups(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None).create(properties={'cn': 'MENTRY_14'})
+ Groups(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None).create(properties={'cn': 'MENTRY_14'})
user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
with pytest.raises(ldap.NO_SUCH_OBJECT):
entry.status()
user.delete()
+
# Checking whether adding of posixAccount objectClass to existing user creates UPG
# Add Users without posixAccount objectClass
users = WithObjectClass(topo.standalone, f'uid=test_test, ou=Users,{DEFAULT_SUFFIX}')
user_properties1 = {'uid': 'test_test', 'cn': 'test', 'sn': 'test', 'mail': '[email protected]', 'telephoneNumber': '123'}
user = users.create(properties=user_properties1)
assert not user.get_attr_val_utf8('mepManagedEntry')
+
# Add posixAccount objectClass
user.replace_many(('objectclass', ['top', 'person', 'inetorgperson', 'posixAccount']),
('homeDirectory', '/home/ok'),
@@ -217,6 +225,7 @@ def test_mentry01(topo, _create_inital):
assert not user.get_attr_val_utf8('mepManagedEntry')
user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com')
+
# Add inetuser objectClass
user.replace_many(
('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson',
@@ -227,6 +236,7 @@ def test_mentry01(topo, _create_inital):
user.delete()
user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com')
+
# Add groupofNames objectClass
user.replace_many(
('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson',
@@ -234,18 +244,34 @@ def test_mentry01(topo, _create_inital):
'person', 'mepOriginEntry', 'groupofNames']),
('memberOf', user.dn))
assert entry.status()
- # Running ModRDN operation and checking the user private groups mepManagedBy attribute
- user.replace('mepManagedEntry', f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}')
+
+ # Running ModRDN operation and checking the user private groups mepManagedBy
+ # attribute was also reset because the modrdn on the origin will do a modrdn
+ # on checkManagedEntry to match the new rdn value of the origin entry
+ checkManagedEntry = UserAccounts(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None)
+ check_entry = checkManagedEntry.create(properties={
+ 'objectclass': ['top', 'extensibleObject'],
+ 'uid': 'CheckModRDN',
+ 'uidNumber': '12',
+ 'gidNumber': '12',
+ 'homeDirectory': '/home',
+ 'sn': 'tmp',
+ 'cn': 'tmp',
+ })
+ user.replace('mepManagedEntry', check_entry.dn)
user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com')
- assert user.get_attr_val_utf8('mepManagedEntry') == f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}'
+ assert user.get_attr_val_utf8_l('mepManagedEntry') == f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}'.lower()
+
# Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG
- user.remove('mepManagedEntry', f'uid=CheckModRDN,ou=Users,{DEFAULT_SUFFIX}')
+ user.remove('mepManagedEntry', f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}')
user.rename(new_rdn='uid=UserNewRDN1', newsuperior='ou=Users,dc=example,dc=com')
assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN1,ou=Groups,{DEFAULT_SUFFIX}'
+
# Change the RDN of template entry, DSA Unwilling to perform error expected
mep = MEPTemplate(topo.standalone, f'cn=UPG Template,{DEFAULT_SUFFIX}')
with pytest.raises(ldap.UNWILLING_TO_PERFORM):
mep.rename(new_rdn='cn=UPG Template2', newsuperior='dc=example,dc=com')
+
# Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted
before = user.get_attr_val_utf8('mepManagedEntry')
user.rename(new_rdn='uid=Anuj', newsuperior='ou=Users,dc=example,dc=com')
diff --git a/ldap/servers/plugins/replication/repl5_plugins.c b/ldap/servers/plugins/replication/repl5_plugins.c
index 248e2e56c..2d9c7091c 100644
--- a/ldap/servers/plugins/replication/repl5_plugins.c
+++ b/ldap/servers/plugins/replication/repl5_plugins.c
@@ -882,8 +882,6 @@ copy_operation_parameters(Slapi_PBlock *pb)
/* we are only interested in the updates to replicas */
if (NULL == replica) {
- slapi_log_err(SLAPI_LOG_REPL, REPLICATION_SUBSYSTEM,
- "copy_operation_parameters - replica is null.\n");
return;
}
/* we only save the original operation parameters for replicated operations
diff --git a/src/lib389/lib389/topologies.py b/src/lib389/lib389/topologies.py
index 6a327f4c4..fb4121003 100644
--- a/src/lib389/lib389/topologies.py
+++ b/src/lib389/lib389/topologies.py
@@ -105,7 +105,10 @@ def _create_instances(topo_dict, suffix):
if DEBUGGING:
instance.config.set('nsslapd-accesslog-logbuffering','off')
instance.config.set('nsslapd-errorlog-level','8192')
+ instance.config.set('nsslapd-accesslog-level','260')
instance.config.set('nsslapd-auditlog-logging-enabled','on')
+ instance.config.set('nsslapd-auditfaillog-logging-enabled','on')
+ instance.config.set('nsslapd-plugin-logging', 'on')
log.info("Instance with parameters {} was created.".format(args_instance))
if "standalone1" in instances and len(instances) == 1:
| 0 |
e80ce4e03d91c386fc11135bf459e67f85afd7de
|
389ds/389-ds-base
|
Resolves: 311851
Summary: Remove hard-coded SASL mappings and use config based regex mappings instead.
|
commit e80ce4e03d91c386fc11135bf459e67f85afd7de
Author: Nathan Kinder <[email protected]>
Date: Fri Sep 28 23:34:55 2007 +0000
Resolves: 311851
Summary: Remove hard-coded SASL mappings and use config based regex mappings instead.
diff --git a/ldap/admin/src/scripts/DSCreate.pm.in b/ldap/admin/src/scripts/DSCreate.pm.in
index 0c43f7274..c42158a80 100644
--- a/ldap/admin/src/scripts/DSCreate.pm.in
+++ b/ldap/admin/src/scripts/DSCreate.pm.in
@@ -295,7 +295,8 @@ sub createConfigFile {
}
my @ldiffiles = ("$inf->{General}->{prefix}@templatedir@/template-dse.ldif",
- "$inf->{General}->{prefix}@templatedir@/template-suffix-db.ldif");
+ "$inf->{General}->{prefix}@templatedir@/template-suffix-db.ldif",
+ "$inf->{General}->{prefix}@templatedir@/template-sasl.ldif");
if ("@enable_pam_passthru@") {
push @ldiffiles, "$inf->{General}->{prefix}@templatedir@/template-pampta.ldif";
}
diff --git a/ldap/ldif/template-sasl.ldif.in b/ldap/ldif/template-sasl.ldif.in
new file mode 100644
index 000000000..0455a956c
--- /dev/null
+++ b/ldap/ldif/template-sasl.ldif.in
@@ -0,0 +1,32 @@
+# replace the Suffix token with your suffix e.g. dc=example,dc=com
+dn: cn=Kerberos uid mapping,cn=mapping,cn=sasl,cn=config
+objectClass: top
+objectClass: nsSaslMapping
+cn: Kerberos uid mapping
+nsSaslMapRegexString: \(.*\)@\(.*\)\.\(.*\)
+nsSaslMapBaseDNTemplate: dc=\2,dc=\3
+nsSaslMapFilterTemplate: (uid=\1)
+
+dn: cn=rfc 2829 dn syntax,cn=mapping,cn=sasl,cn=config
+objectClass: top
+objectClass: nsSaslMapping
+cn: rfc 2829 dn syntax
+nsSaslMapRegexString: ^dn:\(.*\)
+nsSaslMapBaseDNTemplate: \1
+nsSaslMapFilterTemplate: (objectclass=*)
+
+dn: cn=rfc 2829 u syntax,cn=mapping,cn=sasl,cn=config
+objectClass: top
+objectClass: nsSaslMapping
+cn: rfc 2829 u syntax
+nsSaslMapRegexString: ^u:\(.*\)
+nsSaslMapBaseDNTemplate: %ds_suffix%
+nsSaslMapFilterTemplate: (uid=\1)
+
+dn: cn=uid mapping,cn=mapping,cn=sasl,cn=config
+objectClass: top
+objectClass: nsSaslMapping
+cn: uid mapping
+nsSaslMapRegexString: ^[^:@]+$
+nsSaslMapBaseDNTemplate: %ds_suffix%
+nsSaslMapFilterTemplate: (uid=&)
diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c
index f7435ce46..cd0ef62eb 100644
--- a/ldap/servers/slapd/saslbind.c
+++ b/ldap/servers/slapd/saslbind.c
@@ -290,115 +290,55 @@ static Slapi_Entry *ids_sasl_user_to_entry(
)
{
int found = 0;
- unsigned fsize = 0, ulen, rlen = 0;
int attrsonly = 0, scope = LDAP_SCOPE_SUBTREE;
- char filter[1024], *fptr = filter;
LDAPControl **ctrls = NULL;
Slapi_Entry *entry = NULL;
Slapi_DN *sdn;
char **attrs = NULL;
- char *userattr = "uid", *realmattr = NULL, *ufilter = NULL;
- void *node;
int regexmatch = 0;
- char *regex_ldap_search_base = NULL;
- char *regex_ldap_search_filter = NULL;
-
- /* TODO: userattr & realmattr should be configurable */
+ char *base = NULL;
+ char *filter = NULL;
+
+ /* Check for wildcards in the authid and realm. If we encounter one,
+ * just fail the mapping without performing a costly internal search. */
+ if (user && strchr(user, '*')) {
+ LDAPDebug(LDAP_DEBUG_TRACE, "sasl user search encountered a wildcard in "
+ "the authid. Not attempting to map to entry. (authid=%s)\n", user, 0, 0);
+ return NULL;
+ } else if (user_realm && strchr(user_realm, '*')) {
+ LDAPDebug(LDAP_DEBUG_TRACE, "sasl user search encountered a wildcard in "
+ "the realm. Not attempting to map to entry. (realm=%s)\n", user_realm, 0, 0);
+ return NULL;
+ }
- /*
- * Check for dn: prefix. See RFC 2829 section 9.
- */
- if (strncasecmp(user, "dn:", 3) == 0) {
- sprintf(fptr, "(objectclass=*)");
- scope = LDAP_SCOPE_BASE;
- ids_sasl_user_search((char*)user+3, scope, filter,
+ /* New regex-based identity mapping */
+ regexmatch = sasl_map_domap((char*)user, (char*)user_realm, &base, &filter);
+ if (regexmatch) {
+ ids_sasl_user_search(base, scope, filter,
ctrls, attrs, attrsonly,
&entry, &found);
- } else {
- int offset = 0;
- if (strncasecmp(user,"u:",2) == 0 )
- offset = 2;
- /* TODO: quote the filter values */
-
- /* New regex-based identity mapping : we call it here before the old code.
- * If there's a match, we skip the old way, otherwise we plow ahead for backwards compatibility reasons
- */
-
- regexmatch = sasl_map_domap((char*)user, (char*)user_realm, ®ex_ldap_search_base, ®ex_ldap_search_filter);
- if (regexmatch) {
-
- ids_sasl_user_search(regex_ldap_search_base, scope, regex_ldap_search_filter,
- ctrls, attrs, attrsonly,
- &entry, &found);
- /* Free the filter etc */
- slapi_ch_free((void**)®ex_ldap_search_base);
- slapi_ch_free((void**)®ex_ldap_search_filter);
- } else {
-
- /* Ensure no buffer overflow. */
- /* We don't know what the upper limits on username and
- * realm lengths are. There don't seem to be any defined
- * in the relevant standards. We may find in the future
- * that a 1K buffer is insufficient for some mechanism,
- * but it seems unlikely given that the values are exposed
- * to the end user.
- */
- ulen = strlen(user+offset);
- fsize += strlen(userattr) + ulen;
- if (realmattr && user_realm) {
- rlen = strlen(user_realm);
- fsize += strlen(realmattr) + rlen;
- }
- if (ufilter) fsize += strlen(ufilter);
- fsize += 100; /* includes a good safety margin */
- if (fsize > 1024) {
- LDAPDebug(LDAP_DEBUG_ANY, "sasl user name and/or realm too long"
- " (ulen=%u, rlen=%u)\n", ulen, rlen, 0);
- return NULL;
- }
-
- /* now we can safely write the filter */
- sprintf(fptr, "(&(%s=%s)", userattr, user+offset);
- fptr += strlen(fptr);
- if (realmattr && user_realm) {
- sprintf(fptr, "(%s=%s)", realmattr, user_realm);
- fptr += strlen(fptr);
- }
- if (ufilter) {
- if (*ufilter == '(') {
- sprintf(fptr, "%s", ufilter);
- } else {
- sprintf(fptr, "(%s)", ufilter);
- }
- fptr += strlen(fptr);
- }
- sprintf(fptr, ")");
-
- /* iterate through the naming contexts */
- for (sdn = slapi_get_first_suffix(&node, 0); sdn != NULL;
- sdn = slapi_get_next_suffix(&node, 0)) {
-
- ids_sasl_user_search((char*)slapi_sdn_get_dn(sdn), scope, filter,
- ctrls, attrs, attrsonly,
- &entry, &found);
+ if (found == 1) {
+ LDAPDebug(LDAP_DEBUG_TRACE, "sasl user search found this entry: dn:%s, "
+ "matching filter=%s\n", entry->e_sdn.dn, filter, 0);
+ } else if (found == 0) {
+ LDAPDebug(LDAP_DEBUG_TRACE, "sasl user search found no entries matching "
+ "filter=%s\n", filter, 0, 0);
+ } else {
+ LDAPDebug(LDAP_DEBUG_TRACE, "sasl user search found more than one entry "
+ "matching filter=%s\n", filter, 0, 0);
+ if (entry) {
+ slapi_entry_free(entry);
+ entry = NULL;
}
}
- }
- if (found == 1) {
- LDAPDebug(LDAP_DEBUG_TRACE, "sasl user search found this entry: dn:%s, matching filter=%s\n", entry->e_sdn.dn, filter, 0);
- return entry;
- }
-
- if (found == 0) {
- LDAPDebug(LDAP_DEBUG_TRACE, "sasl user search found no entries matching filter=%s\n", filter, 0, 0);
- } else {
- LDAPDebug(LDAP_DEBUG_TRACE, "sasl user search found more than one entry matching filter=%s\n", filter, 0, 0);
+ /* Free the filter etc */
+ slapi_ch_free_string(&base);
+ slapi_ch_free_string(&filter);
}
- if (entry) slapi_entry_free(entry);
- return NULL;
+ return entry;
}
static char *buf2str(const char *buf, unsigned buflen)
| 0 |
36c593d32d092ff1b4bec1595ebe1ed0726f5240
|
389ds/389-ds-base
|
Issue 50875 - Refactor passwordUserAttributes's and passwordBadWords's code
Bug Description: Searches on cn=config takes values with spaces and
makes multiple attributes out of them. If we set passwordUserAttributes
to "cn uid givenname", it will transform it in a multi-valued attribute.
Fix Description: Change passwordUserAttributes's and passwordBadWords's type
to CONFIG_STRING (it was CONFIG_CHARRAY). Add an additional parameter
to store the array (and use it in pw.c).
The string and array processing is similar to nsslapd-allowed-sasl-mechanisms.
Add tests for both attributes.
https://pagure.io/389-ds-base/issue/50875
Reviewed by: mreynolds, tbordaz, firstyear (Thanks!)
|
commit 36c593d32d092ff1b4bec1595ebe1ed0726f5240
Author: Simon Pichugin <[email protected]>
Date: Sun Apr 5 21:19:11 2020 +0200
Issue 50875 - Refactor passwordUserAttributes's and passwordBadWords's code
Bug Description: Searches on cn=config takes values with spaces and
makes multiple attributes out of them. If we set passwordUserAttributes
to "cn uid givenname", it will transform it in a multi-valued attribute.
Fix Description: Change passwordUserAttributes's and passwordBadWords's type
to CONFIG_STRING (it was CONFIG_CHARRAY). Add an additional parameter
to store the array (and use it in pw.c).
The string and array processing is similar to nsslapd-allowed-sasl-mechanisms.
Add tests for both attributes.
https://pagure.io/389-ds-base/issue/50875
Reviewed by: mreynolds, tbordaz, firstyear (Thanks!)
diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
index 82d1a9788..291a6fd19 100644
--- a/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
+++ b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
@@ -48,6 +48,7 @@ def create_user(topology_st):
'gidNumber': '4000',
'homeDirectory': '/home/user',
'description': 'd_e_s_c',
+ 'loginShell': USER_RDN,
'userPassword': PASSWORD
})
@@ -61,7 +62,8 @@ def setPolicy(inst, attr, value):
value = str(value)
inst.config.set(attr, value)
- inst.simple_bind_s(USER_DN, PASSWORD)
+ policy = inst.config.get_attr_val_utf8(attr)
+ assert policy == value
def resetPasswd(inst):
@@ -84,6 +86,7 @@ def tryPassword(inst, policy_attr, value, reset_value, pw_bad, pw_good, msg):
"""
setPolicy(inst, policy_attr, value)
+ inst.simple_bind_s(USER_DN, PASSWORD)
users = UserAccounts(inst, DEFAULT_SUFFIX)
user = users.get(USER_RDN)
try:
@@ -250,17 +253,17 @@ def test_basic(topology_st, create_user, password_policy):
# Sequences
tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_1234',
- '13_#Kad472h', 'Max montonic sequence is not allowed')
+ '13_#Kad472h', 'Max monotonic sequence is not allowed')
tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_4321',
- '13_#Kad472h', 'Max montonic sequence is not allowed')
+ '13_#Kad472h', 'Max monotonic sequence is not allowed')
tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_abcd',
- '13_#Kad472h', 'Max montonic sequence is not allowed')
+ '13_#Kad472h', 'Max monotonic sequence is not allowed')
tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_dcba',
- '13_#Kad472h', 'Max montonic sequence is not allowed')
+ '13_#Kad472h', 'Max monotonic sequence is not allowed')
# Sequence Sets
tryPassword(topology_st.standalone, 'passwordMaxSeqSets', 2, 0, 'Za1_123--123',
- '13_#Kad472h', 'Max montonic sequence is not allowed')
+ '13_#Kad472h', 'Max monotonic sequence is not allowed')
# Max characters in a character class
tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_9376',
@@ -273,16 +276,94 @@ def test_basic(topology_st, create_user, password_policy):
'13_#Kad472h', 'Too may consecutive characters from the same class')
# Bad words
- tryPassword(topology_st.standalone, 'passwordBadWords', 'redhat fedora', 'none', 'Za1_redhat',
- '13_#Kad472h', 'Too may consecutive characters from the same class')
- tryPassword(topology_st.standalone, 'passwordBadWords', 'redhat fedora', 'none', 'Za1_fedora',
+ tryPassword(topology_st.standalone, 'passwordBadWords', 'redhat', 'none', 'Za1_redhat',
'13_#Kad472h', 'Too may consecutive characters from the same class')
# User Attributes
tryPassword(topology_st.standalone, 'passwordUserAttributes', 'description', 0, 'Za1_d_e_s_c',
'13_#Kad472h', 'Password found in user entry')
- log.info('pwdPolicy tests PASSED')
+
[email protected]
[email protected]
[email protected](ds_is_older("1.4.1.18"), reason="Not implemented")
+def test_config_set_few_user_attributes(topology_st, create_user, password_policy):
+ """Test that we can successfully set multiple values to passwordUserAttributes
+
+ :id: 188e0aee-6e29-4857-910c-27d5606f8c08
+ :setup: Standalone instance
+ :steps:
+ 1. Set passwordUserAttributes to "description loginShell"
+ 2. Verify passwordUserAttributes has the values
+ 3. Verify passwordUserAttributes enforced the policy
+ :expectedresults:
+ 1. Operation should be successful
+ 2. Operation should be successful
+ 3. Operation should be successful
+ """
+
+ standalone = topology_st.standalone
+
+ standalone.log.info('Set passwordUserAttributes to "description loginShell"')
+ standalone.config.set('passwordUserAttributes', 'description loginshell')
+
+ standalone.restart()
+
+ standalone.log.info("Verify passwordUserAttributes has the values")
+ user_attrs = standalone.config.get_attr_val_utf8('passwordUserAttributes')
+ assert "description" in user_attrs
+ assert "loginshell" in user_attrs
+ standalone.log.info("Reset passwordUserAttributes")
+ standalone.config.remove_all('passwordUserAttributes')
+
+ standalone.log.info("Verify passwordUserAttributes enforced the policy")
+ attributes = ['description, loginShell', 'description,loginShell', 'description loginShell']
+ values = ['Za1_d_e_s_c', f'Za1_{USER_RDN}', f'Za1_d_e_s_c{USER_RDN}']
+ for attr in attributes:
+ for value in values:
+ tryPassword(standalone, 'passwordUserAttributes', attr, 0, value,
+ '13_#Kad472h', 'Password found in user entry')
+
+
[email protected]
[email protected]
[email protected](ds_is_older("1.4.1.18"), reason="Not implemented")
+def test_config_set_few_bad_words(topology_st, create_user, password_policy):
+ """Test that we can successfully set multiple values to passwordBadWords
+
+ :id: 2977094c-921c-4b2f-af91-4c7a45ded48b
+ :setup: Standalone instance
+ :steps:
+ 1. Set passwordBadWords to "fedora redhat"
+ 2. Verify passwordBadWords has the values
+ 3. Verify passwordBadWords enforced the policy
+ :expectedresults:
+ 1. Operation should be successful
+ 2. Operation should be successful
+ 3. Operation should be successful
+ """
+
+ standalone = topology_st.standalone
+
+ standalone.log.info('Set passwordBadWords to "fedora redhat"')
+ standalone.config.set('passwordBadWords', 'fedora redhat')
+
+ standalone.restart()
+
+ standalone.log.info("Verify passwordBadWords has the values")
+ user_attrs = standalone.config.get_attr_val_utf8('passwordBadWords')
+ assert "fedora" in user_attrs
+ assert "redhat" in user_attrs
+ standalone.log.info("Reset passwordBadWords")
+ standalone.config.remove_all('passwordBadWords')
+
+ standalone.log.info("Verify passwordBadWords enforced the policy")
+ attributes = ['redhat, fedora', 'redhat,fedora', 'redhat fedora']
+ values = ['Za1_redhat_fedora', 'Za1_fedora', 'Za1_redhat']
+ for attr in attributes:
+ for value in values:
+ tryPassword(standalone, 'passwordBadWords', attr, 'none', value,
+ '13_#Kad472h', 'Too may consecutive characters from the same class')
if __name__ == '__main__':
diff --git a/ldap/servers/slapd/back-ldbm/vlv.c b/ldap/servers/slapd/back-ldbm/vlv.c
index b50bb5aff..ef7f26ef0 100644
--- a/ldap/servers/slapd/back-ldbm/vlv.c
+++ b/ldap/servers/slapd/back-ldbm/vlv.c
@@ -1962,20 +1962,6 @@ vlv_find_index_by_filter(struct backend *be, const char *base, Slapi_Filter *f)
return vlv_find_index_by_filter_txn(be, base, f, NULL);
}
-/* replace c with c2 in string -- probably exists somewhere but I can't find it slapi maybe? */
-
-static void
-replace_char(char *name, char c, char c2)
-{
- int x;
-
- for (x = 0; name[x] != '\0'; x++) {
- if (c == name[x]) {
- name[x] = c2;
- }
- }
-}
-
/* similar to what the console GUI does */
char *
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index f1078154f..0d3d9a924 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -173,7 +173,6 @@ typedef enum {
static int32_t config_set_onoff(const char *attrname, char *value, int32_t *configvalue, char *errorbuf, int apply);
static int config_set_schemareplace(const char *attrname, char *value, char *errorbuf, int apply);
-static void remove_commas(char *str);
static int invalid_sasl_mech(char *str);
@@ -535,12 +534,12 @@ static struct config_get_and_set
{CONFIG_PW_USERATTRS_ATTRIBUTE, config_set_pw_user_attrs,
NULL, 0,
(void **)&global_slapdFrontendConfig.pw_policy.pw_cmp_attrs,
- CONFIG_CHARRAY, NULL, NULL, NULL},
+ CONFIG_STRING, NULL, "", NULL},
/* password bad work list */
{CONFIG_PW_BAD_WORDS_ATTRIBUTE, config_set_pw_bad_words,
NULL, 0,
(void **)&global_slapdFrontendConfig.pw_policy.pw_bad_words,
- CONFIG_CHARRAY, NULL, NULL, NULL},
+ CONFIG_STRING, NULL, "", NULL},
/* password max sequence */
{CONFIG_PW_MAX_SEQ_ATTRIBUTE, config_set_pw_max_seq,
NULL, 0,
@@ -2946,70 +2945,118 @@ config_set_pw_dict_path(const char *attrname, char *value, char *errorbuf, int a
return retVal;
}
+char **
+config_get_pw_user_attrs_array(void)
+{
+ /*
+ * array of password user attributes. If is null, returns NULL thanks to ch_array_dup.
+ * Caller must free!
+ */
+ char **retVal;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ CFG_LOCK_READ(slapdFrontendConfig);
+ retVal = slapi_ch_array_dup(slapdFrontendConfig->pw_policy.pw_cmp_attrs_array);
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+
+ return retVal;
+}
+
int32_t
config_set_pw_user_attrs(const char *attrname, char *value, char *errorbuf, int apply)
{
int retVal = LDAP_SUCCESS;
- char **attrs = NULL;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
if (config_value_is_null(attrname, value, errorbuf, 0)) {
value = NULL;
}
if (apply) {
- if (value) {
+ /* During a reset, the value is "", so we have to handle this case. */
+ if (strcmp(value, "") != 0) {
+ char **nval_array;
+ char *nval = slapi_ch_strdup(value);
+ /* A separate variable is used because slapi_str2charray_ext can change it and nval'd become corrupted */
+ char *tmp_array_nval = slapi_ch_strdup(nval);
+
+ /* We should accept comma-separated lists but slapi_str2charray_ext will process only space-separated */
+ replace_char(tmp_array_nval, ',', ' ');
/* Take list of attributes and break it up into a char array */
- char *attr = NULL;
- char *token = NULL;
- char *next = NULL;
-
- token = slapi_ch_strdup(value);
- for (attr = ldap_utf8strtok_r(token, " ", &next); attr != NULL;
- attr = ldap_utf8strtok_r(NULL, " ", &next))
- {
- slapi_ch_array_add(&attrs, slapi_ch_strdup(attr));
- }
- slapi_ch_free_string(&token);
- }
+ nval_array = slapi_str2charray_ext(tmp_array_nval, " ", 0);
+ slapi_ch_free_string(&tmp_array_nval);
- CFG_LOCK_WRITE(slapdFrontendConfig);
- slapi_ch_array_free(slapdFrontendConfig->pw_policy.pw_cmp_attrs);
- slapdFrontendConfig->pw_policy.pw_cmp_attrs = attrs;
- CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapi_ch_free_string(&slapdFrontendConfig->pw_policy.pw_cmp_attrs);
+ slapi_ch_array_free(slapdFrontendConfig->pw_policy.pw_cmp_attrs_array);
+ slapdFrontendConfig->pw_policy.pw_cmp_attrs = nval;
+ slapdFrontendConfig->pw_policy.pw_cmp_attrs_array = nval_array;
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ } else {
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapi_ch_free_string(&slapdFrontendConfig->pw_policy.pw_cmp_attrs);
+ slapi_ch_array_free(slapdFrontendConfig->pw_policy.pw_cmp_attrs_array);
+ slapdFrontendConfig->pw_policy.pw_cmp_attrs = NULL;
+ slapdFrontendConfig->pw_policy.pw_cmp_attrs_array = NULL;
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ }
}
return retVal;
}
+char **
+config_get_pw_bad_words_array(void)
+{
+ /*
+ * array of words to reject. If is null, returns NULL thanks to ch_array_dup.
+ * Caller must free!
+ */
+ char **retVal;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ CFG_LOCK_READ(slapdFrontendConfig);
+ retVal = slapi_ch_array_dup(slapdFrontendConfig->pw_policy.pw_bad_words_array);
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+
+ return retVal;
+}
+
int32_t
config_set_pw_bad_words(const char *attrname, char *value, char *errorbuf, int apply)
{
int retVal = LDAP_SUCCESS;
- char **words = NULL;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
if (config_value_is_null(attrname, value, errorbuf, 0)) {
value = NULL;
}
if (apply) {
- if (value) {
+ /* During a reset, the value is "", so we have to handle this case. */
+ if (strcmp(value, "") != 0) {
+ char **nval_array;
+ char *nval = slapi_ch_strdup(value);
+ /* A separate variable is used because slapi_str2charray_ext can change it and nval'd become corrupted */
+ char *tmp_array_nval = slapi_ch_strdup(nval);
+
+ /* We should accept comma-separated lists but slapi_str2charray_ext will process only space-separated */
+ replace_char(tmp_array_nval, ',', ' ');
/* Take list of attributes and break it up into a char array */
- char *word = NULL;
- char *token = NULL;
- char *next = NULL;
-
- token = slapi_ch_strdup(value);
- for (word = ldap_utf8strtok_r(token, " ", &next); word != NULL;
- word = ldap_utf8strtok_r(NULL, " ", &next))
- {
- slapi_ch_array_add(&words, slapi_ch_strdup(word));
- }
- slapi_ch_free_string(&token);
- }
+ nval_array = slapi_str2charray_ext(tmp_array_nval, " ", 0);
+ slapi_ch_free_string(&tmp_array_nval);
- CFG_LOCK_WRITE(slapdFrontendConfig);
- slapi_ch_array_free(slapdFrontendConfig->pw_policy.pw_bad_words);
- slapdFrontendConfig->pw_policy.pw_bad_words = words;
- CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapi_ch_free_string(&slapdFrontendConfig->pw_policy.pw_bad_words);
+ slapi_ch_array_free(slapdFrontendConfig->pw_policy.pw_bad_words_array);
+ slapdFrontendConfig->pw_policy.pw_bad_words = nval;
+ slapdFrontendConfig->pw_policy.pw_bad_words_array = nval_array;
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ } else {
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapi_ch_free_string(&slapdFrontendConfig->pw_policy.pw_bad_words);
+ slapi_ch_array_free(slapdFrontendConfig->pw_policy.pw_bad_words_array);
+ slapdFrontendConfig->pw_policy.pw_bad_words = NULL;
+ slapdFrontendConfig->pw_policy.pw_bad_words_array = NULL;
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+ }
}
return retVal;
}
@@ -7338,13 +7385,13 @@ config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf
/* During a reset, the value is "", so we have to handle this case. */
if (strcmp(value, "") != 0) {
+ char **nval_array;
char *nval = slapi_ch_strdup(value);
/* A separate variable is used because slapi_str2charray_ext can change it and nval'd become corrupted */
char *tmp_array_nval;
/* cyrus sasl doesn't like comma separated lists */
- remove_commas(nval);
- tmp_array_nval = slapi_ch_strdup(nval);
+ replace_char(nval, ',', ' ');
if (invalid_sasl_mech(nval)) {
slapi_log_err(SLAPI_LOG_ERR, "config_set_allowed_sasl_mechs",
@@ -7353,15 +7400,18 @@ config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf
"digits, hyphens, or underscores\n",
nval);
slapi_ch_free_string(&nval);
- slapi_ch_free_string(&tmp_array_nval);
return LDAP_UNWILLING_TO_PERFORM;
}
+
+ tmp_array_nval = slapi_ch_strdup(nval);
+ nval_array = slapi_str2charray_ext(tmp_array_nval, " ", 0);
+ slapi_ch_free_string(&tmp_array_nval);
+
CFG_LOCK_WRITE(slapdFrontendConfig);
slapi_ch_free_string(&slapdFrontendConfig->allowed_sasl_mechs);
slapi_ch_array_free(slapdFrontendConfig->allowed_sasl_mechs_array);
slapdFrontendConfig->allowed_sasl_mechs = nval;
- slapdFrontendConfig->allowed_sasl_mechs_array = slapi_str2charray_ext(tmp_array_nval, " ", 0);
- slapi_ch_free_string(&tmp_array_nval);
+ slapdFrontendConfig->allowed_sasl_mechs_array = nval_array;
CFG_UNLOCK_WRITE(slapdFrontendConfig);
} else {
/* If this value is "", we need to set the list to *all* possible mechs */
@@ -8593,19 +8643,6 @@ slapi_err2string(int result)
return ldap_err2string(result);
}
-/* replace commas with spaces */
-static void
-remove_commas(char *str)
-{
- int i;
-
- for (i = 0; str && str[i]; i++) {
- if (str[i] == ',') {
- str[i] = ' ';
- }
- }
-}
-
/*
* Check the SASL mechanism values
*
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index d4111cb42..41308f12c 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -304,7 +304,9 @@ int config_set_pw_syntax(const char *attrname, char *value, char *errorbuf, int
int32_t config_set_pw_palindrome(const char *attrname, char *value, char *errorbuf, int apply);
int32_t config_set_pw_dict_check(const char *attrname, char *value, char *errorbuf, int apply);
int32_t config_set_pw_dict_path(const char *attrname, char *value, char *errorbuf, int apply);
+char **config_get_pw_user_attrs_array(void);
int32_t config_set_pw_user_attrs(const char *attrname, char *value, char *errorbuf, int apply);
+char **config_get_pw_bad_words_array(void);
int32_t config_set_pw_bad_words(const char *attrname, char *value, char *errorbuf, int apply);
int32_t config_set_pw_max_seq_sets(const char *attrname, char *value, char *errorbuf, int apply);
int32_t config_set_pw_max_seq(const char *attrname, char *value, char *errorbuf, int apply);
@@ -862,6 +864,7 @@ void slapd_nasty(char *str, int c, int err);
int strarray2str(char **a, char *buf, size_t buflen, int include_quotes);
int slapd_chown_if_not_owner(const char *filename, uid_t uid, gid_t gid);
int slapd_comp_path(char *p0, char *p1);
+void replace_char(char *name, char c, char c2);
/*
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index 7d67a67ee..2472cb404 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -1078,6 +1078,7 @@ check_pw_syntax_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals, c
int num_repeated = 0;
int max_repeated = 0;
int num_categories = 0;
+ char **bad_words_array;
pwd = (char *)slapi_value_get_string(vals[i]);
@@ -1099,13 +1100,16 @@ check_pw_syntax_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals, c
}
/* Check for bad words */
- if (pwpolicy->pw_bad_words) {
- for (size_t b = 0; pwpolicy->pw_bad_words && pwpolicy->pw_bad_words[b]; b++) {
- if (strcasestr(pwd, pwpolicy->pw_bad_words[b])) {
+ bad_words_array = config_get_pw_bad_words_array();
+ if (bad_words_array) {
+ for (size_t b = 0; bad_words_array && bad_words_array[b]; b++) {
+ if (strcasestr(pwd, bad_words_array[b])) {
report_pw_violation(pb, pwresponse_req, "Password contains a restricted word");
+ charray_free(bad_words_array);
return (1);
}
}
+ charray_free(bad_words_array);
}
/* Check for sequences */
@@ -1320,6 +1324,7 @@ check_pw_syntax_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals, c
/* check for trivial words if syntax checking is enabled */
if (pwpolicy->pw_syntax == LDAP_ON) {
+ char **user_attrs_array;
/* e is null if this is an add operation*/
if (check_trivial_words(pb, e, vals, "uid", pwpolicy->pw_mintokenlength, smods) == 1 ||
check_trivial_words(pb, e, vals, "cn", pwpolicy->pw_mintokenlength, smods) == 1 ||
@@ -1334,15 +1339,18 @@ check_pw_syntax_ext(Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals, c
return 1;
}
/* Check user attributes */
- if (pwpolicy->pw_cmp_attrs) {
- for (size_t a = 0; pwpolicy->pw_cmp_attrs && pwpolicy->pw_cmp_attrs[a]; a++) {
- if (check_trivial_words(pb, e, vals, pwpolicy->pw_cmp_attrs[a], pwpolicy->pw_mintokenlength, smods) == 1 ){
+ user_attrs_array = config_get_pw_user_attrs_array();
+ if (user_attrs_array) {
+ for (size_t a = 0; user_attrs_array && user_attrs_array[a]; a++) {
+ if (check_trivial_words(pb, e, vals, user_attrs_array[a], pwpolicy->pw_mintokenlength, smods) == 1 ){
if (mod_op) {
slapi_entry_free(e);
}
+ charray_free(user_attrs_array);
return 1;
}
}
+ charray_free(user_attrs_array);
}
}
@@ -2247,35 +2255,32 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
}
} else if (!strcasecmp(attr_name, "passwordUserAttributes")) {
if ((sval = attr_get_present_values(attr))) {
- char **attrs = NULL;
- char *attr = NULL;
- char *token = NULL;
- char *next = NULL;
-
- token = slapi_ch_strdup(slapi_value_get_string(*sval));
- for (attr = ldap_utf8strtok_r(token, " ", &next); attr != NULL;
- attr = ldap_utf8strtok_r(NULL, " ", &next))
- {
- slapi_ch_array_add(&attrs, slapi_ch_strdup(attr));
- }
- slapi_ch_free_string(&token);
+ char *attrs = slapi_ch_strdup(slapi_value_get_string(*sval));
+ /* we need a separate string because it gets corrupted after slapi_str2charray_ext */
+ char *tmp_array_attrs = slapi_ch_strdup(attrs);
+
+ /* we should accept comma-separated lists but slapi_str2charray_ext will process only space-separated */
+ replace_char(tmp_array_attrs, ',', ' ');
+
pwdpolicy->pw_cmp_attrs = attrs;
+ /* Take list of attributes and break it up into a char array */
+ pwdpolicy->pw_cmp_attrs_array = slapi_str2charray_ext(tmp_array_attrs, " ", 0);
+ slapi_ch_free_string(&tmp_array_attrs);
}
} else if (!strcasecmp(attr_name, "passwordBadWords")) {
if ((sval = attr_get_present_values(attr))) {
- char **words = NULL;
- char *word = NULL;
- char *token = NULL;
- char *next = NULL;
-
- token = slapi_ch_strdup(slapi_value_get_string(*sval));
- for (word = ldap_utf8strtok_r(token, " ", &next); word != NULL;
- word = ldap_utf8strtok_r(NULL, " ", &next))
- {
- slapi_ch_array_add(&words, slapi_ch_strdup(word));
- }
- slapi_ch_free_string(&token);
+ char *words = slapi_ch_strdup(slapi_value_get_string(*sval));
+ /* we need a separate string because it gets corrupted after slapi_str2charray_ext */
+ char *tmp_array_words = slapi_ch_strdup(words);
+
+ /* we should accept comma-separated lists but slapi_str2charray_ext will process only space-separated */
+ replace_char(tmp_array_words, ',', ' ');
+
pwdpolicy->pw_bad_words = words;
+ /* Take list of attributes and break it up into a char array */
+ pwdpolicy->pw_bad_words_array = slapi_str2charray_ext(tmp_array_words, " ", 0);
+
+ slapi_ch_free_string(&tmp_array_words);
}
} else if (!strcasecmp(attr_name, "passwordMaxSequence")) {
if ((sval = attr_get_present_values(attr))) {
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 3b39a0a32..a4cae784a 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1813,10 +1813,12 @@ typedef struct passwordpolicyarray
the same character class. */
slapi_onoff_t pw_check_dict;
char *pw_dict_path; /* custom dictionary */
- char **pw_cmp_attrs; /* Space-separated list of attributes to see if the
+ char *pw_cmp_attrs; /* Comma-separated list of attributes to see if the
attribute values (and reversed values) in the entry
are contained in the new password. */
- char **pw_bad_words; /* Space-separated list of words to reject */
+ char **pw_cmp_attrs_array; /* Array of password user attributes */
+ char *pw_bad_words; /* Comma-separated list of words to reject */
+ char **pw_bad_words_array; /* Array of words to reject */
slapi_onoff_t pw_exp;
slapi_onoff_t pw_send_expiring;
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
index 6ee1aacdf..e563528a0 100644
--- a/ldap/servers/slapd/util.c
+++ b/ldap/servers/slapd/util.c
@@ -467,6 +467,17 @@ slapi_escape_filter_value(char *filter_str, int len)
}
}
+/* replace c with c2 in str */
+void
+replace_char(char *str, char c, char c2)
+{
+ for (size_t i = 0; (str != NULL) && (str[i] != NULL); i++) {
+ if (c == str[i]) {
+ str[i] = c2;
+ }
+ }
+}
+
/*
** This function takes a quoted attribute value of the form "abc",
** and strips off the enclosing quotes. It also deals with quoted
@@ -1635,4 +1646,4 @@ mkdir_p(char *dir, unsigned int mode)
}
return 0;
}
-}
\ No newline at end of file
+}
| 0 |
e3aac6618a00236b73e44b99d15abed647708187
|
389ds/389-ds-base
|
Ticket 395 - RFE: 389-ds shouldn't advertise in the rootDSE that we can handle a sasl mech if we really can't
Bug Description: The root DSE lists all the mechanisms the SASL library can handle (sasl_listmech), but that's
not necessarily what the server/co-products can support (e.g. communicating with IPA).
Fix Description: Added new config setting to specifiy the SASL mechanisms that are allowed. If none are specified,
than all are allowed. This setting now impacts the SASL callback SASL_CB_GETOPT(ids_sasl_getopt), so
it applies to all SASL operations. So, the root DSE information is correct, and you can now control
what mechanisms the server actually allows.
https://fedorahosted.org/389/ticket/395
Reviewed by: richm(Thanks!)
|
commit e3aac6618a00236b73e44b99d15abed647708187
Author: Mark Reynolds <[email protected]>
Date: Wed Dec 5 17:43:30 2012 -0500
Ticket 395 - RFE: 389-ds shouldn't advertise in the rootDSE that we can handle a sasl mech if we really can't
Bug Description: The root DSE lists all the mechanisms the SASL library can handle (sasl_listmech), but that's
not necessarily what the server/co-products can support (e.g. communicating with IPA).
Fix Description: Added new config setting to specifiy the SASL mechanisms that are allowed. If none are specified,
than all are allowed. This setting now impacts the SASL callback SASL_CB_GETOPT(ids_sasl_getopt), so
it applies to all SASL operations. So, the root DSE information is correct, and you can now control
what mechanisms the server actually allows.
https://fedorahosted.org/389/ticket/395
Reviewed by: richm(Thanks!)
diff --git a/ldap/servers/slapd/configdse.c b/ldap/servers/slapd/configdse.c
index b54062d5b..bd1566ef8 100644
--- a/ldap/servers/slapd/configdse.c
+++ b/ldap/servers/slapd/configdse.c
@@ -81,6 +81,7 @@ static const char *requires_restart[] = {
#endif
"cn=config:" CONFIG_RETURN_EXACT_CASE_ATTRIBUTE,
"cn=config:" CONFIG_SCHEMA_IGNORE_TRAILING_SPACES,
+ "cn=config:nsslapd-allowed-sasl-mechanisms",
"cn=config,cn=ldbm:nsslapd-idlistscanlimit",
"cn=config,cn=ldbm:nsslapd-parentcheck",
"cn=config,cn=ldbm:nsslapd-dbcachesize",
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index dee7812b6..ab366fc3a 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1006,6 +1006,10 @@ static struct config_get_and_set {
NULL, 0,
(void**)&global_slapdFrontendConfig.ndn_cache_max_size,
CONFIG_INT, (ConfigGetFunc)config_get_ndn_cache_size, DEFAULT_NDN_SIZE},
+ {CONFIG_ALLOWED_SASL_MECHS, config_set_allowed_sasl_mechs,
+ NULL, 0,
+ (void**)&global_slapdFrontendConfig.allowed_sasl_mechs,
+ CONFIG_STRING, (ConfigGetFunc)config_get_allowed_sasl_mechs, DEFAULT_ALLOWED_TO_DELETE_ATTRS},
#ifdef MEMPOOL_EXPERIMENTAL
,{CONFIG_MEMPOOL_SWITCH_ATTRIBUTE, config_set_mempool_switch,
NULL, 0,
@@ -1423,6 +1427,7 @@ FrontendConfig_init () {
cfg->entryusn_import_init = slapi_ch_strdup(ENTRYUSN_IMPORT_INIT);
cfg->allowed_to_delete_attrs = slapi_ch_strdup("nsslapd-listenhost nsslapd-securelistenhost nsslapd-defaultnamingcontext");
cfg->default_naming_context = NULL; /* store normalized dn */
+ cfg->allowed_sasl_mechs = NULL;
init_disk_monitoring = cfg->disk_monitoring = LDAP_OFF;
cfg->disk_threshold = 2097152; /* 2 mb */
@@ -6555,6 +6560,37 @@ config_set_allowed_to_delete_attrs( const char *attrname, char *value,
return retVal;
}
+char *
+config_get_allowed_sasl_mechs()
+{
+ char *retVal;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ CFG_LOCK_READ(slapdFrontendConfig);
+ retVal = slapdFrontendConfig->allowed_sasl_mechs;
+ CFG_UNLOCK_READ(slapdFrontendConfig);
+
+ return retVal;
+}
+
+/* separated list of sasl mechs to allow */
+int
+config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf, int apply )
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ if(!apply || slapdFrontendConfig->allowed_sasl_mechs){
+ /* we only set this at startup, if we try again just return SUCCESS */
+ return LDAP_SUCCESS;
+ }
+
+ CFG_LOCK_WRITE(slapdFrontendConfig);
+ slapdFrontendConfig->allowed_sasl_mechs = slapi_ch_strdup(value);
+ CFG_UNLOCK_WRITE(slapdFrontendConfig);
+
+ return LDAP_SUCCESS;
+}
+
char *
config_get_default_naming_context(void)
{
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index a17f40dff..37b46473f 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -549,6 +549,8 @@ int config_get_disk_logging_critical();
int config_get_ndn_cache_count();
size_t config_get_ndn_cache_size();
int config_get_ndn_cache_enabled();
+char *config_get_allowed_sasl_mechs();
+int config_set_allowed_sasl_mechs(const char *attrname, char *value, char *errorbuf, int apply);
PLHashNumber hashNocaseString(const void *key);
PRIntn hashNocaseCompare(const void *v1, const void *v2);
diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c
index f75e9773a..f9ddbfc5b 100644
--- a/ldap/servers/slapd/saslbind.c
+++ b/ldap/servers/slapd/saslbind.c
@@ -184,6 +184,8 @@ static int ids_sasl_getopt(
}
} else if (strcasecmp(option, "auxprop_plugin") == 0) {
*result = "iDS";
+ } else if (strcasecmp(option, "mech_list") == 0){
+ *result = config_get_allowed_sasl_mechs();
}
if (*result) *len = strlen(*result);
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 8b43f5af1..a510d8acd 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2048,6 +2048,7 @@ typedef struct _slapdEntryPoints {
#define CONFIG_DISK_LOGGING_CRITICAL "nsslapd-disk-monitoring-logging-critical"
#define CONFIG_NDN_CACHE "nsslapd-ndn-cache-enabled"
#define CONFIG_NDN_CACHE_SIZE "nsslapd-ndn-cache-max-size"
+#define CONFIG_ALLOWED_SASL_MECHS "nsslapd-allowed-sasl-mechanisms"
#ifdef MEMPOOL_EXPERIMENTAL
#define CONFIG_MEMPOOL_SWITCH_ATTRIBUTE "nsslapd-mempool"
@@ -2258,6 +2259,7 @@ typedef struct _slapdFrontendConfig {
char *entryusn_import_init; /* Entry USN: determine the initital value of import */
int pagedsizelimit;
char *default_naming_context; /* Default naming context (normalized) */
+ char *allowed_sasl_mechs; /* comma/space separated list of allowed sasl mechs */
/* disk monitoring */
int disk_monitoring;
| 0 |
16b151c38f14f9ca7eed6611df44c1c5d1fca42f
|
389ds/389-ds-base
|
Bug 630092 - Coverity #12000: Resource leaks issues
https://bugzilla.redhat.com/show_bug.cgi?id=630092
Description:
The plugin_setup() has been modified to release the value before
it returns.
|
commit 16b151c38f14f9ca7eed6611df44c1c5d1fca42f
Author: Endi Sukma Dewata <[email protected]>
Date: Fri Sep 17 16:58:53 2010 -0400
Bug 630092 - Coverity #12000: Resource leaks issues
https://bugzilla.redhat.com/show_bug.cgi?id=630092
Description:
The plugin_setup() has been modified to release the value before
it returns.
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index aa5442693..b8257d1b3 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -2270,6 +2270,7 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group,
plugin->plg_initfunc, plugin->plg_name,
plugin->plg_libpath);
status = -1;
+ slapi_ch_free((void**)&value);
goto PLUGIN_CLEANUP;
}
@@ -2277,8 +2278,7 @@ plugin_setup(Slapi_Entry *plugin_entry, struct slapi_componentid *group,
status = plugin_add_descriptive_attributes( plugin_entry, plugin );
}
- if (value)
- slapi_ch_free((void**)&value);
+ slapi_ch_free((void**)&value);
if(enabled)
{
| 0 |
668cbe6b64782a7d3e5a4c14b88945efab861233
|
389ds/389-ds-base
|
Resolves: bug 288451
Bug Description: Show-Stopper - Migration from HP-PARISC DS 6.21 to DS80 on HP-Itaninum
Reviewed by: nhosoi (Thanks!)
Fix Description: These are additional fixes for various and sundry problems:
1) If doing cross platform migration, just use the default db and changelog db dirs
2) If doing same platform migration, try to keep the db dir if the user has changed it. It will usually be changed for a good reason, like moving to a separate device for performance reasons. In order to see if the directory has changed, we need to "normalize" the path in the attribute so that we can compare it correctly.
3) Do not migrate the attributes nsslapd-db-private-import-mem and nsslapd-import-cache-autosize - just use the new default values for these
4) Do not migrate nsslapd-allidsthreshold - this has been removed
5) Add additional error checking and handling when migrating the local.conf information.
6) Change the brand in the sie and isie when migrating.
7) Update the Group in console.conf during migration.
Platforms tested: HP-UX 11.23 IPF64
Flag Day: no
Doc impact: no
QA impact: should be covered by regular nightly and manual testing
New Tests integrated into TET: none
|
commit 668cbe6b64782a7d3e5a4c14b88945efab861233
Author: Rich Megginson <[email protected]>
Date: Fri Sep 14 21:24:12 2007 +0000
Resolves: bug 288451
Bug Description: Show-Stopper - Migration from HP-PARISC DS 6.21 to DS80 on HP-Itaninum
Reviewed by: nhosoi (Thanks!)
Fix Description: These are additional fixes for various and sundry problems:
1) If doing cross platform migration, just use the default db and changelog db dirs
2) If doing same platform migration, try to keep the db dir if the user has changed it. It will usually be changed for a good reason, like moving to a separate device for performance reasons. In order to see if the directory has changed, we need to "normalize" the path in the attribute so that we can compare it correctly.
3) Do not migrate the attributes nsslapd-db-private-import-mem and nsslapd-import-cache-autosize - just use the new default values for these
4) Do not migrate nsslapd-allidsthreshold - this has been removed
5) Add additional error checking and handling when migrating the local.conf information.
6) Change the brand in the sie and isie when migrating.
7) Update the Group in console.conf during migration.
Platforms tested: HP-UX 11.23 IPF64
Flag Day: no
Doc impact: no
QA impact: should be covered by regular nightly and manual testing
New Tests integrated into TET: none
diff --git a/ldap/admin/src/scripts/DSMigration.pm.in b/ldap/admin/src/scripts/DSMigration.pm.in
index 089f41212..fa220a7b2 100644
--- a/ldap/admin/src/scripts/DSMigration.pm.in
+++ b/ldap/admin/src/scripts/DSMigration.pm.in
@@ -56,6 +56,9 @@ use DSCreate;
use File::Temp qw(tempfile tempdir);
use File::Basename qw(basename);
+# absolute path handling
+use Cwd qw(realpath);
+
# load perldap
use Mozilla::LDAP::Conn;
use Mozilla::LDAP::Utils qw(normalizeDN);
@@ -104,7 +107,14 @@ my %ignoreOld =
'nsslapd-ldapigidnumbertype' => 'nsslapd-ldapigidnumbertype',
'nsslapd-ldapientrysearchbase' => 'nsslapd-ldapientrysearchbase',
'nsslapd-ldapiautodnsuffix' => 'nsslapd-ldapiautodnsuffix',
- 'numsubordinates' => 'numSubordinates'
+ 'numsubordinates' => 'numSubordinates',
+ # for these, we just want to use the default values, even if they were
+ # set in 7.1 or later
+ 'nsslapd-db-private-import-mem' => 'nsslapd-db-private-import-mem',
+ 'nsslapd-import-cache-autosize' => 'nsslapd-import-cache-autosize',
+ # nsslapd-allidsthreshold does not exist anymore
+ # the analogous concept is nsslapd-idlistscanlimit for searches
+ 'nsslapd-allidsthreshold' => 'nsslapd-allidsthreshold'
);
# these are the obsolete entries we do not migrate
@@ -126,19 +136,23 @@ my %alwaysUseOld =
sub getNewDbDir {
my ($ent, $attr, $mig, $inst) = @_;
+ my $newval;
my %objclasses = map { lc($_) => $_ } $ent->getValues('objectclass');
my $cn = $ent->getValues('cn');
- my $oldval = $ent->getValues($attr);
- my $newval;
# there is one case where we want to just use the existing db directory
# that's the case where the user has moved the indexes and/or the
# transaction logs to different partitions for performance
# in that case, the old directory will not be the same as the default,
# and the directory will exist
- my $olddefault = "$mig->{actualsroot}/$inst";
- if (-d $oldval and ($oldval !~ /^$olddefault/)) {
- debug(2, "Keeping old value [$oldval] for attr $attr in entry ", $ent->getDN(), "\n");
- return $oldval;
+ # for cross platform, we should just use the new default location
+ if (!$mig->{crossplatform}) {
+ my $oldval = $ent->getValues($attr);
+ my $absoldval = realpath($oldval) || $oldval;
+ my $olddefault = "$mig->{actualsroot}/$inst";
+ if (-d $absoldval and ($absoldval !~ /^$olddefault/)) {
+ debug(2, "Keeping old value [$absoldval] for attr $attr in entry ", $ent->getDN(), "\n");
+ return $oldval;
+ }
}
# otherwise, just use the new default locations
if ("@with_fhs_opt@") {
| 0 |
d5c9c4e61f5075e139513af7e96e0990faa0935d
|
389ds/389-ds-base
|
Issue 51086 - Fix instance name length for interactive install
Description: Instance name lenght is not properly validated
during interactive install. Add a check during a user input.
https://pagure.io/389-ds-base/issue/51086
Reviewed by: mreynolds (Thanks!)
|
commit d5c9c4e61f5075e139513af7e96e0990faa0935d
Author: Simon Pichugin <[email protected]>
Date: Thu Jul 23 23:45:18 2020 +0200
Issue 51086 - Fix instance name length for interactive install
Description: Instance name lenght is not properly validated
during interactive install. Add a check during a user input.
https://pagure.io/389-ds-base/issue/51086
Reviewed by: mreynolds (Thanks!)
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 418b71b57..d11729263 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -309,6 +309,9 @@ class SetupDs(object):
val = input('\nEnter the instance name [{}]: '.format(slapd['instance_name'])).rstrip()
if val != "":
+ if len(val) > 80:
+ print("Server identifier should not be longer than 80 symbols")
+ continue
if not all(ord(c) < 128 for c in val):
print("Server identifier can not contain non ascii characters")
continue
| 0 |
983c373ce8dfd43639d57543e7fe8e206f831b62
|
389ds/389-ds-base
|
Issue 49761 - Fix CI test suite issues
Description:
CI nightly runs are still broken in suites/plugins/rootdn_plugin_test.py in race conditions on bind operations.
Polling implemented to fix that.
Relates: https://pagure.io/389-ds-base/issue/49761
Author: sgouvern
Review by: spichugi
|
commit 983c373ce8dfd43639d57543e7fe8e206f831b62
Author: Sylvie Gouverneyre <[email protected]>
Date: Wed Mar 11 11:34:31 2020 +0000
Issue 49761 - Fix CI test suite issues
Description:
CI nightly runs are still broken in suites/plugins/rootdn_plugin_test.py in race conditions on bind operations.
Polling implemented to fix that.
Relates: https://pagure.io/389-ds-base/issue/49761
Author: sgouvern
Review by: spichugi
diff --git a/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py
index a053c2961..a56503721 100644
--- a/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py
+++ b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py
@@ -126,8 +126,13 @@ def test_rootdn_access_specific_time(topology_st, rootdn_setup, rootdn_cleanup,
raise Exception ("rootdn-open-time and rootdn-close-time were not updated")
# Bind as Root DN - should fail
- with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- dm.bind()
+ for i in range(0, timeout):
+ try:
+ dm.bind()
+ except ldap.UNWILLING_TO_PERFORM:
+ break
+ else:
+ time.sleep(.5)
# Set config to allow the entire day
@@ -148,7 +153,12 @@ def test_rootdn_access_specific_time(topology_st, rootdn_setup, rootdn_cleanup,
raise Exception ("rootdn-open-time and rootdn-close-time were not updated")
# Bind as Root DN - should succeed
- dm.bind()
+ for i in range(0, timeout):
+ try:
+ dm.bind()
+ break
+ except:
+ time.sleep(.5)
# Cleanup - undo the changes we made so the next test has a clean slate
assert plugin.apply_mods([(ldap.MOD_DELETE, 'rootdn-open-time'),
@@ -208,8 +218,13 @@ def test_rootdn_access_day_of_week(topology_st, rootdn_setup, rootdn_cleanup, ti
raise Exception ("rootdn-days-allowed was not updated")
# Bind as Root DN - should fail
- with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- dm.bind()
+ for i in range(0, timeout):
+ try:
+ dm.bind()
+ except ldap.UNWILLING_TO_PERFORM:
+ break
+ else:
+ time.sleep(.5)
# Set the allow days
plugin.set_days_allowed(allow_days)
@@ -226,8 +241,12 @@ def test_rootdn_access_day_of_week(topology_st, rootdn_setup, rootdn_cleanup, ti
raise Exception ("rootdn-days-allowed was not updated")
# Bind as Root DN - should succeed
- dm.bind()
-
+ for i in range(0, timeout):
+ try:
+ dm.bind()
+ break
+ except:
+ time.sleep(.5)
def test_rootdn_access_denied_ip(topology_st, rootdn_setup, rootdn_cleanup, timeout=5):
"""Test denied IP feature - we can just test denying 127.0.0.1
@@ -263,8 +282,13 @@ def test_rootdn_access_denied_ip(topology_st, rootdn_setup, rootdn_cleanup, time
# Bind as Root DN - should fail
uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port)
- with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- rootdn_bind(topology_st.standalone, uri=uri)
+ for i in range(0, timeout):
+ try:
+ rootdn_bind(topology_st.standalone, uri=uri)
+ except ldap.UNWILLING_TO_PERFORM:
+ break
+ else:
+ time.sleep(.5)
# Change the denied IP so root DN succeeds
plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')])
@@ -281,7 +305,12 @@ def test_rootdn_access_denied_ip(topology_st, rootdn_setup, rootdn_cleanup, time
raise Exception ("rootdn-deny-ip was not updated")
# Bind as Root DN - should succeed
- rootdn_bind(topology_st.standalone, uri=uri)
+ for i in range(0, timeout):
+ try:
+ rootdn_bind(topology_st.standalone, uri=uri)
+ break
+ except:
+ time.sleep(.5)
def test_rootdn_access_denied_host(topology_st, rootdn_setup, rootdn_cleanup, timeout=5):
@@ -320,8 +349,13 @@ def test_rootdn_access_denied_host(topology_st, rootdn_setup, rootdn_cleanup, ti
# Bind as Root DN - should fail
uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port)
- with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- rootdn_bind(topology_st.standalone, uri=uri)
+ for i in range(0, timeout):
+ try:
+ rootdn_bind(topology_st.standalone, uri=uri)
+ except ldap.UNWILLING_TO_PERFORM:
+ break
+ else:
+ time.sleep(.5)
# Change the denied host so root DN bind succeeds
rand_host = 'i.dont.exist.{}'.format(uuid.uuid4())
@@ -339,8 +373,12 @@ def test_rootdn_access_denied_host(topology_st, rootdn_setup, rootdn_cleanup, ti
raise Exception ("rootdn-deny-host was not updated")
# Bind as Root DN - should succeed
- rootdn_bind(topology_st.standalone, uri=uri)
-
+ for i in range(0, timeout):
+ try:
+ rootdn_bind(topology_st.standalone, uri=uri)
+ break
+ except:
+ time.sleep(.5)
def test_rootdn_access_allowed_ip(topology_st, rootdn_setup, rootdn_cleanup, timeout=5):
"""Test allowed ip feature
@@ -376,9 +414,14 @@ def test_rootdn_access_allowed_ip(topology_st, rootdn_setup, rootdn_cleanup, tim
raise Exception ("rootdn-allow-ip was not updated")
# Bind as Root DN - should fail
- uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port)
- with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- rootdn_bind(topology_st.standalone, uri=uri)
+ uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port)
+ for i in range(0, timeout):
+ try:
+ rootdn_bind(topology_st.standalone, uri=uri)
+ except ldap.UNWILLING_TO_PERFORM:
+ break
+ else:
+ time.sleep(.5)
# Allow localhost
plugin.add_allow_ip('127.0.0.1')
@@ -396,8 +439,12 @@ def test_rootdn_access_allowed_ip(topology_st, rootdn_setup, rootdn_cleanup, tim
raise Exception ("rootdn-allow-ip was not updated")
# Bind as Root DN - should succeed
- rootdn_bind(topology_st.standalone, uri=uri)
-
+ for i in range(0, timeout):
+ try:
+ rootdn_bind(topology_st.standalone, uri=uri)
+ break
+ except:
+ time.sleep(.5)
def test_rootdn_access_allowed_host(topology_st, rootdn_setup, rootdn_cleanup, timeout=5):
"""Test allowed host feature
@@ -435,8 +482,13 @@ def test_rootdn_access_allowed_host(topology_st, rootdn_setup, rootdn_cleanup, t
# Bind as Root DN - should fail
uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port)
- with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- rootdn_bind(topology_st.standalone, uri=uri)
+ for i in range(0, timeout):
+ try:
+ rootdn_bind(topology_st.standalone, uri=uri)
+ except ldap.UNWILLING_TO_PERFORM:
+ break
+ else:
+ time.sleep(.5)
# Allow localhost
plugin.remove_all_allow_host()
@@ -456,8 +508,12 @@ def test_rootdn_access_allowed_host(topology_st, rootdn_setup, rootdn_cleanup, t
raise Exception ("rootdn-allow-host was not updated")
# Bind as Root DN - should succeed
- rootdn_bind(topology_st.standalone, uri=uri)
-
+ for i in range(0, timeout):
+ try:
+ rootdn_bind(topology_st.standalone, uri=uri)
+ break
+ except:
+ time.sleep(.5)
def test_rootdn_config_validate(topology_st, rootdn_setup, rootdn_cleanup):
"""Test plugin configuration validation
| 0 |
7cd8196f272d1cfacb767e2d4e6b04db325cae5c
|
389ds/389-ds-base
|
Updated man page with the new update options and documentation.
|
commit 7cd8196f272d1cfacb767e2d4e6b04db325cae5c
Author: Rich Megginson <[email protected]>
Date: Wed Sep 30 11:28:39 2009 -0600
Updated man page with the new update options and documentation.
diff --git a/ldap/admin/src/scripts/setup-ds.res.in b/ldap/admin/src/scripts/setup-ds.res.in
index c4be6589b..57e7654b0 100644
--- a/ldap/admin/src/scripts/setup-ds.res.in
+++ b/ldap/admin/src/scripts/setup-ds.res.in
@@ -140,7 +140,7 @@ error_loading_update = Error: not applying update '%s'. Error: %s\n
error_unknown_update = Error: cannot apply update '%s'. Not a recognized update type.\n
error_executing_update = Error: update '%s' returned code '%s': %s\n
error_updating = Error: could not update the directory server.\n
-update_successful = Finished successful update of directory server.\n
+update_successful = Finished successful update of directory server.\nPlease restart your directory servers.\n
update_dialog_first = This program will update the %s Directory Server.\n\nIt is recommended that you have "root" privilege to perform the update.\nTips for using this program:\n - Press "Enter" to choose the default and go to the next screen\n - Type "Control-B" or the word "back" then "Enter" to go back to the previous screen\n - Type "Control-C" to cancel the update\n\n
# %s -> brand
diff --git a/man/man8/setup-ds.pl.8 b/man/man8/setup-ds.pl.8
index 5598df674..e7eccc3e3 100644
--- a/man/man8/setup-ds.pl.8
+++ b/man/man8/setup-ds.pl.8
@@ -54,6 +54,12 @@ Do not delete the temporary .inf file generated by this program
.TP
.B \fB\-\-logfile\fR
Log setup messages to this file \- otherwise, a temp file will be used
+.TP
+.B \fB\-\-update\fR
+Update existing installations - add/update schema files, add/change configuration, add new instance scripts, etc. You must run setup-ds.pl -u after upgrading your packages. You will usually have to restart your servers in order for all of the changes to take effect (e.g. adding/changing plug-in configuration), so schedule this when you can afford a little downtime.
+.TP
+.B \fB\-\-continue\fR
+(Update only) Keep going even if errors occur. You will usually not need to use this option unless your installation is broken and you need to force it to update in order to fix it.
.PP
For all options, you can also use the short name e.g. \fB\-h\fR, \fB\-d\fR, etc. For the \fB\-d\fR argument,
specifying it more than once will increase the debug level e.g. \fB\-ddddd\fR
@@ -72,6 +78,27 @@ or
"slapd.Suffix=dc=example, dc=com"
.PP
Values passed in this manner will override values in an .inf file given with the \fB\-f\fR argument.
+.SH UPDATES
+The update process (setup-ds.pl -u) can work in one of two modes:
+
+\fBOnline\fR: Configuration changes are made to the running directory servers
+using LDAP. The operations must be performed as an administrative
+user. You must provide the name and password, for each instance if
+there is more than one instance of directory server. Some changes (e.g. plug-ins)
+may require a directory server restart to take effect. The update
+script will notify you if a restart is required.
+
+\fBOffline\fR: The changes are made to the server configuration files.
+The servers \fBMUST FIRST BE SHUTDOWN BY YOU\fR. The script will not
+shutdown the servers for you. \fBYou MUST\fR shutdown the servers in order
+to use this mode. A username and password are not required to use
+Offline mode. If the servers are not shutdown, \fBCHANGES WILL BE LOST\fR.
+
+To summarize:
+
+Online \- servers remain running - you must provide admin name and password for each server - servers may need to be restarted
+
+Offline \- servers \fBmust be shutdown\fR - no username or password required
.br
.SH AUTHOR
setup-ds.pl was written by the 389 Project.
| 0 |
436050e25f9814d7cf5e80d07cca29b3b8c7870d
|
389ds/389-ds-base
|
Large stack size for 64-bit builds: checkin lost in AOL/RH move.
|
commit 436050e25f9814d7cf5e80d07cca29b3b8c7870d
Author: David Boreham <[email protected]>
Date: Fri Jan 28 20:23:50 2005 +0000
Large stack size for 64-bit builds: checkin lost in AOL/RH move.
diff --git a/ldap/servers/slapd/csngen.c b/ldap/servers/slapd/csngen.c
index 16fe2afae..601317415 100644
--- a/ldap/servers/slapd/csngen.c
+++ b/ldap/servers/slapd/csngen.c
@@ -581,7 +581,7 @@ _csngen_adjust_local_time (CSNGen *gen, time_t cur_time)
* pointers are allocated 8 bytes but lower 4 bytes are used.
* The value 0 means use the default stacksize.
*/
-#if defined (OSF1)
+#if defined (OSF1) || defined(__LP64__) || defined (_LP64) /* 64-bit architectures need large stacks */
#define DEFAULT_THREAD_STACKSIZE 131072L
#else
#define DEFAULT_THREAD_STACKSIZE 0
| 0 |
47b3843284870848adc15bc4ab603f2cf0e5c7f7
|
389ds/389-ds-base
|
Bug 630094 - (cov#15457) Remove deadcode in import code
The skipit variable is set to zero shortly before we check if it
is 0 in an if condition. This if block can be removed since it
will never be hit. The entry that was being freed in the if block
is already removed earlier in the function if skipit was non-0
prior to resetting skipit to 0.
|
commit 47b3843284870848adc15bc4ab603f2cf0e5c7f7
Author: Nathan Kinder <[email protected]>
Date: Wed Sep 8 15:55:06 2010 -0700
Bug 630094 - (cov#15457) Remove deadcode in import code
The skipit variable is set to zero shortly before we check if it
is 0 in an if condition. This if block can be removed since it
will never be hit. The entry that was being freed in the if block
is already removed earlier in the function if skipit was non-0
prior to resetting skipit to 0.
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 7bdf32f70..1fc269b93 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -1613,10 +1613,6 @@ upgradedn_producer(void *param)
}
}
upgradedn_free_list(&ud_list);
- if (skipit) {
- slapi_entry_free(e); e = NULL;
- continue;
- }
ep = import_make_backentry(e, temp_id);
if (!ep || !ep->ep_entry) {
| 0 |
94b305200b3b4294d16fdda1e6d6cb41cb3b048f
|
389ds/389-ds-base
|
Bug(s) fixed: 181776
Bug Description: 64bit issues with normalize_path(), make_dn(), and
add_aci_v()
Reviewed by: Nathan and Noriko (Thanks!)
Fix Description: 1) use sizeof(char *) as the elemsize to pass to
slapi_ch_calloc(). 2) create a variable const char *NULLSTR = 0 to pass
as the last argument to the varargs functions.
Platforms tested: RHEL4 64
Flag Day: no
Doc impact: no
|
commit 94b305200b3b4294d16fdda1e6d6cb41cb3b048f
Author: Rich Megginson <[email protected]>
Date: Thu Feb 16 17:39:53 2006 +0000
Bug(s) fixed: 181776
Bug Description: 64bit issues with normalize_path(), make_dn(), and
add_aci_v()
Reviewed by: Nathan and Noriko (Thanks!)
Fix Description: 1) use sizeof(char *) as the elemsize to pass to
slapi_ch_calloc(). 2) create a variable const char *NULLSTR = 0 to pass
as the last argument to the varargs functions.
Platforms tested: RHEL4 64
Flag Day: no
Doc impact: no
diff --git a/ldap/admin/src/cfg_sspt.c b/ldap/admin/src/cfg_sspt.c
index b378cac6b..7cd85f98a 100644
--- a/ldap/admin/src/cfg_sspt.c
+++ b/ldap/admin/src/cfg_sspt.c
@@ -54,6 +54,8 @@
#define TEST_CONFIG /* for testing cn=config40 dummy entry instead of real one */
+char* const NULLSTR = 0;
+
char* const class_top = "top";
char* const class_organization = "organization";
char* const class_organizationalUnit = "organizationalunit";
@@ -754,7 +756,7 @@ create_organizational_unit(LDAP* ld, char* base, char* unit, char *description,
if (!base)
entrydn = strdup(unit);
else
- entrydn = make_dn("%s=%s, %s", name_ou, unit, base, 0);
+ entrydn = make_dn("%s=%s, %s", name_ou, unit, base, NULLSTR);
if (!entry_exists(ld, entrydn))
{
@@ -1062,7 +1064,7 @@ create_configEntry(LDAP* ld)
return -1;
}
- entrydn = make_dn("%s=%s", name_cn, value_config40, 0);
+ entrydn = make_dn("%s=%s", name_cn, value_config40, NULLSTR);
if (!entry_exists(ld, entrydn))
{
LDAPMod* attrs[3];
@@ -1130,7 +1132,7 @@ create_group(LDAP* ld, char* base, char* group)
return -1;
}
- entrydn = make_dn("%s=%s, %s", name_cn, group, base, 0);
+ entrydn = make_dn("%s=%s, %s", name_cn, group, base, NULLSTR);
if (!entry_exists(ld, entrydn))
{
@@ -1379,7 +1381,7 @@ config_configEntry(LDAP* connection, QUERY_VARS* query)
{
/* initial ACIs for o=NetscapeRoot */
- int ret = add_aci_v (connection, value_config40DN, ACI_self_allow, 0);
+ int ret = add_aci_v (connection, value_config40DN, ACI_self_allow, NULLSTR);
return ret;
}
@@ -1418,7 +1420,7 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
/* parent dn of admin uid entry */
if (query->netscaperoot) {
parentDN = make_dn("%s, %s, %s", name_administratorsRDN,
- name_topologyRDN, query->netscaperoot, 0);
+ name_topologyRDN, query->netscaperoot, NULLSTR);
}
if (query->config_admin_uid) {
@@ -1428,12 +1430,12 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
configAdminDN = strdup(query->config_admin_uid);
} else if (parentDN) {
/* create a DN for admid */
- configAdminDN = make_dn(DN_formatUID, query->config_admin_uid, parentDN, 0);
+ configAdminDN = make_dn(DN_formatUID, query->config_admin_uid, parentDN, NULLSTR);
} else {
/* create one from scratch */
configAdminDN = make_dn("%s=%s, %s, %s, %s", name_uid, query->config_admin_uid,
name_administratorsRDN, name_topologyRDN,
- name_netscaperootDN, 0);
+ name_netscaperootDN, NULLSTR);
}
}
@@ -1444,7 +1446,7 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
{
if (configAdminDN && !is_root_user(configAdminDN, query)) {
add_aci_v(connection, query->suffix, ACI_user_allow_2,
- "all", configAdminDN, 0);
+ "all", configAdminDN, NULLSTR);
}
status = create_group(connection, query->suffix, name_localDAGroup);
@@ -1465,13 +1467,13 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
adminGroupDN = make_dn("%s, %s=%s, %s, %s", value_configAdminGroupRDN,
name_ou, value_groupsOU,
name_topologyRDN,
- query->netscaperoot, 0);
+ query->netscaperoot, NULLSTR);
}
if (query->suffix)
{
localDAGroupDN = make_dn("cn=%s, %s", name_localDAGroup,
- query->suffix, 0);
+ query->suffix, NULLSTR);
}
else
{
@@ -1483,20 +1485,20 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
add_aci_v(connection, entryAndAccessList[ii].entryDN,
ACI_config_admin_group_allow,
entryAndAccessList[ii].access,
- adminGroupDN, 0);
+ adminGroupDN, NULLSTR);
}
if (configAdminDN && !is_root_user(configAdminDN, query)) {
add_aci_v(connection, entryAndAccessList[ii].entryDN,
ACI_user_allow_2,
entryAndAccessList[ii].access,
- configAdminDN, 0);
+ configAdminDN, NULLSTR);
}
if (localDAGroupDN)
{
add_aci_v(connection, entryAndAccessList[ii].entryDN,
ACI_local_DA_allow,
entryAndAccessList[ii].access,
- localDAGroupDN, 0);
+ localDAGroupDN, NULLSTR);
}
}
}
@@ -1512,22 +1514,22 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
ACI_config_admin_group_allow_all,
value_configAdminGroupRDN,
name_ou, value_groupsOU, name_topologyRDN,
- query->netscaperoot, 0);
+ query->netscaperoot, NULLSTR);
if (!status)
status = add_aci_v(connection, query->netscaperoot,
ACI_anonymous_allow_with_filter,
- query->netscaperoot, 0);
+ query->netscaperoot, NULLSTR);
if (!status)
status = add_aci_v(connection, query->netscaperoot, ACI_group_expansion,
- query->netscaperoot, 0);
+ query->netscaperoot, NULLSTR);
/* create "topologyOU, netscaperoot" entry and set ACIs */
if (!status)
{
char *dn = make_dn("%s, %s", name_topologyRDN,
- query->netscaperoot, 0);
+ query->netscaperoot, NULLSTR);
status = create_organizational_unit(connection, NULL, dn,
value_topologyDESC,
0, 0, 0);
@@ -1542,7 +1544,7 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
if (!status)
{
char *dn = make_dn("%s=%s, %s, %s", name_ou, value_groupsOU,
- name_topologyRDN, query->netscaperoot, 0);
+ name_topologyRDN, query->netscaperoot, NULLSTR);
status = create_organizational_unit (connection, NULL, dn,
value_groupsDesc, 0, 0, 0);
free(dn);
@@ -1552,7 +1554,7 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
if (!status)
{
char *dn = make_dn("%s, %s, %s", name_administratorsRDN,
- name_topologyRDN, query->netscaperoot, 0);
+ name_topologyRDN, query->netscaperoot, NULLSTR);
status = create_organizational_unit (connection, NULL, dn,
value_administratorsDESC,
0, 0, 0);
@@ -1564,7 +1566,7 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
{
char *dn = make_dn("%s=%s, %s, %s", name_ou, value_groupsOU,
name_topologyRDN,
- query->netscaperoot, 0);
+ query->netscaperoot, NULLSTR);
status = create_group (connection, dn, value_configAdminGroupCN);
free(dn);
}
@@ -1575,7 +1577,7 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
/* group to add the uid to */
char *groupdn = make_dn("%s, %s=%s, %s, %s", value_configAdminGroupRDN,
name_ou, value_groupsOU, name_topologyRDN,
- query->netscaperoot, 0);
+ query->netscaperoot, NULLSTR);
create_ssadmin_user(connection, parentDN,
query->ssAdmID, query->ssAdmPW1);
@@ -1585,7 +1587,7 @@ config_suitespot(SLAPD_CONFIG* slapd, QUERY_VARS* query)
}
admin_domainDN = make_dn("%s=%s, %s", name_ou, query->admin_domain,
- query->netscaperoot, 0);
+ query->netscaperoot, NULLSTR);
if (!status)
status = create_organizational_unit (connection, 0,
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
index cb695fc1e..9870e3516 100644
--- a/ldap/servers/slapd/util.c
+++ b/ldap/servers/slapd/util.c
@@ -408,8 +408,8 @@ normalize_path(char *path)
char *dname = slapi_ch_strdup(path);
char *dnamep = dname;
char *bnamep = NULL;
- char **dirs = (char **)slapi_ch_calloc(strlen(path), 1);
- char **rdirs = (char **)slapi_ch_calloc(strlen(path), 1);
+ char **dirs = (char **)slapi_ch_calloc(strlen(path), sizeof(char *));
+ char **rdirs = (char **)slapi_ch_calloc(strlen(path), sizeof(char *));
char **dp = dirs;
char **rdp;
do {
| 0 |
2108b4f63abbce6e58efc25b83993b0c1eccd5a3
|
389ds/389-ds-base
|
Issue 4595 - Paged search lookthroughlimit bug (#4602)
Bug Description: During a paged search with lookthroughlimit enabled,
lookthroughcount is used to keep track of how many entries are
examined. A paged search reads ahead one entry to catch the end of the
search so it doesn't show the prompt when there are no more entries.
lookthroughcount doesn't take read ahead into account when tracking
how many entries have been examined.
Fix Description: Keep lookthroughcount in sync with read ahead by
by decrementing it during read ahead roll back.
Fixes: https://github.com/389ds/389-ds-base/issues/4595
Relates: https://github.com/389ds/389-ds-base/issues/4513
Reviewed by: droideck, mreynolds389, Firstyear, progier389 (Many thanks)
|
commit 2108b4f63abbce6e58efc25b83993b0c1eccd5a3
Author: James Chapman <[email protected]>
Date: Fri Feb 19 16:32:22 2021 +0000
Issue 4595 - Paged search lookthroughlimit bug (#4602)
Bug Description: During a paged search with lookthroughlimit enabled,
lookthroughcount is used to keep track of how many entries are
examined. A paged search reads ahead one entry to catch the end of the
search so it doesn't show the prompt when there are no more entries.
lookthroughcount doesn't take read ahead into account when tracking
how many entries have been examined.
Fix Description: Keep lookthroughcount in sync with read ahead by
by decrementing it during read ahead roll back.
Fixes: https://github.com/389ds/389-ds-base/issues/4595
Relates: https://github.com/389ds/389-ds-base/issues/4513
Reviewed by: droideck, mreynolds389, Firstyear, progier389 (Many thanks)
diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py
index 5a7b75ef1..a206bdb38 100644
--- a/dirsrvtests/tests/suites/basic/basic_test.py
+++ b/dirsrvtests/tests/suites/basic/basic_test.py
@@ -95,6 +95,24 @@ def rootdse_attr(topology_st, request):
return rootdse_attr_name
+def change_conf_attr(topology_st, suffix, attr_name, attr_value):
+ """Change configuration attribute in the given suffix.
+
+ Returns previous attribute value.
+ """
+
+ entry = DSLdapObject(topology_st.standalone, suffix)
+
+ attr_value_bck = entry.get_attr_val_bytes(attr_name)
+ log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % (
+ attr_name, attr_value, attr_value_bck, suffix))
+ if attr_value is None:
+ entry.remove_all(attr_name)
+ else:
+ entry.replace(attr_name, attr_value)
+ return attr_value_bck
+
+
def test_basic_ops(topology_st, import_example_ldif):
"""Tests adds, mods, modrdns, and deletes operations
@@ -607,6 +625,73 @@ def test_basic_searches(topology_st, import_example_ldif):
log.info('test_basic_searches: PASSED')
[email protected]('limit,resp',
+ ((('200'), 'PASS'),
+ (('50'), ldap.ADMINLIMIT_EXCEEDED)))
+def test_basic_search_lookthroughlimit(topology_st, limit, resp, import_example_ldif):
+ """
+ Tests normal search with lookthroughlimit set high and low.
+
+ :id: b5119970-6c9f-41b7-9649-de9233226fec
+
+ :setup: Standalone instance, add example.ldif to the database, search filter (uid=*).
+
+ :steps:
+ 1. Import ldif user file.
+ 2. Change lookthroughlimit to 200.
+ 3. Bind to server as low priv user
+ 4. Run search 1 with "high" lookthroughlimit.
+ 5. Change lookthroughlimit to 50.
+ 6. Run search 2 with "low" lookthroughlimit.
+ 8. Delete user from DB.
+ 9. Reset lookthroughlimit to original.
+
+ :expectedresults:
+ 1. First search should complete with no error.
+ 2. Second search should return ldap.ADMINLIMIT_EXCEEDED error.
+ """
+
+ log.info('Running test_basic_search_lookthroughlimit...')
+
+ search_filter = "(uid=*)"
+
+ ltl_orig = change_conf_attr(topology_st, 'cn=config,cn=ldbm database,cn=plugins,cn=config', 'nsslapd-lookthroughlimit', limit)
+
+ try:
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, rdn=None)
+ user = users.create_test_user()
+ user.replace('userPassword', PASSWORD)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to create test user: error ' + e.args[0]['desc'])
+ assert False
+
+ try:
+ conn = UserAccount(topology_st.standalone, user.dn).bind(PASSWORD)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to bind test user: error ' + e.args[0]['desc'])
+ assert False
+
+ try:
+ if resp == ldap.ADMINLIMIT_EXCEEDED:
+ with pytest.raises(ldap.ADMINLIMIT_EXCEEDED):
+ searchid = conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter)
+ rtype, rdata = conn.result(searchid)
+ else:
+ searchid = conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter)
+ rtype, rdata = conn.result(searchid)
+ assert(len(rdata) == 151) #151 entries in the imported ldif file using "(uid=*)"
+ except ldap.LDAPError as e:
+ log.fatal('Failed to perform search: error ' + e.args[0]['desc'])
+ assert False
+
+ finally:
+ #Cleanup
+ change_conf_attr(topology_st, 'cn=config,cn=ldbm database,cn=plugins,cn=config', 'nsslapd-lookthroughlimit', ltl_orig)
+ user.delete()
+
+ log.info('test_basic_search_lookthroughlimit: PASSED')
+
+
@pytest.fixture(scope="module")
def add_test_entry(topology_st, request):
# Add test entry
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 10ede0667..5cc686700 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -1823,6 +1823,7 @@ ldbm_back_prev_search_results(Slapi_PBlock *pb)
sr->sr_entry = NULL;
}
idl_iterator_decrement(&(sr->sr_current));
+ --sr->sr_lookthroughcount;
}
return;
}
| 0 |
1d20c35b27f2cfcb9db42c87b56a6f731b8bb8dd
|
389ds/389-ds-base
|
170816 - Don't consume password changes if modify fails
|
commit 1d20c35b27f2cfcb9db42c87b56a6f731b8bb8dd
Author: Nathan Kinder <[email protected]>
Date: Fri Oct 14 17:58:19 2005 +0000
170816 - Don't consume password changes if modify fails
diff --git a/ldap/synctools/passwordsync/passsync/syncserv.cpp b/ldap/synctools/passwordsync/passsync/syncserv.cpp
index 247184ddd..bac518e8e 100644
--- a/ldap/synctools/passwordsync/passsync/syncserv.cpp
+++ b/ldap/synctools/passwordsync/passsync/syncserv.cpp
@@ -345,6 +345,11 @@ int PassSyncService::SyncPasswords()
// log modify failure.
timeStamp(&outLog);
outLog << "Modify password failed for remote entry: " << dn << endl;
+ // defer this change for later
+ timeStamp(&outLog);
+ outLog << "Deferring password change for " << currentPassInfo->username << endl;
+ currentPassInfo++;
+ continue;
}
else
{
| 0 |
c1912cdcac8319e2fe0f98f765aa935e6a8ff297
|
389ds/389-ds-base
|
Ticket 48215 - update dbverify usage in main.c
Description: Need to update dbverify usage in main.c
https://fedorahosted.org/389/ticket/48215
|
commit c1912cdcac8319e2fe0f98f765aa935e6a8ff297
Author: Mark Reynolds <[email protected]>
Date: Thu Aug 6 11:27:40 2015 -0400
Ticket 48215 - update dbverify usage in main.c
Description: Need to update dbverify usage in main.c
https://fedorahosted.org/389/ticket/48215
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 922de97b9..4f9fbfe8e 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -398,7 +398,7 @@ usage( char *name, char *extraname )
usagestr = "usage: %s %s%s-D configdir [-d debuglevel] [-N] -n backend-instance-name -a fullpath-backend-instance-dir-full\n";
break;
case SLAPD_EXEMODE_DBVERIFY:
- usagestr = "usage: %s %s%s-D configdir [-d debuglevel] [-n backend-instance-name]\n";
+ usagestr = "usage: %s %s%s-D configdir [-d debuglevel] [-n backend-instance-name] [-a db-directory]\n";
break;
default: /* SLAPD_EXEMODE_SLAPD */
| 0 |
5cc301fcd6cf20f179e77d27f03570fd8c7b5a43
|
389ds/389-ds-base
|
Ticket 48982 - When plugin doesn't enable, actually log the path it used
Bug Description: When trying to debug plugin enabling, actually return the
path we tried to enable.
Fix Description: Add path to the error result. Because this is only possible to
cn=Directory Manager it's not a risk.
https://fedorahosted.org/389/ticket/48982
Author: wibrown
Review by: One line, trivial fix
|
commit 5cc301fcd6cf20f179e77d27f03570fd8c7b5a43
Author: William Brown <[email protected]>
Date: Thu Oct 6 19:34:26 2016 +1000
Ticket 48982 - When plugin doesn't enable, actually log the path it used
Bug Description: When trying to debug plugin enabling, actually return the
path we tried to enable.
Fix Description: Add path to the error result. Because this is only possible to
cn=Directory Manager it's not a risk.
https://fedorahosted.org/389/ticket/48982
Author: wibrown
Review by: One line, trivial fix
diff --git a/ldap/servers/slapd/fedse.c b/ldap/servers/slapd/fedse.c
index 09f292a68..9c8869e65 100644
--- a/ldap/servers/slapd/fedse.c
+++ b/ldap/servers/slapd/fedse.c
@@ -1965,7 +1965,7 @@ check_plugin_path(Slapi_PBlock *pb,
if (res) {
if ((handle = dlopen(res, RTLD_NOW)) == NULL) {
*returncode = LDAP_UNWILLING_TO_PERFORM;
- PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,"Invalid plugin path - failed to open library");
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,"Invalid plugin path %s - failed to open library", res);
rc = SLAPI_DSE_CALLBACK_ERROR;
} else {
dlclose(handle);
| 0 |
f483225b19d9498e37a5a4a8457b6b1a39f45821
|
389ds/389-ds-base
|
Issue 6743 - CLI - dsidm add option to list DN's
Description: Add an option to list the full DN instead of the RDN
Relates: https://github.com/389ds/389-ds-base/issues/6734
Reviewed by: spichugi(Thanks!)
|
commit f483225b19d9498e37a5a4a8457b6b1a39f45821
Author: Mark Reynolds <[email protected]>
Date: Tue Apr 8 14:12:36 2025 -0400
Issue 6743 - CLI - dsidm add option to list DN's
Description: Add an option to list the full DN instead of the RDN
Relates: https://github.com/389ds/389-ds-base/issues/6734
Reviewed by: spichugi(Thanks!)
diff --git a/dirsrvtests/tests/suites/clu/dsidm_role_test.py b/dirsrvtests/tests/suites/clu/dsidm_role_test.py
index 5a7b3b9f5..65ef2a21f 100644
--- a/dirsrvtests/tests/suites/clu/dsidm_role_test.py
+++ b/dirsrvtests/tests/suites/clu/dsidm_role_test.py
@@ -19,7 +19,7 @@ from lib389.cli_idm.role import (
)
from lib389.topologies import topology_st
from lib389.cli_base import FakeArgs
-from lib389.utils import ds_is_older
+from lib389.utils import ds_is_older, is_a_dn
from lib389.idm.role import Roles, ManagedRoles, FilteredRoles, NestedRoles
from . import check_value_in_log_and_reset, check_value_in_log
@@ -266,7 +266,6 @@ def test_dsidm_role_delete(topology_st, create_test_managed_role):
assert not test_managed_role.exists()
[email protected](reason="DS6488")
@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented")
def test_dsidm_role_list(topology_st, create_test_managed_role):
""" Test dsidm role list option
@@ -277,10 +276,12 @@ def test_dsidm_role_list(topology_st, create_test_managed_role):
1. Run dsidm role list option without json
2. Check the output content is correct
3. Run dsidm role list option with json
- 4. Check the output content is correct
- 5. Delete the role
- 6. CHeck the role is not in the list with json
- 7. Check the role is not in the list without json
+ 4. Test "full_dn" option with list
+ 5. Check the output content is correct
+ 6. Delete the role
+ 7. Check the role is not in the list with json
+ 8. Check the role is not in the list without json
+
:expectedresults:
1. Success
2. Success
@@ -289,11 +290,13 @@ def test_dsidm_role_list(topology_st, create_test_managed_role):
5. Success
6. Success
7. Success
+ 8. Success
"""
standalone = topology_st.standalone
args = FakeArgs()
args.json = False
+ args.full_dn = False
json_list = ['type',
'list',
'items']
@@ -310,13 +313,25 @@ def test_dsidm_role_list(topology_st, create_test_managed_role):
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=managed_role_name)
+ log.info('Test full_dn option with list')
+ args.full_dn = True
+ list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert is_a_dn(json_result['items'][0])
+ args.full_dn = False
+
log.info('Delete the role')
- roles = Roles(standalone, DEFAULT_SUFFIX)
+ roles = ManagedRoles(standalone, DEFAULT_SUFFIX)
test_role = roles.get(managed_role_name)
test_role.delete()
log.info('Test empty dsidm role list with json')
+ topology_st.logcap.flush()
list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args)
+ result = topology_st.logcap.get_raw_outputs()
+ json_result = json.loads(result[0])
+ assert len(json_result['items']) == 0
check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=managed_role_name)
log.info('Test empty dsidm role list without json')
diff --git a/src/lib389/lib389/_mapped_object.py b/src/lib389/lib389/_mapped_object.py
index 7d34ceaee..1404c04f4 100644
--- a/src/lib389/lib389/_mapped_object.py
+++ b/src/lib389/lib389/_mapped_object.py
@@ -1168,17 +1168,22 @@ class DSLdapObjects(DSLogging, DSLints):
# functions with very little work on the behalf of the overloader
return self._childobject(instance=self._instance, dn=dn)
- def list(self, paged_search=None, paged_critical=True):
- """Get a list of children entries (DSLdapObject, Replica, etc.) using a base DN
- and objectClasses of our object (DSLdapObjects, Replicas, etc.)
-
- :param paged_search: None for no paged search, or an int of page size to use.
+ def list(self, paged_search=None, paged_critical=True, full_dn=False):
+ """Get a list of children entries (DSLdapObject, Replica, etc.) using
+ a base DN and objectClasses of our object (DSLdapObjects, Replicas,
+ etc.)
+
+ :param paged_search: None for no paged search, or an int of page size
+ to use.
+ :param paged_critical: pages search is critical
+ :param full_dn: Return a list of DN's instead of objects
:returns: A list of children entries
"""
# Filter based on the objectclasses and the basedn
insts = None
- # This will yield and & filter for objectClass with as many terms as needed.
+ # This will yield an & filter for objectClass with as many terms as
+ # needed.
filterstr = self._get_objectclass_filter()
self._log.debug('list filter = %s' % filterstr)
@@ -1188,7 +1193,9 @@ class DSLdapObjects(DSLogging, DSLints):
results = []
pages = 0
pctrls = []
- req_pr_ctrl = SimplePagedResultsControl(paged_critical, size=paged_search, cookie='')
+ req_pr_ctrl = SimplePagedResultsControl(paged_critical,
+ size=paged_search,
+ cookie='')
if self._server_controls is not None:
controls = [req_pr_ctrl] + self._server_controls
else:
@@ -1204,12 +1211,12 @@ class DSLdapObjects(DSLogging, DSLints):
escapehatch='i am sure'
)
self._log.info('Getting page %d' % (pages,))
- rtype, rdata, rmsgid, rctrls = self._instance.result3(msgid, escapehatch='i am sure')
+ rtype, rdata, rmsgid, rctrls = self._instance.result3(msgid,
+ escapehatch='i am sure')
results.extend(rdata)
pages += 1
self._log.debug("%s" % rctrls)
- pctrls = [ c for c in rctrls
- if c.controlType == SimplePagedResultsControl.controlType]
+ pctrls = [c for c in rctrls if c.controlType == SimplePagedResultsControl.controlType]
if pctrls and pctrls[0].cookie:
req_pr_ctrl.cookie = pctrls[0].cookie
if self._server_controls is not None:
@@ -1218,26 +1225,32 @@ class DSLdapObjects(DSLogging, DSLints):
controls = [req_pr_ctrl]
else:
break
- #End while
+ # End while
# Result3 doesn't map through Entry, so we have to do it manually.
results = [Entry(r) for r in results]
- insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results]
+ if full_dn:
+ insts = [r.dn for r in results]
+ else:
+ insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results]
# End paged search
else:
# If not paged
try:
results = _search_ext_s(self._instance,
- base=self._basedn,
- scope=self._scope,
- filterstr=filterstr,
- attrlist=self._list_attrlist,
- serverctrls=self._server_controls, clientctrls=self._client_controls,
- escapehatch='i am sure'
- )
- # def __init__(self, instance, dn=None):
- insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results]
+ base=self._basedn,
+ scope=self._scope,
+ filterstr=filterstr,
+ attrlist=self._list_attrlist,
+ serverctrls=self._server_controls,
+ clientctrls=self._client_controls,
+ escapehatch='i am sure')
+ if full_dn:
+ insts = [r.dn for r in results]
+ else:
+ insts = [self._entry_to_instance(dn=r.dn, entry=r) for r in results]
except ldap.NO_SUCH_OBJECT:
- # There are no objects to select from, se we return an empty array
+ # There are no objects to select from, se we return an empty
+ # array
insts = []
return insts
@@ -1287,7 +1300,9 @@ class DSLdapObjects(DSLogging, DSLints):
if len(results) == 0:
raise ldap.NO_SUCH_OBJECT(f"No object exists given the filter criteria: {criteria} {search_filter}")
if len(results) > 1:
- raise ldap.UNWILLING_TO_PERFORM(f"Too many objects matched selection criteria: {criteria} {search_filter}")
+ entry_dn = [e.dn for e in results]
+ raise ldap.UNWILLING_TO_PERFORM(f"Too many objects matched selection criteria: {criteria} {search_filter}"
+ f" - Please use 'get-by-dn' to specify which entry to get:\n {'\n '.join(entry_dn)}")
if json:
return self._entry_to_instance(results[0].dn, results[0]).get_all_attrs_json()
else:
diff --git a/src/lib389/lib389/cli_idm/__init__.py b/src/lib389/lib389/cli_idm/__init__.py
index bdd2d56f5..83be03c1b 100644
--- a/src/lib389/lib389/cli_idm/__init__.py
+++ b/src/lib389/lib389/cli_idm/__init__.py
@@ -107,7 +107,10 @@ def _warn(data, msg=None):
def _generic_list(inst, basedn, log, manager_class, args=None):
mc = manager_class(inst, basedn)
- ol = mc.list()
+ full_dn = False
+ if hasattr(args, 'full_dn') and getattr(args, 'full_dn') is not None:
+ full_dn = args.full_dn
+ ol = mc.list(full_dn=full_dn)
if len(ol) == 0:
if args and args.json:
log.info(json.dumps({"type": "list", "items": []}, indent=4))
@@ -118,7 +121,10 @@ def _generic_list(inst, basedn, log, manager_class, args=None):
if args and args.json:
json_result = {"type": "list", "items": []}
for o in ol:
- o_str = o.get_rdn_from_dn(o.dn)
+ if full_dn:
+ o_str = o # Already a string
+ else:
+ o_str = o.get_rdn_from_dn(o.dn)
if args and args.json:
json_result['items'].append(o_str)
else:
diff --git a/src/lib389/lib389/cli_idm/account.py b/src/lib389/lib389/cli_idm/account.py
index 1b2bf78e3..8b6f99549 100644
--- a/src/lib389/lib389/cli_idm/account.py
+++ b/src/lib389/lib389/cli_idm/account.py
@@ -233,14 +233,17 @@ like modify, locking and unlocking. To create an account, see "user" subcommand
subcommands = account_parser.add_subparsers(help='action')
- list_parser = subcommands.add_parser('list', help='list accounts that could login to the directory', formatter_class=CustomHelpFormatter)
+ list_parser = subcommands.add_parser('list',
+ help='list accounts that could login to the directory (returns the full DN of the entry)',
+ formatter_class=CustomHelpFormatter)
list_parser.set_defaults(func=list)
get_dn_parser = subcommands.add_parser('get-by-dn', help='get-by-dn <dn>', formatter_class=CustomHelpFormatter)
get_dn_parser.set_defaults(func=get_dn)
get_dn_parser.add_argument('dn', nargs='?', help='The dn to get and display')
- modify_dn_parser = subcommands.add_parser('modify-by-dn', help='modify-by-dn <dn> <add|delete|replace>:<attribute>:<value> ...', formatter_class=CustomHelpFormatter)
+ modify_dn_parser = subcommands.add_parser('modify-by-dn', help='modify-by-dn <dn> <add|delete|replace>:<attribute>:<value> ...',
+ formatter_class=CustomHelpFormatter)
modify_dn_parser.set_defaults(func=modify)
modify_dn_parser.add_argument('dn', nargs=1, help='The dn to get and display')
modify_dn_parser.add_argument('changes', nargs='+', help="A list of changes to apply in format: <add|delete|replace>:<attribute>:<value>")
@@ -249,7 +252,8 @@ like modify, locking and unlocking. To create an account, see "user" subcommand
rename_dn_parser.set_defaults(func=rename)
rename_dn_parser.add_argument('dn', help='The dn to rename')
rename_dn_parser.add_argument('new_dn', help='A new role dn')
- rename_dn_parser.add_argument('--keep-old-rdn', action='store_true', help="Specify whether the old RDN (i.e. 'cn: old_role') should be kept as an attribute of the entry or not")
+ rename_dn_parser.add_argument('--keep-old-rdn', action='store_true',
+ help="Specify whether the old RDN (i.e. 'cn: old_role') should be kept as an attribute of the entry or not")
delete_parser = subcommands.add_parser('delete', help='deletes the account', formatter_class=CustomHelpFormatter)
delete_parser.set_defaults(func=delete)
@@ -278,18 +282,23 @@ like modify, locking and unlocking. To create an account, see "user" subcommand
status_parser.add_argument('-o', '--become-inactive-on',
help="Only display entries that will become inactive before specified date (in a format 2007-04-25T14:30)")
- reset_pw_parser = subcommands.add_parser('reset_password', help='Reset the password of an account. This should be performed by a directory admin.', formatter_class=CustomHelpFormatter)
+ reset_pw_parser = subcommands.add_parser('reset_password', help='Reset the password of an account. This should be performed by a directory admin.',
+ formatter_class=CustomHelpFormatter)
reset_pw_parser.set_defaults(func=reset_password)
reset_pw_parser.add_argument('dn', nargs='?', help='The dn to reset the password for')
reset_pw_parser.add_argument('new_password', nargs='?', help='The new password to set')
- change_pw_parser = subcommands.add_parser('change_password', help='Change the password of an account. This can be performed by any user (with correct rights)', formatter_class=CustomHelpFormatter)
+ change_pw_parser = subcommands.add_parser('change_password',
+ help='Change the password of an account. This can be performed by any user (with correct rights)',
+ formatter_class=CustomHelpFormatter)
change_pw_parser.set_defaults(func=change_password)
change_pw_parser.add_argument('dn', nargs='?', help='The dn to change the password for')
change_pw_parser.add_argument('new_password', nargs='?', help='The new password to set')
change_pw_parser.add_argument('current_password', nargs='?', help='The accounts current password')
- bulk_update_parser = subcommands.add_parser('bulk_update', help='Perform a common operation to a set of entries', formatter_class=CustomHelpFormatter)
+ bulk_update_parser = subcommands.add_parser('bulk_update',
+ help='Perform a common operation to a set of entries',
+ formatter_class=CustomHelpFormatter)
bulk_update_parser.set_defaults(func=bulk_update)
bulk_update_parser.add_argument('basedn', help="Search base for finding entries, only the children of this DN are processed")
bulk_update_parser.add_argument('-f', '--filter', help="Search filter for finding entries, default is '(objectclass=*)'")
diff --git a/src/lib389/lib389/cli_idm/group.py b/src/lib389/lib389/cli_idm/group.py
index c16a711bd..ffd8d2372 100644
--- a/src/lib389/lib389/cli_idm/group.py
+++ b/src/lib389/lib389/cli_idm/group.py
@@ -114,6 +114,8 @@ def create_parser(subparsers):
list_parser = subcommands.add_parser('list', help='list', formatter_class=CustomHelpFormatter)
list_parser.set_defaults(func=list)
+ list_parser.add_argument('--full-dn', action='store_true',
+ help="Return the full DN of the entry instead of the RDN value")
get_parser = subcommands.add_parser('get', help='get', formatter_class=CustomHelpFormatter)
get_parser.set_defaults(func=get)
diff --git a/src/lib389/lib389/cli_idm/organizationalunit.py b/src/lib389/lib389/cli_idm/organizationalunit.py
index 12b5fe8c0..d823cb066 100644
--- a/src/lib389/lib389/cli_idm/organizationalunit.py
+++ b/src/lib389/lib389/cli_idm/organizationalunit.py
@@ -62,6 +62,8 @@ def create_parser(subparsers):
list_parser = subcommands.add_parser('list', help='list', formatter_class=CustomHelpFormatter)
list_parser.set_defaults(func=list)
+ list_parser.add_argument('--full-dn', action='store_true',
+ help="Return the full DN of the entry instead of the RDN value")
get_parser = subcommands.add_parser('get', help='get', formatter_class=CustomHelpFormatter)
get_parser.set_defaults(func=get)
diff --git a/src/lib389/lib389/cli_idm/posixgroup.py b/src/lib389/lib389/cli_idm/posixgroup.py
index 6318d9b00..15d31a264 100644
--- a/src/lib389/lib389/cli_idm/posixgroup.py
+++ b/src/lib389/lib389/cli_idm/posixgroup.py
@@ -72,6 +72,8 @@ def create_parser(subparsers):
list_parser = subcommands.add_parser('list', help='list', formatter_class=CustomHelpFormatter)
list_parser.set_defaults(func=list)
+ list_parser.add_argument('--full-dn', action='store_true',
+ help="Return the full DN of the entry instead of the RDN value")
get_parser = subcommands.add_parser('get', help='get', formatter_class=CustomHelpFormatter)
get_parser.set_defaults(func=get)
diff --git a/src/lib389/lib389/cli_idm/role.py b/src/lib389/lib389/cli_idm/role.py
index b14ee9d72..e0a7f09c6 100644
--- a/src/lib389/lib389/cli_idm/role.py
+++ b/src/lib389/lib389/cli_idm/role.py
@@ -150,6 +150,8 @@ def create_parser(subparsers):
list_parser = subcommands.add_parser('list', help='list roles that could login to the directory', formatter_class=CustomHelpFormatter)
list_parser.set_defaults(func=list)
+ list_parser.add_argument('--full-dn', action='store_true',
+ help="Return the full DN of the entry instead of the RDN value")
get_parser = subcommands.add_parser('get', help='get', formatter_class=CustomHelpFormatter)
get_parser.set_defaults(func=get)
diff --git a/src/lib389/lib389/cli_idm/service.py b/src/lib389/lib389/cli_idm/service.py
index 2429c88ca..319379b6a 100644
--- a/src/lib389/lib389/cli_idm/service.py
+++ b/src/lib389/lib389/cli_idm/service.py
@@ -65,6 +65,8 @@ def create_parser(subparsers):
list_parser = subcommands.add_parser('list', help='list', formatter_class=CustomHelpFormatter)
list_parser.set_defaults(func=list)
+ list_parser.add_argument('--full-dn', action='store_true',
+ help="Return the full DN of the entry instead of the RDN value")
get_parser = subcommands.add_parser('get', help='get', formatter_class=CustomHelpFormatter)
get_parser.set_defaults(func=get)
diff --git a/src/lib389/lib389/cli_idm/uniquegroup.py b/src/lib389/lib389/cli_idm/uniquegroup.py
index 205169466..1cce59c4a 100644
--- a/src/lib389/lib389/cli_idm/uniquegroup.py
+++ b/src/lib389/lib389/cli_idm/uniquegroup.py
@@ -113,6 +113,8 @@ def create_parser(subparsers):
list_parser = subcommands.add_parser('list', help='list', formatter_class=CustomHelpFormatter)
list_parser.set_defaults(func=list)
+ list_parser.add_argument('--full-dn', action='store_true',
+ help="Return the full DN of the entry instead of the RDN value")
get_parser = subcommands.add_parser('get', help='get', formatter_class=CustomHelpFormatter)
get_parser.set_defaults(func=get)
diff --git a/src/lib389/lib389/cli_idm/user.py b/src/lib389/lib389/cli_idm/user.py
index a29d8a479..e075ff2de 100644
--- a/src/lib389/lib389/cli_idm/user.py
+++ b/src/lib389/lib389/cli_idm/user.py
@@ -73,6 +73,8 @@ def create_parser(subparsers):
list_parser = subcommands.add_parser('list', help='list', formatter_class=CustomHelpFormatter)
list_parser.set_defaults(func=list)
+ list_parser.add_argument('--full-dn', action='store_true',
+ help="Return the full DN of the entry instead of the RDN value")
get_parser = subcommands.add_parser('get', help='get', formatter_class=CustomHelpFormatter)
get_parser.set_defaults(func=get)
diff --git a/src/lib389/lib389/idm/role.py b/src/lib389/lib389/idm/role.py
index 410fc523d..faa948856 100644
--- a/src/lib389/lib389/idm/role.py
+++ b/src/lib389/lib389/idm/role.py
@@ -54,6 +54,7 @@ class Role(DSLdapObject):
'LDAPsubentry',
'nsRoleDefinition',
]
+ self._protected = False
def _format_status_message(self, message, role_dn=None):
return {"state": message, "role_dn": role_dn}
| 0 |
7dfe8172890ed6a2d4ae1fa1b48b9bf3fdcf489c
|
389ds/389-ds-base
|
Bug 505722 - Allow ntGroup to have mail attribute present
The ntGroup objectclass currently doesn't allow the mail attribute
to be present. This causes groups synched from AD with a mail
attribute set to receive a schema violation.
This patch changes the definition of the ntGroup objectclass to
allow the mail attribute.
|
commit 7dfe8172890ed6a2d4ae1fa1b48b9bf3fdcf489c
Author: Nathan Kinder <[email protected]>
Date: Thu Jan 13 14:25:59 2011 -0800
Bug 505722 - Allow ntGroup to have mail attribute present
The ntGroup objectclass currently doesn't allow the mail attribute
to be present. This causes groups synched from AD with a mail
attribute set to receive a schema violation.
This patch changes the definition of the ntGroup objectclass to
allow the mail attribute.
diff --git a/ldap/schema/50ns-directory.ldif b/ldap/schema/50ns-directory.ldif
index 294a0a824..1c7ae35ed 100644
--- a/ldap/schema/50ns-directory.ldif
+++ b/ldap/schema/50ns-directory.ldif
@@ -114,7 +114,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2084 NAME 'nsSymmetricKey' DESC 'A symme
objectClasses: ( 2.16.840.1.113730.3.2.23 NAME 'netscapeDirectoryServer' DESC 'Netscape defined objectclass' SUP top MUST ( objectclass ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( nsDirectoryServer-oid NAME 'nsDirectoryServer' DESC 'Netscape defined objectclass' SUP top MUST ( objectclass $ nsServerID ) MAY ( serverHostName $ nsServerPort $ nsSecureServerPort $ nsBindPassword $ nsBindDN $ nsBaseDN ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.8 NAME 'ntUser' DESC 'Netscape defined objectclass' SUP top MUST ( ntUserDomainId ) MAY ( description $ l $ ou $ seeAlso $ ntUserPriv $ ntUserHomeDir $ ntUserComment $ ntUserFlags $ ntUserScriptPath $ ntUserAuthFlags $ ntUserUsrComment $ ntUserParms $ ntUserWorkstations $ ntUserLastLogon $ ntUserLastLogoff $ ntUserAcctExpires $ ntUserMaxStorage $ ntUserUnitsPerWeek $ ntUserLogonHours $ ntUserBadPwCount $ ntUserNumLogons $ ntUserLogonServer $ ntUserCountryCode $ ntUserCodePage $ ntUserUniqueId $ ntUserPrimaryGroupId $ ntUserProfile $ ntUserHomeDirDrive $ ntUserPasswordExpired $ ntUserCreateNewAccount $ ntUserDeleteAccount $ ntUniqueId) X-ORIGIN 'Netscape NT Synchronization' )
-objectClasses: ( 2.16.840.1.113730.3.2.9 NAME 'ntGroup' DESC 'Netscape defined objectclass' SUP top MUST ( ntUserDomainId ) MAY ( description $ l $ ou $ seeAlso $ ntGroupId $ ntGroupAttributes $ ntGroupCreateNewGroup $ ntGroupDeleteGroup $ ntGroupType $ ntUniqueId) X-ORIGIN 'Netscape NT Synchronization' )
+objectClasses: ( 2.16.840.1.113730.3.2.9 NAME 'ntGroup' DESC 'Netscape defined objectclass' SUP top MUST ( ntUserDomainId ) MAY ( description $ l $ ou $ seeAlso $ ntGroupId $ ntGroupAttributes $ ntGroupCreateNewGroup $ ntGroupDeleteGroup $ ntGroupType $ ntUniqueId $ mail ) X-ORIGIN 'Netscape NT Synchronization' )
objectClasses: ( 2.16.840.1.113730.3.2.82 NAME 'nsChangelog4Config' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.114 NAME 'nsConsumer4Config' DESC 'Netscape defined objectclass' SUP top MAY ( cn ) X-ORIGIN 'Netscape Directory Server' )
objectClasses: ( 2.16.840.1.113730.3.2.36 NAME 'LDAPReplica' DESC 'Netscape defined objectclass' SUP top MUST ( cn ) MAY ( description $ l $ ou $ seeAlso $ replicaRoot $ replicaHost $ replicaPort $ replicaBindDn $ replicaCredentials $ replicaBindMethod $ replicaUseSSL $ replicaUpdateSchedule $ replicaUpdateReplayed $ replicaUpdateFailedAt $ replicaBeginORC $ replicaNickname $ replicaEntryFilter $ replicatedAttributeList $ replicaCFUpdated $ replicaAbandonedChanges $ replicaLastRelevantChange ) X-ORIGIN 'Netscape Directory Server' )
| 0 |
32edbde0c2895ca31b3717cbb4abbd28b66b545b
|
389ds/389-ds-base
|
Ticket #48234 - "matching rules" in ACI's "bind rules not fully evaluated
Description: Extensible filter was added to slapi_str2filter. Thus, aci
containing such a filter was ignored.
aci: (targetattr = "telephoneNumber") (target = "ldap:///dc=example,dc=com")
(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)(userdn = "ldap
:///dc=example,dc=com??sub?(&(cn=admin)(ou:dn:=outest))");)
https://fedorahosted.org/389/ticket/48234
Reviewed by [email protected] (Thank you, William!!)
|
commit 32edbde0c2895ca31b3717cbb4abbd28b66b545b
Author: Noriko Hosoi <[email protected]>
Date: Fri Jun 10 17:27:50 2016 -0700
Ticket #48234 - "matching rules" in ACI's "bind rules not fully evaluated
Description: Extensible filter was added to slapi_str2filter. Thus, aci
containing such a filter was ignored.
aci: (targetattr = "telephoneNumber") (target = "ldap:///dc=example,dc=com")
(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)(userdn = "ldap
:///dc=example,dc=com??sub?(&(cn=admin)(ou:dn:=outest))");)
https://fedorahosted.org/389/ticket/48234
Reviewed by [email protected] (Thank you, William!!)
diff --git a/ldap/servers/slapd/filter.h b/ldap/servers/slapd/filter.h
index 83e9bcc9d..e5a7e441d 100644
--- a/ldap/servers/slapd/filter.h
+++ b/ldap/servers/slapd/filter.h
@@ -24,10 +24,14 @@ typedef int (*mrf_plugin_fn) (Slapi_PBlock*);
#define MRF_ANY_TYPE 1
#define MRF_ANY_VALUE 2
+/*
+ * To adjust the other structures in struct slapi_filter,
+ * the first field must be type and the second must be value.
+ */
typedef struct mr_filter_t {
- char* mrf_oid;
char* mrf_type;
struct berval mrf_value;
+ char* mrf_oid;
char mrf_dnAttrs;
mrFilterMatchFn mrf_match;
mrf_plugin_fn mrf_index;
diff --git a/ldap/servers/slapd/str2filter.c b/ldap/servers/slapd/str2filter.c
index ad696db02..1a4a6ec80 100644
--- a/ldap/servers/slapd/str2filter.c
+++ b/ldap/servers/slapd/str2filter.c
@@ -45,32 +45,28 @@ slapi_str2filter( char *str )
str++;
switch ( *str ) {
case '&':
- LDAPDebug( LDAP_DEBUG_FILTER, "slapi_str2filter: AND\n",
- 0, 0, 0 );
+ LDAPDebug0Args(LDAP_DEBUG_FILTER, "slapi_str2filter: AND\n");
str++;
f = str2list( str, LDAP_FILTER_AND );
break;
case '|':
- LDAPDebug( LDAP_DEBUG_FILTER, "put_filter: OR\n",
- 0, 0, 0 );
+ LDAPDebug0Args(LDAP_DEBUG_FILTER, "put_filter: OR\n");
str++;
f = str2list( str, LDAP_FILTER_OR );
break;
case '!':
- LDAPDebug( LDAP_DEBUG_FILTER, "put_filter: NOT\n",
- 0, 0, 0 );
+ LDAPDebug0Args(LDAP_DEBUG_FILTER, "put_filter: NOT\n");
str++;
f = str2list( str, LDAP_FILTER_NOT );
break;
default:
- LDAPDebug( LDAP_DEBUG_FILTER, "slapi_str2filter: simple\n",
- 0, 0, 0 );
+ LDAPDebug0Args(LDAP_DEBUG_FILTER, "slapi_str2filter: simple\n");
f = str2simple( str , 1 /* unescape_filter */);
break;
@@ -79,8 +75,7 @@ slapi_str2filter( char *str )
break;
default: /* assume it's a simple type=value filter */
- LDAPDebug( LDAP_DEBUG_FILTER, "slapi_str2filter: default\n", 0, 0,
- 0 );
+ LDAPDebug0Args(LDAP_DEBUG_FILTER, "slapi_str2filter: default\n");
f = str2simple( str , 1 /* unescape_filter */);
break;
@@ -209,7 +204,102 @@ filt_unescape_str(const char *instr, char *outstr, size_t outsize, size_t* outle
return 1; /* ok */
}
-
+/*
+ * Return value: 0 -- success
+ * : 1 -- failure
+ */
+static int
+_parse_ext_filter(char *str, char *p0, char **type, char **oid, char *dnAttrs)
+{
+ char *p1 = NULL;
+ char *p2 = NULL;
+
+ if (!type || !oid || !dnAttrs) {
+ return 1;
+ }
+ *type = NULL;
+ *oid = NULL;
+ *dnAttrs = '\0';
+ /*
+ * RFC 4515 examples
+ * 1 - (cn:caseExactMatch:=Fred Flintstone)
+ * 2 - (cn:=Betty Rubble)
+ * 3 - (sn:dn:2.4.6.8.10:=Barney Rubble)
+ * 4 - (o:dn:=Ace Industry)
+ * 5 - (:1.2.3:=Wilma Flintstone)
+ * 6 - (:DN:2.4.6.8.10:=Dino)
+ */
+ p1 = strchr(p0+1, ':');
+ if (p1) {
+ p2 = strchr(p1+1, ':');
+ }
+ if (p0 == str) {
+ *type = slapi_ch_strdup(""); /* no type */
+ if (p2) {
+ /* example 6 */
+ *p1 = *p2 = '\0';
+ if (strcasecmp(p0+1, "dn") == 0) {
+ *dnAttrs = -1;
+ *oid = slapi_ch_strdup(p1+1);
+ } else {
+ goto error;
+ }
+ } else if (p1) {
+ /* example 5 */
+ *p1 = '\0';
+ if (strcasecmp(p0+1, "dn") == 0) {
+ *dnAttrs = -1;
+ } else {
+ *oid = slapi_ch_strdup(p0+1);
+ }
+ } else {
+ goto error;
+ }
+ } else if (p2) {
+ /* example 3 */
+ *p0 = *p1 = *p2 = '\0';
+ *type = slapi_ch_strdup(str);
+ if (strcasecmp(p0+1, "dn") == 0) {
+ *dnAttrs = -1;
+ *oid = slapi_ch_strdup(p1+1);
+ } else {
+ goto error;
+ }
+ } else if (p1) {
+ *p0 = *p1 = '\0';
+ *type = slapi_ch_strdup(str);
+ if (strcasecmp(p0+1, "dn") == 0) {
+ /* example 4 */
+ *dnAttrs = -1;
+ } else {
+ /* example 1 */
+ *oid = slapi_ch_strdup(p0+1);
+ }
+ } else {
+ /* example 2 */
+ *type = slapi_ch_strdup(str);
+ *p0 = '\0';
+ }
+ if (p1) {
+ *p1 = ':';
+ }
+ if (p2) {
+ *p2 = ':';
+ }
+ return 0;
+
+error:
+ slapi_ch_free_string(type);
+ slapi_ch_free_string(oid);
+ if (p1) {
+ *p1 = ':';
+ }
+ if (p2) {
+ *p2 = ':';
+ }
+ return 1;
+}
+
/*
* The caller unescapes it if unescape_filter == 0.
*/
@@ -234,6 +324,7 @@ str2simple( char *str , int unescape_filter)
f = (struct slapi_filter *) slapi_ch_calloc( 1, sizeof(struct slapi_filter) );
switch ( *s ) {
+ char *extp = NULL;
case '<':
f->f_choice = LDAP_FILTER_LE;
break;
@@ -245,7 +336,17 @@ str2simple( char *str , int unescape_filter)
break;
default:
LDAP_UTF8INC(s);
- if ( str_find_star( value ) == NULL ) {
+ if ((extp = strchr(str, ':')) && (extp < value)) {
+ int rc;
+ char *endp = s; /* '=' */
+ *endp = '\0';
+ rc = _parse_ext_filter(str, extp, &f->f_mr_type, &f->f_mr_oid, &f->f_mr_dnAttrs);
+ if (rc) {
+ return NULL; /* error */
+ } else {
+ f->f_choice = LDAP_FILTER_EXTENDED;
+ }
+ } else if ( str_find_star( value ) == NULL ) {
f->f_choice = LDAP_FILTER_EQUALITY;
} else if ( strcmp( value, "*" ) == 0 ) {
f->f_choice = LDAP_FILTER_PRESENT;
@@ -271,21 +372,23 @@ str2simple( char *str , int unescape_filter)
f->f_type = slapi_ch_strdup( str );
*s = savechar;
} else if ( unescape_filter ) {
- int r;
+ int r;
char *unqstr;
size_t len = strlen(value), len2;
/* dup attr */
savechar = *s;
*s = 0;
- f->f_avtype = slapi_ch_strdup( str );
+ if (f->f_choice != LDAP_FILTER_EXTENDED) {
+ f->f_avtype = slapi_ch_strdup( str );
+ }
*s = savechar;
/* dup value */
savechar = value[len];
value[len] = 0;
unqstr = slapi_ch_calloc( 1, len+1);
- r= filt_unescape_str(value, unqstr, len, &len2, 1);
+ r = filt_unescape_str(value, unqstr, len, &len2, 1);
value[len] = savechar;
if (!r) {
slapi_filter_free(f, 1);
@@ -310,7 +413,9 @@ str2simple( char *str , int unescape_filter)
}
} else if ( !unescape_filter ) {
- f->f_avtype = slapi_ch_strdup( str );
+ if (f->f_choice != LDAP_FILTER_EXTENDED) {
+ f->f_avtype = slapi_ch_strdup( str );
+ }
f->f_avvalue.bv_val = slapi_ch_strdup ( value );
f->f_avvalue.bv_len = strlen ( f->f_avvalue.bv_val );
}
@@ -337,12 +442,12 @@ str2subvals( char *val, struct slapi_filter *f, int unescape_filter )
len = strlen(val);
unqval = slapi_ch_malloc(len+1);
if (!filt_unescape_str(val, unqval, len, &outlen, 0)) {
- slapi_ch_free((void **)&unqval);
- return -1;
+ slapi_ch_free((void **)&unqval);
+ return -1;
}
- unqval[outlen]= '\0';
+ unqval[outlen]= '\0';
} else {
- unqval = slapi_ch_strdup ( val );
+ unqval = slapi_ch_strdup ( val );
}
if (unqval && unqval[0]) {
if (gotstar == 0) {
| 0 |
773e89898d995f4dfecbe872dd6679f4ae2e542d
|
389ds/389-ds-base
|
Use PKG_CHECK_MODULES to detect the kerberos library
|
commit 773e89898d995f4dfecbe872dd6679f4ae2e542d
Author: Hugh McMaster <[email protected]>
Date: Fri Mar 22 21:54:49 2019 +1100
Use PKG_CHECK_MODULES to detect the kerberos library
diff --git a/Makefile.am b/Makefile.am
index 241630579..83bd1c9bd 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -171,7 +171,6 @@ DB_LINK = @db_lib@ -ldb-@db_libver@
SASL_LINK = $(SASL_LIBS)
NETSNMP_LINK = @netsnmp_lib@ @netsnmp_link@
PAM_LINK = -lpam
-KERBEROS_LINK = $(kerberos_lib)
EVENT_LINK = $(EVENT_LIBS)
PW_CRACK_LINK = -lcrack
@@ -1384,8 +1383,8 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/slapi_pal.c \
$(libavl_a_SOURCES)
-libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) @db_inc@ @kerberos_inc@ $(PCRE_CFLAGS) $(SDS_CPPFLAGS) $(SVRCORE_INCLUDES)
-libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LINK) $(PCRE_LIBS) $(THREADLIB) $(SYSTEMD_LINK) libsds.la libsvrcore.la
+libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) @db_inc@ $(KERBEROS_CFLAGS) $(PCRE_CFLAGS) $(SDS_CPPFLAGS) $(SVRCORE_INCLUDES)
+libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LIBS) $(PCRE_LIBS) $(THREADLIB) $(SYSTEMD_LINK) libsds.la libsvrcore.la
libslapd_la_LDFLAGS = $(AM_LDFLAGS) $(SLAPD_LDFLAGS)
diff --git a/configure.ac b/configure.ac
index 26e81d5d5..88458942f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -844,7 +844,18 @@ SASL_CFLAGS="$SASL_CFLAGS -I$includedir/sasl"
PKG_CHECK_MODULES([ICU], [icu-i18n >= 60.2])
m4_include(m4/netsnmp.m4)
-m4_include(m4/kerberos.m4)
+
+PKG_CHECK_MODULES([KERBEROS], [krb5])
+krb5_vendor=`$PKG_CONFIG --variable=vendor krb5`
+if test "$krb5_vendor" = "MIT"; then
+ AC_DEFINE(HAVE_KRB5, 1, [Define if you have Kerberos V])
+ save_LIBS="$LIBS"
+ LIBS="$KERBEROS_LIBS"
+ AC_CHECK_FUNCS([krb5_cc_new_unique])
+ LIBS="$save_LIBS"
+elif test "$krb5_vendor" = "Heimdal"; then
+ AC_DEFINE(HAVE_HEIMDAL_KERBEROS, 1, [Define if you have Heimdal Kerberos])
+fi
if $PKG_CONFIG --exists pcre; then
PKG_CHECK_MODULES([PCRE], [pcre])
diff --git a/m4/kerberos.m4 b/m4/kerberos.m4
deleted file mode 100644
index a82f57ad5..000000000
--- a/m4/kerberos.m4
+++ /dev/null
@@ -1,187 +0,0 @@
-# BEGIN COPYRIGHT BLOCK
-# Copyright (C) 2008 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# END COPYRIGHT BLOCK
-# -*- tab-width: 4; -*-
-# Configure paths for Kerberos
-
-dnl ========================================================
-dnl = Kerberos is used directly for server to server SASL/GSSAPI
-dnl = authentication (replication, chaining, etc.)
-dnl = This allows us to authenticate using a keytab without
-dnl = having to call kinit outside the process
-dnl ========================================================
-AC_CHECKING(for Kerberos)
-
-if test -z "$with_kerberos" ; then
- with_kerberos=yes # if not set on cmdline, set default
-fi
-
-AC_MSG_CHECKING(for --with-kerberos)
-AC_ARG_WITH(kerberos,
- AS_HELP_STRING([--with-kerberos@<:@=PATH@:>@], [Use the kerberos API in the server directly - allows the server to authenticate directly with a keytab - otherwise, SASL/GSSAPI auth depends on underlying SASL libraries and external kinit with a keytab - if PATH is not specified, look for kerberos in the system locations. This will attempt to use krb5-config from the PATH to find the libs and include dirs - you can specify KRB5_CONFIG_BIN to specify a different filename or absolute path. If krb5-config does not work, this will attempt to look in various system directories]),
- [
- if test "x$withval" = "xyes"; then
- AC_MSG_RESULT(yes)
- elif test "x$withval" = "xno"; then
- AC_MSG_RESULT(no)
- with_kerberos=
- elif test -d "$withval" -a -d "$withval/lib" -a -d "$withval/include" ; then
- AC_MSG_RESULT([using $withval])
- kerberos_incdir="$withval/include"
- kerberos_libdir="$withval/lib"
- else
- AC_MSG_RESULT(yes)
- AC_MSG_ERROR([kerberos not found in $withval])
- fi
- ],
- [
- AC_MSG_RESULT(no)
- with_kerberos=
- ]
-)
-
-AC_MSG_CHECKING(for --with-kerberos-inc)
-AC_ARG_WITH(kerberos-inc,
- AS_HELP_STRING([--with-kerberos-inc=PATH], [Allows you to explicitly set the directory containing the kerberos include files - implies use of kerberos]),
- [
- if test -f "$withval"/krb5.h; then
- AC_MSG_RESULT([using $withval])
- kerberos_incdir="$withval"
- with_kerberos=yes # implies use of kerberos
- else
- echo
- AC_MSG_ERROR([$withval/krb5.h not found])
- fi
- ],
- AC_MSG_RESULT(no)
-)
-
-AC_MSG_CHECKING(for --with-kerberos-lib)
-AC_ARG_WITH(kerberos-lib,
- AS_HELP_STRING([--with-kerberos-lib=PATH], [Allows you to explicitly set the directory containing the kerberos libraries - implies use of kerberos]),
- [
- if test -d "$withval"; then
- AC_MSG_RESULT([using $withval])
- kerberos_libdir="$withval"
- with_kerberos=yes # implies use of kerberos
- else
- echo
- AC_MSG_ERROR([$withval not found])
- fi
- ],
- AC_MSG_RESULT(no)
-)
-
-if test -n "$with_kerberos" ; then
- if test -z "$kerberos_incdir" -o -z "$kerberos_libdir" ; then
- dnl look for these using the krb5-config script
- dnl user can define KRB5_CONFIG_BIN to the full path
- dnl and filename of the script if it cannot or will not
- dnl be found in PATH
- if test -z "$KRB5_CONFIG_BIN" ; then
- AC_PATH_PROG(KRB5_CONFIG_BIN, krb5-config)
- fi
- if test -n "$KRB5_CONFIG_BIN" ; then
- AC_MSG_CHECKING(for kerberos with $KRB5_CONFIG_BIN)
- if test -z "$kerberos_libdir" ; then
- kerberos_lib=`$KRB5_CONFIG_BIN --libs krb5`
- fi
- if test -z "$kerberos_incdir" ; then
- kerberos_inc=`$KRB5_CONFIG_BIN --cflags krb5`
- fi
- dnl if using system includes, inc will be empty - ok
- if test -n "$kerberos_lib" ; then
- AC_MSG_RESULT([using kerberos found with $KRB5_CONFIG_BIN])
- have_krb5=yes
- fi
- fi
- fi
-fi
-
-if test -n "$with_kerberos" -a -z "$kerberos_lib" ; then
- # save these in order to set them to use the check macros below
- # like AC_CHECK_HEADERS, AC_CHECK_LIB, and AC_CHECK_FUNCS
- save_CPPFLAGS="$CPPFLAGS"
- if test -n "$kerberos_incdir" ; then
- CPPFLAGS="-I$kerberos_incdir $CPPFLAGS"
- fi
- save_LDFLAGS="$LDFLAGS"
- if test -n "$kerberos_libdir" ; then
- LDFLAGS="-L$kerberos_libdir $LDFLAGS"
- fi
- krb5_impl=mit
-
- dnl check for Heimdal Kerberos
- AC_CHECK_HEADERS(heim_err.h)
- if test $ac_cv_header_heim_err_h = yes ; then
- krb5_impl=heimdal
- fi
-
- if test "x$krb5_impl" = "xmit"; then
- AC_CHECK_LIB(k5crypto, main,
- [krb5crypto=k5crypto],
- [krb5crypto=crypto])
-
- AC_CHECK_LIB(krb5, main,
- [have_krb5=yes
- kerberos_lib="-lkrb5 -l$krb5crypto -lcom_err"],
- [have_krb5=no],
- [-l$krb5crypto -lcom_err])
-
- elif test "x$krb5_impl" = "xheimdal"; then
- AC_CHECK_LIB(des, main,
- [krb5crypto=des],
- [krb5crypto=crypto])
-
- AC_CHECK_LIB(krb5, main,
- [have_krb5=yes
- kerberos_lib="-lkrb5 -l$krb5crypto -lasn1 -lroken -lcom_err"],
- [have_krb5=no],
- [-l$krb5crypto -lasn1 -lroken -lcom_err])
-
- AC_DEFINE(HAVE_HEIMDAL_KERBEROS, 1,
- [define if you have HEIMDAL Kerberos])
-
- else
- have_krb5=no
- AC_MSG_WARN([Unrecognized Kerberos5 Implementation])
- fi
-
- # reset to original values
- CPPFLAGS="$save_CPPFLAGS"
- LDFLAGS="$save_LDFLAGS"
- if test -n "$kerberos_incdir" ; then
- kerberos_inc="-I$kerberos_incdir"
- fi
- if test -n "$kerberos_libdir" ; then
- kerberos_lib="-L$kerberos_libdir $kerberos_lib"
- fi
-fi
-
-dnl at this point kerberos_lib and kerberos_inc should be set
-
-if test -n "$with_kerberos" ; then
- if test "x$have_krb5" = "xyes" ; then
- AC_DEFINE(HAVE_KRB5, 1,
- [define if you have Kerberos V])
- else
- AC_MSG_ERROR([Required Kerberos 5 support not available])
- fi
-
- dnl look for the wonderfully time saving function krb5_cc_new_unique
- save_LIBS="$LIBS"
- LIBS="$kerberos_lib"
- save_CPPFLAGS="$CPPFLAGS"
- CPPFLAGS="$kerberos_inc $CPPFLAGS"
- AC_CHECK_FUNCS([krb5_cc_new_unique])
- LIBS="$save_LIBS"
- CPPFLAGS="$save_CPPFLAGS"
-fi
-
-AC_SUBST(kerberos_inc)
-AC_SUBST(kerberos_lib)
-AC_SUBST(kerberos_libdir)
| 0 |
c8bfd0cd85c3ec59e3fe90c2cf122f984db8288f
|
389ds/389-ds-base
|
Resolves: 459433
Summray: MMR: intensive conflict test crashes the server
Description: values2keys functions in the syntax plugin did not check the
existence of the input and output variable.
|
commit c8bfd0cd85c3ec59e3fe90c2cf122f984db8288f
Author: Noriko Hosoi <[email protected]>
Date: Fri Dec 5 23:57:49 2008 +0000
Resolves: 459433
Summray: MMR: intensive conflict test crashes the server
Description: values2keys functions in the syntax plugin did not check the
existence of the input and output variable.
diff --git a/ldap/servers/plugins/syntaxes/bin.c b/ldap/servers/plugins/syntaxes/bin.c
index 1ae58a9f4..6d6c76328 100644
--- a/ldap/servers/plugins/syntaxes/bin.c
+++ b/ldap/servers/plugins/syntaxes/bin.c
@@ -186,6 +186,14 @@ bin_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals,
{
int i;
+ if (NULL == ivals) {
+ return 1;
+ }
+ *ivals = NULL;
+ if (NULL == bvals) {
+ return 1;
+ }
+
if ( ftype != LDAP_FILTER_EQUALITY ) {
return( LDAP_PROTOCOL_ERROR );
}
diff --git a/ldap/servers/plugins/syntaxes/string.c b/ldap/servers/plugins/syntaxes/string.c
index 3142491bf..138d98af4 100644
--- a/ldap/servers/plugins/syntaxes/string.c
+++ b/ldap/servers/plugins/syntaxes/string.c
@@ -357,9 +357,12 @@ string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals,
Slapi_Value **bvlp;
char *w, *c, *p;
+ if (NULL == ivals) {
+ return 1;
+ }
+ *ivals = NULL;
if (NULL == bvals) {
- *ivals = NULL;
- return 0;
+ return 1;
}
switch ( ftype ) {
@@ -435,7 +438,7 @@ string_values2keys( Slapi_PBlock *pb, Slapi_Value **bvals,
* nsSubStrEnd: 2
* [...]
*
- * By default, begin == 2, middle == 3, end == 2 (defined in syntax.h)
+ * By default, begin == 3, middle == 3, end == 3 (defined in syntax.h)
*/
/* If nsSubStrLen is specified in each index entry,
| 0 |
97fa28886f19b8e26670ec7a20bfd58dc4521698
|
389ds/389-ds-base
|
Bug 614511 - fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
https://bugzilla.redhat.com/show_bug.cgi?id=614511
Resolves: bug 614511
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
description: Catch possible NULL pointer in acl_access_allowed() and acl__TestRights().
|
commit 97fa28886f19b8e26670ec7a20bfd58dc4521698
Author: Endi S. Dewata <[email protected]>
Date: Mon Jul 12 22:55:19 2010 -0500
Bug 614511 - fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
https://bugzilla.redhat.com/show_bug.cgi?id=614511
Resolves: bug 614511
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11846 - 11891
description: Catch possible NULL pointer in acl_access_allowed() and acl__TestRights().
diff --git a/ldap/servers/plugins/acl/acl.c b/ldap/servers/plugins/acl/acl.c
index a96a34b21..bdef64cca 100644
--- a/ldap/servers/plugins/acl/acl.c
+++ b/ldap/servers/plugins/acl/acl.c
@@ -338,6 +338,12 @@ acl_access_allowed(
goto cleanup_and_ret;
}
+ if ( !aclpb->aclpb_curr_entry_sdn ) {
+ slapi_log_error ( SLAPI_LOG_FATAL, plugin_name, "NULL aclpb_curr_entry_sdn \n" );
+ ret_val = LDAP_OPERATIONS_ERROR;
+ goto cleanup_and_ret;
+ }
+
/* check if aclpb is initialized or not */
TNF_PROBE_0_DEBUG(acl_aclpbinit_start,"ACL","");
acl_init_aclpb ( pb, aclpb, clientDn, 0 );
@@ -2738,8 +2744,13 @@ acl__TestRights(Acl_PBlock *aclpb,int access, char **right, char ** map_generic,
** If the handle has been evaluated before, we can
** cache the result.
*/
- if (((aci = aclpb->aclpb_deny_handles[i]) == NULL) && (i <= ACI_MAX_ELEVEL))
- continue;
+ if ((aci = aclpb->aclpb_deny_handles[i]) == NULL) {
+ if (i <= ACI_MAX_ELEVEL) {
+ continue;
+ } else {
+ break;
+ }
+ }
k++;
index = aci->aci_index;
slapi_log_error(SLAPI_LOG_ACL, plugin_name,
@@ -2944,9 +2955,13 @@ acl__TestRights(Acl_PBlock *aclpb,int access, char **right, char ** map_generic,
** If the handle has been evaluated before, we can
** cache the result.
*/
- aci = aclpb->aclpb_allow_handles[i];
- if (((aci = aclpb->aclpb_allow_handles[i]) == NULL) && (i <= ACI_MAX_ELEVEL))
- continue;
+ if ((aci = aclpb->aclpb_allow_handles[i]) == NULL) {
+ if (i <= ACI_MAX_ELEVEL) {
+ continue;
+ } else {
+ break;
+ }
+ }
k++;
index = aci->aci_index;
slapi_log_error(SLAPI_LOG_ACL, plugin_name,
| 0 |
a154ecfc9b144d825e400892c84d6327470d6e64
|
389ds/389-ds-base
|
Ticket 47358 - implement backend optimazation levels
Bug Description: it is an enhancement to be able to test different
otimizations and have the default bahaviour as fallback.
Fix Description: As described in the ticket, this fix introduces
three optimization switches.
1] bypass ruv update inside transaction, might be replaced by
fix for ticket 564
2] reverse order of txn begin/commit and backen lock/unlock
to benefit from fix #568
3] find and lock entry to modify before taking backend lock
to decrease time backend lock is held
https://fedorahosted.org/389/ticket/47358
Reviewed by: Rich, thanks
|
commit a154ecfc9b144d825e400892c84d6327470d6e64
Author: Ludwig Krispenz <[email protected]>
Date: Tue May 14 14:04:28 2013 +0200
Ticket 47358 - implement backend optimazation levels
Bug Description: it is an enhancement to be able to test different
otimizations and have the default bahaviour as fallback.
Fix Description: As described in the ticket, this fix introduces
three optimization switches.
1] bypass ruv update inside transaction, might be replaced by
fix for ticket 564
2] reverse order of txn begin/commit and backen lock/unlock
to benefit from fix #568
3] find and lock entry to modify before taking backend lock
to decrease time backend lock is held
https://fedorahosted.org/389/ticket/47358
Reviewed by: Rich, thanks
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index b0f87e1f0..464556128 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -654,8 +654,17 @@ struct ldbminfo {
int li_reslimit_rangelookthrough_handle;
int li_idl_update;
int li_old_idl_maxids;
+#define BACKEND_OPT_NO_RUV_UPDATE 0x01
+#define BACKEND_OPT_DBLOCK_INSIDE_TXN 0x02
+#define BACKEND_OPT_MANAGE_ENTRY_BEFORE_DBLOCK 0x04
+ int li_backend_opt_level;
};
+
+#define NO_RUV_UPDATE(li) (li->li_backend_opt_level & BACKEND_OPT_NO_RUV_UPDATE)
+#define DBLOCK_INSIDE_TXN(li) (li->li_backend_opt_level & BACKEND_OPT_DBLOCK_INSIDE_TXN)
+#define MANAGE_ENTRY_BEFORE_DBLOCK(li) (li->li_backend_opt_level & BACKEND_OPT_MANAGE_ENTRY_BEFORE_DBLOCK)
+
/* li_flags could store these bits defined in ../slapi-plugin.h
* task flag (pb_task_flags) *
* SLAPI_TASK_RUNNING_AS_TASK
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 607f80206..2f05668a5 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -3610,12 +3610,19 @@ dblayer_txn_begin(backend *be, back_txnid parent_txn, back_txn *txn)
{
struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
int rc = 0;
- if (SERIALLOCK(li)) {
- dblayer_lock_backend(be);
- }
- rc = dblayer_txn_begin_ext(li,parent_txn,txn,PR_TRUE);
- if (rc && SERIALLOCK(li)) {
- dblayer_unlock_backend(be);
+ if (DBLOCK_INSIDE_TXN(li)) {
+ rc = dblayer_txn_begin_ext(li,parent_txn,txn,PR_TRUE);
+ if (!rc && SERIALLOCK(li)) {
+ dblayer_lock_backend(be);
+ }
+ } else {
+ if (SERIALLOCK(li)) {
+ dblayer_lock_backend(be);
+ }
+ rc = dblayer_txn_begin_ext(li,parent_txn,txn,PR_TRUE);
+ if (rc && SERIALLOCK(li)) {
+ dblayer_unlock_backend(be);
+ }
}
return rc;
}
@@ -3721,9 +3728,17 @@ int
dblayer_txn_commit(backend *be, back_txn *txn)
{
struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
- int rc = dblayer_txn_commit_ext(li,txn,PR_TRUE);
- if (SERIALLOCK(li)) {
- dblayer_unlock_backend(be);
+ int rc;
+ if (DBLOCK_INSIDE_TXN(li)) {
+ if (SERIALLOCK(li)) {
+ dblayer_unlock_backend(be);
+ }
+ rc = dblayer_txn_commit_ext(li,txn,PR_TRUE);
+ } else {
+ rc = dblayer_txn_commit_ext(li,txn,PR_TRUE);
+ if (SERIALLOCK(li)) {
+ dblayer_unlock_backend(be);
+ }
}
return rc;
}
@@ -3803,9 +3818,17 @@ int
dblayer_txn_abort(backend *be, back_txn *txn)
{
struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
- int rc = dblayer_txn_abort_ext(li, txn, PR_TRUE);
- if (SERIALLOCK(li)) {
- dblayer_unlock_backend(be);
+ int rc;
+ if (DBLOCK_INSIDE_TXN(li)) {
+ if (SERIALLOCK(li)) {
+ dblayer_unlock_backend(be);
+ }
+ rc = dblayer_txn_abort_ext(li, txn, PR_TRUE);
+ } else {
+ rc = dblayer_txn_abort_ext(li, txn, PR_TRUE);
+ if (SERIALLOCK(li)) {
+ dblayer_unlock_backend(be);
+ }
}
return rc;
}
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_add.c b/ldap/servers/slapd/back-ldbm/ldbm_add.c
index eab2ae18f..c7d1f620e 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_add.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_add.c
@@ -759,7 +759,7 @@ ldbm_back_add( Slapi_PBlock *pb )
parententry = NULL;
}
- if (!is_ruv && !is_fixup_operation) {
+ if (!is_ruv && !is_fixup_operation && !NO_RUV_UPDATE(li)) {
ruv_c_init = ldbm_txn_ruv_modify_context( pb, &ruv_c );
if (-1 == ruv_c_init) {
LDAPDebug( LDAP_DEBUG_ANY,
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 056c14cd3..24668615e 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -209,6 +209,27 @@ static int ldbm_config_rangelookthroughlimit_set(void *arg, void *value, char *e
return retval;
}
+static void *ldbm_config_backend_opt_level_get(void *arg)
+{
+ struct ldbminfo *li = (struct ldbminfo *) arg;
+
+ return (void *) ((uintptr_t)(li->li_backend_opt_level));
+}
+
+static int ldbm_config_backend_opt_level_set(void *arg, void *value, char *errorbuf, int phase, int apply)
+{
+ struct ldbminfo *li = (struct ldbminfo *) arg;
+ int retval = LDAP_SUCCESS;
+ int val = (int) ((uintptr_t)value);
+
+ /* Do whatever we can to make sure the data is ok. */
+
+ if (apply) {
+ li->li_backend_opt_level = val;
+ }
+
+ return retval;
+}
static void *ldbm_config_mode_get(void *arg)
{
struct ldbminfo *li = (struct ldbminfo *) arg;
@@ -1415,6 +1436,7 @@ static config_info ldbm_config[] = {
{CONFIG_PAGEDLOOKTHROUGHLIMIT, CONFIG_TYPE_INT, "0", &ldbm_config_pagedlookthroughlimit_get, &ldbm_config_pagedlookthroughlimit_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_PAGEDIDLISTSCANLIMIT, CONFIG_TYPE_INT, "0", &ldbm_config_pagedallidsthreshold_get, &ldbm_config_pagedallidsthreshold_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_RANGELOOKTHROUGHLIMIT, CONFIG_TYPE_INT, "5000", &ldbm_config_rangelookthroughlimit_get, &ldbm_config_rangelookthroughlimit_set, CONFIG_FLAG_ALWAYS_SHOW|CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_BACKEND_OPT_LEVEL, CONFIG_TYPE_INT, "0", &ldbm_config_backend_opt_level_get, &ldbm_config_backend_opt_level_set, CONFIG_FLAG_ALWAYS_SHOW},
{NULL, 0, NULL, NULL, NULL, 0}
};
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.h b/ldap/servers/slapd/back-ldbm/ldbm_config.h
index 456d27d2f..af6b2d0a9 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.h
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.h
@@ -145,6 +145,7 @@ struct config_info {
#define CONFIG_BYPASS_FILTER_TEST "nsslapd-search-bypass-filter-test"
#define CONFIG_USE_VLV_INDEX "nsslapd-search-use-vlv-index"
#define CONFIG_SERIAL_LOCK "nsslapd-serial-lock"
+#define CONFIG_BACKEND_OPT_LEVEL "nsslapd-backend-opt-level"
#define CONFIG_ENTRYRDN_SWITCH "nsslapd-subtree-rename-switch"
/* nsslapd-noancestorid is ignored unless nsslapd-subtree-rename-switch is on */
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index 6efa03ed6..0478a12a8 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -551,7 +551,7 @@ ldbm_back_delete( Slapi_PBlock *pb )
}
}
- if (!is_ruv && !is_fixup_operation && !delete_tombstone_entry) {
+ if (!is_ruv && !is_fixup_operation && !delete_tombstone_entry && !NO_RUV_UPDATE(li)) {
ruv_c_init = ldbm_txn_ruv_modify_context( pb, &ruv_c );
if (-1 == ruv_c_init) {
LDAPDebug( LDAP_DEBUG_ANY,
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 1e728da72..17adc87d2 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -410,6 +410,14 @@ ldbm_back_modify( Slapi_PBlock *pb )
dblock_acquired= 1;
}
*/
+ if ( MANAGE_ENTRY_BEFORE_DBLOCK(li)) {
+ /* find and lock the entry we are about to modify */
+ if ( (e = find_entry2modify( pb, be, addr, &txn )) == NULL ) {
+ ldap_result_code= -1;
+ goto error_return; /* error result sent by find_entry2modify() */
+ }
+ }
+
txn.back_txn_txn = NULL; /* ready to create the child transaction */
for (retry_count = 0; retry_count < RETRY_TIMES; retry_count++) {
int cache_rc = 0;
@@ -467,10 +475,12 @@ ldbm_back_modify( Slapi_PBlock *pb )
slapi_pblock_set(pb, SLAPI_TXN, txn.back_txn_txn);
if (0 == retry_count) { /* just once */
- /* find and lock the entry we are about to modify */
- if ( (e = find_entry2modify( pb, be, addr, &txn )) == NULL ) {
- ldap_result_code= -1;
- goto error_return; /* error result sent by find_entry2modify() */
+ if ( !MANAGE_ENTRY_BEFORE_DBLOCK(li)) {
+ /* find and lock the entry we are about to modify */
+ if ( (e = find_entry2modify( pb, be, addr, &txn )) == NULL ) {
+ ldap_result_code= -1;
+ goto error_return; /* error result sent by find_entry2modify() */
+ }
}
if ( !is_fixup_operation )
@@ -543,7 +553,7 @@ ldbm_back_modify( Slapi_PBlock *pb )
goto error_return;
}
- if (!is_ruv && !is_fixup_operation) {
+ if (!is_ruv && !is_fixup_operation && !NO_RUV_UPDATE(li)) {
ruv_c_init = ldbm_txn_ruv_modify_context( pb, &ruv_c );
if (-1 == ruv_c_init) {
LDAPDebug( LDAP_DEBUG_ANY,
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
index 80bf2f44c..bcc59b3bb 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modrdn.c
@@ -839,7 +839,7 @@ ldbm_back_modrdn( Slapi_PBlock *pb )
/* JCM - A subtree move could break ACIs, static groups, and dynamic groups. */
}
- if (!is_ruv && !is_fixup_operation) {
+ if (!is_ruv && !is_fixup_operation && !NO_RUV_UPDATE(li)) {
ruv_c_init = ldbm_txn_ruv_modify_context( pb, &ruv_c );
if (-1 == ruv_c_init) {
LDAPDebug( LDAP_DEBUG_ANY,
| 0 |
a3e270e483b52f212c638b1d9de8bf862d2a3f49
|
389ds/389-ds-base
|
Ticket 48894 - improve entrywsi delete
Bug Description: The entrywsi cleanup code for maintaining the vs->sorted array
had a crash in it due to improper handling of the valueset and the sorted array.
Fix Description: This fixes the complex algorithm to be simpler, and resolves
the segfault found.
https://fedorahosted.org/389/ticket/48894
Author: wibrown
Review by: mreynolds (Thanks!)
|
commit a3e270e483b52f212c638b1d9de8bf862d2a3f49
Author: William Brown <[email protected]>
Date: Thu Dec 1 14:02:02 2016 +1000
Ticket 48894 - improve entrywsi delete
Bug Description: The entrywsi cleanup code for maintaining the vs->sorted array
had a crash in it due to improper handling of the valueset and the sorted array.
Fix Description: This fixes the complex algorithm to be simpler, and resolves
the segfault found.
https://fedorahosted.org/389/ticket/48894
Author: wibrown
Review by: mreynolds (Thanks!)
diff --git a/ldap/servers/slapd/attr.c b/ldap/servers/slapd/attr.c
index 170bdd3f8..76573edf6 100644
--- a/ldap/servers/slapd/attr.c
+++ b/ldap/servers/slapd/attr.c
@@ -758,7 +758,7 @@ attr_purge_state_information(Slapi_Entry *entry, Slapi_Attr *attr, const CSN *cs
{
if(!valueset_isempty(&attr->a_deleted_values))
{
- valueset_purge(&attr->a_deleted_values, csnUpTo);
+ valueset_purge(attr, &attr->a_deleted_values, csnUpTo);
}
}
diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
index da58cb279..94cf4ba22 100644
--- a/ldap/servers/slapd/entrywsi.c
+++ b/ldap/servers/slapd/entrywsi.c
@@ -811,8 +811,8 @@ entry_delete_present_values_wsi_multi_valued(Slapi_Entry *e, const char *type, s
* the current attr delete operation. These values need to be
* preserved, all others can be removed, purging should o the job.
*/
- valueset_purge(&a->a_present_values, csn);
- valueset_purge(&a->a_deleted_values, csn);
+ valueset_purge(a, &a->a_present_values, csn);
+ valueset_purge(a, &a->a_deleted_values, csn);
if(attr_state==ATTRIBUTE_PRESENT && valueset_isempty(&a->a_present_values))
entry_present_attribute_to_deleted_attribute(e, a);
}
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index af728a6bd..2cdee6a7c 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -149,7 +149,7 @@ int valueset_isempty( const Slapi_ValueSet *vs);
Slapi_Value *valueset_find(const Slapi_Attr *a, const Slapi_ValueSet *vs, const Slapi_Value *v);
Slapi_Value *valueset_remove_value(const Slapi_Attr *a, Slapi_ValueSet *vs, const Slapi_Value *v);
int valueset_remove_valuearray(Slapi_ValueSet *vs, const Slapi_Attr *a, Slapi_Value **valuestodelete, int flags, Slapi_Value ***va_out);
-int valueset_purge(Slapi_ValueSet *vs, const CSN *csn);
+int valueset_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn);
Slapi_Value **valueset_get_valuearray(const Slapi_ValueSet *vs);
size_t valueset_size(const Slapi_ValueSet *vs);
void slapi_valueset_add_valuearray(const Slapi_Attr *a, Slapi_ValueSet *vs, Slapi_Value **addvals);
diff --git a/ldap/servers/slapd/valueset.c b/ldap/servers/slapd/valueset.c
index ac2f1bed9..d8188fc6f 100644
--- a/ldap/servers/slapd/valueset.c
+++ b/ldap/servers/slapd/valueset.c
@@ -734,7 +734,7 @@ valueset_remove_value(const Slapi_Attr *a, Slapi_ValueSet *vs, const Slapi_Value
* Remove any values older than the CSN from valueset.
*/
int
-valueset_array_purge(Slapi_ValueSet *vs, const CSN *csn)
+valueset_array_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn)
{
size_t i = 0;
size_t j = 0;
@@ -744,6 +744,7 @@ valueset_array_purge(Slapi_ValueSet *vs, const CSN *csn)
/* Loop over all the values freeing the old ones. */
for(i = 0; i < vs->num; i++)
{
+ /* If we have the sorted array, find the va array ref by it. */
if (vs->sorted) {
j = vs->sorted[i];
} else {
@@ -753,71 +754,112 @@ valueset_array_purge(Slapi_ValueSet *vs, const CSN *csn)
if (vs->va[j]->v_csnset == NULL) {
slapi_value_free(&vs->va[j]);
vs->va[j] = NULL;
+ } else if (vs->va[j] != NULL) {
+ /* This value survived, we should count it. */
+ numValues++;
}
}
+
/* Now compact the value/sorted list. */
- numValues = i;
- nextValue = 0;
- for(i = 0; i<numValues; i++) {
- if (vs->sorted) {
- j = vs->sorted[nextValue];
- } else {
- j = nextValue;
- }
- while((nextValue < numValues) && (NULL == vs->va[j])) {
- if (vs->sorted) {
- j = vs->sorted[nextValue++];
- } else {
- nextValue++;
- }
- }
- if(nextValue < numValues) {
- if(vs->sorted) {
- vs->va[vs->sorted[i]] = vs->va[j];
- vs->sorted[i] = j;
- } else {
- vs->va[i] = vs->va[j];
+ /*
+ * Because we want to preserve the sorted array, this is complicated.
+ *
+ * We have an array of values:
+ * [ b, a, c, NULL, e, NULL, NULL, d]
+ * And an array of indicies that are sorted.
+ * [ 1, 0, 2, 7, 4, 3, 5, 6 ]
+ * Were we to iterate over the sorted array, we get refs to the values in
+ * some order.
+ * The issue is now we must *remove* from both the values *and* the sorted.
+ *
+ * Previously, we just discarded this, because too hard. Now we try to keep
+ * it. The issue is that this is surprisingly hard to actually keep in
+ * sync.
+ *
+ * We can't just blindly move the values down: That breaks the sorted array
+ * and we would need to iterate over the sorted array multiple times to
+ * achieve this.
+ *
+ * It's actually going to be easier to just ditch the sorted, compact vs
+ * and then qsort the array.
+ */
+
+ j = 0;
+ while (nextValue < numValues && j < vs->num)
+ {
+ /* nextValue is what we are looking at now
+ * j tracks along the array getting next elements.
+ *
+ * [ b, a, c, NULL, e, NULL, NULL, d]
+ * ^nv ^j
+ * [ b, a, c, e, NULL, NULL, NULL, d]
+ * ^nv ^j
+ * [ b, a, c, e, NULL, NULL, NULL, d]
+ * ^nv ^j
+ * [ b, a, c, e, NULL, NULL, NULL, d]
+ * ^nv ^j
+ * [ b, a, c, e, NULL, NULL, NULL, d]
+ * ^nv ^j
+ * [ b, a, c, e, d, NULL, NULL, NULL]
+ * ^nv ^j
+ */
+ if (vs->va[nextValue] == NULL) {
+ /* Advance j till we find something */
+ while (vs->va[j] == NULL) {
+ j++;
}
- nextValue++;
- } else {
- break;
+ /* We have something! */
+ vs->va[nextValue] = vs->va[j];
+ vs->va[j] = NULL;
}
+ nextValue++;
}
-
- if(vs->sorted) {
- vs->va[vs->sorted[i]] = NULL;
- vs->sorted[i] = 0;
- } else {
- vs->va[i] = NULL;
+ /* Fix up the number of values */
+ vs->num = numValues;
+ /* Should we re-alloc values to be smaller? */
+ /* Other parts of DS are lazy. Lets clean our list */
+ for (j = vs->num; j < vs->max; j++) {
+ vs->va[j] = NULL;
}
/* All the values were deleted, we can discard the whole array. */
- if(NULL == vs->va[0]) {
+ if(vs->num == 0) {
if(vs->sorted) {
slapi_ch_free ((void **)&vs->sorted);
}
slapi_ch_free ((void **)&vs->va);
- vs->va= NULL;
+ vs->va = NULL;
+ vs->max = 0;
+ } else if (vs->sorted != NULL) {
+ /* We still have values! rebuild the sorted array */
+ valueset_array_to_sorted(a, vs);
+ }
+
+#ifdef DEBUG
+ PR_ASSERT(vs->num == 0 || (vs->num > 0 && vs->va[0] != NULL));
+ size_t index = 0;
+ for (; index < vs->num; index++) {
+ PR_ASSERT(vs->va[index] != NULL);
}
+ for (; index < vs->max; index++) {
+ PR_ASSERT(vs->va[index] == NULL);
+ }
+#endif
/* return the number of remaining values */
- return i;
+ return numValues;
}
/*
* Remove any values older than the CSN.
*/
int
-valueset_purge(Slapi_ValueSet *vs, const CSN *csn)
+valueset_purge(const Slapi_Attr *a, Slapi_ValueSet *vs, const CSN *csn)
{
int r= 0;
if(!valuearray_isempty(vs->va)) {
- r= valueset_array_purge(vs, csn);
+ r= valueset_array_purge(a, vs, csn);
vs->num = r;
- if (vs->va == NULL) {
- /* va was freed */
- vs->max = 0;
- }
PR_ASSERT((vs->sorted == NULL) || (vs->num < VALUESET_ARRAY_SORT_THRESHOLD) || ((vs->num >= VALUESET_ARRAY_SORT_THRESHOLD) && (vs->sorted[0] < vs->num)));
}
return 0;
@@ -1207,12 +1249,13 @@ valueset_add_string(const Slapi_Attr *a, Slapi_ValueSet *vs, const char *s, CSNT
void
valueset_set_valueset(Slapi_ValueSet *vs1, const Slapi_ValueSet *vs2)
{
- int i;
+ size_t i;
if (vs1 && vs2) {
int oldmax = vs1->max;
/* pre-condition - vs1 empty - otherwise, existing data is overwritten */
PR_ASSERT(vs1->num == 0);
+
if (vs2->va) {
/* need to copy valuearray */
if (vs2->max == 0) {
| 0 |
ea6ac915da1cf1b50c2607410a37a65a766a1823
|
389ds/389-ds-base
|
Ticket 47644 - Managed Entry Plugin - transaction not aborted upon failure to create managed entry
Bug Description: The plugin is now a backend transaction plugin, but
if the plugin fails, the initial operation still succeeds.
Fix Description: Return an error from the plugin when the plugins fails to
create the managed entry. This will abort the entire operation
as expected.
https://fedorahosted.org/389/ticket/47644
Reviewed by: rmeggins(Thanks!)
|
commit ea6ac915da1cf1b50c2607410a37a65a766a1823
Author: Mark Reynolds <[email protected]>
Date: Fri May 30 14:57:15 2014 -0400
Ticket 47644 - Managed Entry Plugin - transaction not aborted upon failure to create managed entry
Bug Description: The plugin is now a backend transaction plugin, but
if the plugin fails, the initial operation still succeeds.
Fix Description: Return an error from the plugin when the plugins fails to
create the managed entry. This will abort the entire operation
as expected.
https://fedorahosted.org/389/ticket/47644
Reviewed by: rmeggins(Thanks!)
diff --git a/ldap/servers/plugins/mep/mep.c b/ldap/servers/plugins/mep/mep.c
index 3ee57f4a1..9b3d5d2b2 100644
--- a/ldap/servers/plugins/mep/mep.c
+++ b/ldap/servers/plugins/mep/mep.c
@@ -106,7 +106,7 @@ static int mep_oktodo(Slapi_PBlock *pb);
static int mep_isrepl(Slapi_PBlock *pb);
static Slapi_Entry *mep_create_managed_entry(struct configEntry *config,
Slapi_Entry *origin);
-static void mep_add_managed_entry(struct configEntry *config,
+static int mep_add_managed_entry(struct configEntry *config,
Slapi_Entry *origin);
static void mep_rename_managed_entry(Slapi_Entry *origin,
Slapi_DN *new_dn, Slapi_DN *old_dn);
@@ -1394,7 +1394,7 @@ mep_create_managed_entry(struct configEntry *config, Slapi_Entry *origin)
* origin entry will also be modified to add a link to the
* newly created managed entry.
*/
-static void
+static int
mep_add_managed_entry(struct configEntry *config,
Slapi_Entry *origin)
{
@@ -1415,6 +1415,7 @@ mep_add_managed_entry(struct configEntry *config,
"mep_add_managed_entry: Unable to create a managed "
"entry from origin entry \"%s\" using config "
"\"%s\".\n", slapi_entry_get_dn(origin), slapi_sdn_get_dn(config->sdn));
+ return -1;
} else {
/* Copy the managed entry DN to use when
* creating the pointer attribute. */
@@ -1435,6 +1436,7 @@ mep_add_managed_entry(struct configEntry *config,
"entry \"%s\" for origin entry \"%s\" (%s).\n",
managed_dn, slapi_entry_get_dn(origin),
ldap_err2string(result));
+ goto bail;
} else {
/* Add forward link to origin entry. */
LDAPMod oc_mod;
@@ -1484,8 +1486,10 @@ mep_add_managed_entry(struct configEntry *config,
}
}
+bail:
slapi_ch_free_string(&managed_dn);
slapi_pblock_destroy(mod_pb);
+ return result;
}
/* mep_rename_managed_entry()
@@ -2418,6 +2422,7 @@ mep_add_post_op(Slapi_PBlock *pb)
Slapi_Entry *e = NULL;
Slapi_DN *sdn = NULL;
struct configEntry *config = NULL;
+ int result = SLAPI_PLUGIN_SUCCESS;
slapi_log_error(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM,
"--> mep_add_post_op\n");
@@ -2459,7 +2464,16 @@ mep_add_post_op(Slapi_PBlock *pb)
mep_find_config(e, &config);
if (config) {
- mep_add_managed_entry(config, e);
+ if(mep_add_managed_entry(config, e)){
+ char errtxt[SLAPI_DSE_RETURNTEXT_SIZE];
+ int rc = LDAP_UNWILLING_TO_PERFORM;
+
+ PR_snprintf(errtxt, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Managed Entry Plugin rejected add operation (see errors log).\n");
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ result = SLAPI_PLUGIN_FAILURE;
+ }
}
mep_config_unlock();
@@ -2472,7 +2486,7 @@ mep_add_post_op(Slapi_PBlock *pb)
slapi_log_error(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM,
"<-- mep_add_post_op\n");
- return SLAPI_PLUGIN_SUCCESS;
+ return result;
}
static int
@@ -2548,6 +2562,7 @@ mep_modrdn_post_op(Slapi_PBlock *pb)
Slapi_Entry *post_e = NULL;
char *managed_dn = NULL;
struct configEntry *config = NULL;
+ int result = SLAPI_PLUGIN_SUCCESS;
slapi_log_error(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM,
"--> mep_modrdn_post_op\n");
@@ -2681,6 +2696,7 @@ mep_modrdn_post_op(Slapi_PBlock *pb)
slapi_log_error(SLAPI_LOG_FATAL, MEP_PLUGIN_SUBSYSTEM,
"mep_modrdn_post_op: Unable to create in-memory "
"managed entry from origin entry \"%s\".\n", new_dn);
+ result = SLAPI_PLUGIN_FAILURE;
goto bailmod;
}
@@ -2713,6 +2729,7 @@ mep_modrdn_post_op(Slapi_PBlock *pb)
"mep_modrdn_post_op: Unable to update pointer to "
"origin entry \"%s\" in managed entry \"%s\" "
"(%s).\n", new_dn, managed_dn, ldap_err2string(result));
+
} else {
/* See if we need to rename the managed entry. */
if (slapi_sdn_compare(slapi_entry_get_sdn(new_managed_entry), managed_sdn) != 0) {
@@ -2724,8 +2741,8 @@ mep_modrdn_post_op(Slapi_PBlock *pb)
slapi_entry_get_dn(new_managed_entry),
slapi_sdn_get_dn(old_sdn));
mep_rename_managed_entry(post_e,
- slapi_entry_get_sdn(new_managed_entry),
- managed_sdn);
+ slapi_entry_get_sdn(new_managed_entry),
+ managed_sdn);
}
/* Update all of the mapped attributes
@@ -2780,12 +2797,21 @@ bailmod:
/* Bail out if the plug-in close function was just called. */
if (!slapi_plugin_running(pb)) {
mep_config_unlock();
- return SLAPI_PLUGIN_SUCCESS;
+ return result;
}
mep_find_config(post_e, &config);
if (config) {
- mep_add_managed_entry(config, post_e);
+ if(mep_add_managed_entry(config, post_e)){
+ char errtxt[SLAPI_DSE_RETURNTEXT_SIZE];
+ int rc = LDAP_UNWILLING_TO_PERFORM;
+
+ PR_snprintf(errtxt, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Managed Entry Plugin rejected modrdn operation (see errors log).\n");
+ slapi_pblock_set(pb, SLAPI_PB_RESULT_TEXT, &errtxt);
+ slapi_pblock_set(pb, SLAPI_RESULT_CODE, &rc);
+ result = SLAPI_PLUGIN_FAILURE;
+ }
}
mep_config_unlock();
@@ -2793,7 +2819,7 @@ bailmod:
slapi_log_error(SLAPI_LOG_TRACE, MEP_PLUGIN_SUBSYSTEM,
"<-- mep_modrdn_post_op\n");
- return SLAPI_PLUGIN_SUCCESS;
+ return result;
}
static int
| 0 |
50cff32950e3a21c794e136a6ae8323a08e7e352
|
389ds/389-ds-base
|
Resolves: bug 249470
Bug Description: cn equality index missing by default
Reviewed by: nhosoi (Thanks!)
Fix Description: When creating the database instance during dse.ldif processing, we do not create the user defined indexes from the defaults for this backend. This used to work in the old setup code because that code would always add the configuration for the indexes for the new instance. The way it is supposed to work is that a new instance should just copy the default indexes for that backend. This works fine when adding an instance via LDAP but not during startup. I just added a call to have this done during startup. I also removed some obsolete indexes from the default indexes.
Platforms tested: RHEL4
Flag Day: no
Doc impact: no
|
commit 50cff32950e3a21c794e136a6ae8323a08e7e352
Author: Rich Megginson <[email protected]>
Date: Wed Jul 25 15:20:01 2007 +0000
Resolves: bug 249470
Bug Description: cn equality index missing by default
Reviewed by: nhosoi (Thanks!)
Fix Description: When creating the database instance during dse.ldif processing, we do not create the user defined indexes from the defaults for this backend. This used to work in the old setup code because that code would always add the configuration for the indexes for the new instance. The way it is supposed to work is that a new instance should just copy the default indexes for that backend. This works fine when adding an instance via LDAP but not during startup. I just added a call to have this done during startup. I also removed some obsolete indexes from the default indexes.
Platforms tested: RHEL4
Flag Day: no
Doc impact: no
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 5d9aec857..ba064d8e2 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -603,22 +603,6 @@ cn: member
nssystemindex: false
nsindextype: eq
-dn: cn=nsCalXItemId,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
-objectclass: top
-objectclass: nsIndex
-cn: nsCalXItemId
-nssystemindex: false
-nsindextype: pres
-nsindextype: eq
-nsindextype: sub
-
-dn: cn=nsLIProfileName,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
-objectclass: top
-objectclass: nsIndex
-cn: nsLIProfileName
-nssystemindex: false
-nsindextype: eq
-
dn: cn=nsUniqueId,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
objectclass: top
objectclass: nsIndex
@@ -626,13 +610,6 @@ cn: nsUniqueId
nssystemindex: true
nsindextype: eq
-dn: cn=nswcalCALID,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
-objectclass: top
-objectclass: nsIndex
-cn: nswcalCALID
-nssystemindex: false
-nsindextype: eq
-
dn: cn=numsubordinates,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
objectclass: top
objectclass: nsIndex
@@ -661,20 +638,6 @@ cn: parentid
nssystemindex: true
nsindextype: eq
-dn: cn=pipstatus,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
-objectclass: top
-objectclass: nsIndex
-cn: pipstatus
-nssystemindex: false
-nsindextype: eq
-
-dn: cn=pipuid,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
-objectclass: top
-objectclass: nsIndex
-cn: pipuid
-nssystemindex: false
-nsindextype: pres
-
dn: cn=seeAlso,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
objectclass: top
objectclass: nsIndex
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
index 7dfc826f6..94f1f80e3 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
@@ -837,6 +837,10 @@ ldbm_instance_add_instance_entry_callback(Slapi_PBlock *pb, Slapi_Entry* entryBe
if (pb == NULL) {
/* called during startup -- do the rest now */
rc = ldbm_instance_generate(li, instance_name, NULL);
+ if (!rc) {
+ inst = ldbm_instance_find_by_name(li, instance_name);
+ rc = ldbm_instance_create_default_user_indexes(inst);
+ }
}
/* if called during a normal ADD operation, the postadd callback
* will do the rest.
| 0 |
9d0ebfe5c1c964526448e113dc13d10731c27992
|
389ds/389-ds-base
|
Issue 5973 - Fix fedora cop RawHide builds (#5974)
Problem: @389ds/389-ds-base-nightly copr nigthly builds faild on
fedora-rawhide-s390x and fedora-rawhide-x86_64
Solution:
[1] Work around a gcc cpp bug by moving stavfs.h include line
before ldbm-backend.h include line
[2] Do not use large file API on LP64 architecture
Reviewed by: @tbordaz , @droideck (Thanks !)
|
commit 9d0ebfe5c1c964526448e113dc13d10731c27992
Author: progier389 <[email protected]>
Date: Tue Oct 31 12:08:32 2023 +0100
Issue 5973 - Fix fedora cop RawHide builds (#5974)
Problem: @389ds/389-ds-base-nightly copr nigthly builds faild on
fedora-rawhide-s390x and fedora-rawhide-x86_64
Solution:
[1] Work around a gcc cpp bug by moving stavfs.h include line
before ldbm-backend.h include line
[2] Do not use large file API on LP64 architecture
Reviewed by: @tbordaz , @droideck (Thanks !)
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index deff9c06c..b429c98c6 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -18,8 +18,11 @@
#if defined(HPUX11) || defined(OS_solaris) || defined(linux)
/* built-in 64-bit file I/O support */
+#if ! defined(__LP64__)
+/* But not on 64-bit arch: It is needless and build fails since gcc 13.2.1-4 */
#define DB_USE_64LFS
#endif
+#endif
/* needed by at least HPUX and Solaris, to define off64_t */
#ifdef DB_USE_64LFS
diff --git a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
index 7179e129a..77e1e0e4e 100644
--- a/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
@@ -7,14 +7,14 @@
* END COPYRIGHT BLOCK **/
+#include <sys/types.h>
+#include <sys/statvfs.h>
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "bdb_layer.h"
#include <prthread.h>
#include <prclist.h>
-#include <sys/types.h>
-#include <sys/statvfs.h>
#include <glob.h>
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
index 5b364cb43..74c59b679 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c
@@ -6,6 +6,8 @@
* See LICENSE for details.
* END COPYRIGHT BLOCK **/
+#include <sys/types.h>
+#include <sys/statvfs.h>
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
@@ -13,7 +15,6 @@
/* dbmdb_ctx_t.c - Handles configuration information that is specific to a MDB backend instance. */
#include "mdb_layer.h"
-#include <sys/statvfs.h>
/* Forward declarations */
diff --git a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
index 1c2fa22d7..226556bf3 100644
--- a/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
+++ b/ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c
@@ -7,6 +7,8 @@
* END COPYRIGHT BLOCK **/
+#include <sys/types.h>
+#include <sys/statvfs.h>
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
@@ -15,8 +17,6 @@
#include <prthread.h>
#include <assert.h>
#include <prclist.h>
-#include <sys/types.h>
-#include <sys/statvfs.h>
#include <glob.h>
Slapi_ComponentId *dbmdb_componentid;
diff --git a/ldap/servers/slapd/back-ldbm/dbimpl.c b/ldap/servers/slapd/back-ldbm/dbimpl.c
index 944a2e29c..da4a4548e 100644
--- a/ldap/servers/slapd/back-ldbm/dbimpl.c
+++ b/ldap/servers/slapd/back-ldbm/dbimpl.c
@@ -26,6 +26,8 @@
* are in dblayer.c ( All function defined during phase 2 )
*/
+#include <sys/types.h>
+#include <sys/statvfs.h>
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
@@ -34,8 +36,6 @@
#include "dblayer.h"
#include <prthread.h>
#include <prclist.h>
-#include <sys/types.h>
-#include <sys/statvfs.h>
static inline dblayer_private *dblayer_get_priv(Slapi_Backend *be)
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index d5022cd5b..30cd0c76a 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -57,6 +57,8 @@
* dblayer_release_index_file()
*/
+#include <sys/types.h>
+#include <sys/statvfs.h>
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
@@ -64,8 +66,6 @@
#include "dblayer.h"
#include <prthread.h>
#include <prclist.h>
-#include <sys/types.h>
-#include <sys/statvfs.h>
#define NEWDIR_MODE 0755
#define DB_REGION_PREFIX "__db."
| 0 |
54cf07cc0a7995d7fa6174796d77c36cc630202a
|
389ds/389-ds-base
|
Issue 5785 - move bash completion to post section of specfile
Description: Need to move bash completion setup to %post section of specfile.
Previously it was done during the build process which is incorrect and breaks
builds.
relates: https://github.com/389ds/389-ds-base/issues/5785
Reviewed by: spichugi(Thanks!)
|
commit 54cf07cc0a7995d7fa6174796d77c36cc630202a
Author: Mark Reynolds <[email protected]>
Date: Thu Jun 8 09:50:27 2023 -0400
Issue 5785 - move bash completion to post section of specfile
Description: Need to move bash completion setup to %post section of specfile.
Previously it was done during the build process which is incorrect and breaks
builds.
relates: https://github.com/389ds/389-ds-base/issues/5785
Reviewed by: spichugi(Thanks!)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index afd336744..0ce8c5ce9 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -495,12 +495,6 @@ autoreconf -fiv
sed -r -i '/^(Protect(Home|Hostname|KernelLogs)|PrivateMounts)=/d' %{_builddir}/%{name}-%{version}%{?prerel}/wrappers/*.service.in
%endif
-# Register CLI tools for bash completion
-for clitool in dsconf dsctl dsidm dscreate ds-replcheck
-do
- register-python-argcomplete "${clitool}" > "/usr/share/bash-completion/completions/${clitool}"
-done
-
%if 0%{?rhel} > 7 || 0%{?fedora}
# lib389
pushd ./src/lib389
@@ -590,6 +584,12 @@ else
output2=/dev/null
fi
+# Register CLI tools for bash completion
+for clitool in dsconf dsctl dsidm dscreate ds-replcheck
+do
+ register-python-argcomplete "${clitool}" > "/usr/share/bash-completion/completions/${clitool}"
+done
+
# reload to pick up any changes to systemd files
/bin/systemctl daemon-reload >$output 2>&1 || :
| 0 |
f6799c274266c7e25b738554aa46c7619cafcd21
|
389ds/389-ds-base
|
Ticket 4345 - import self sign cert doc comment (#4346)
Bug Description: It was raised that the doc comment with TLS
and self sign cert could be confusing and if disabled it was
not clear how to enable TLS later.
Fix Description: Improve the doc comment with examples.
fixes: #4345
Author: William Brown <[email protected]>
Review by: spichugi (Thanks!)
|
commit f6799c274266c7e25b738554aa46c7619cafcd21
Author: Firstyear <[email protected]>
Date: Wed Sep 30 11:35:20 2020 +1000
Ticket 4345 - import self sign cert doc comment (#4346)
Bug Description: It was raised that the doc comment with TLS
and self sign cert could be confusing and if disabled it was
not clear how to enable TLS later.
Fix Description: Improve the doc comment with examples.
fixes: #4345
Author: William Brown <[email protected]>
Review by: spichugi (Thanks!)
diff --git a/src/lib389/lib389/instance/options.py b/src/lib389/lib389/instance/options.py
index fd60d433c..ea0b0b9aa 100644
--- a/src/lib389/lib389/instance/options.py
+++ b/src/lib389/lib389/instance/options.py
@@ -184,7 +184,7 @@ class Slapd2Base(Options2):
self._options['self_sign_cert'] = True
self._type['self_sign_cert'] = bool
- self._helptext['self_sign_cert'] = "Sets whether the setup creates a self-signed certificate and enables TLS encryption during the installation. This is not suitable for production, but it enables administrators to use TLS right after the installation. You can replace the self-signed certificate with a certificate issued by a Certificate Authority."
+ self._helptext['self_sign_cert'] = "Sets whether the setup creates a self-signed certificate and enables TLS encryption during the installation. The certificate is not suitable for production, but it enables administrators to use TLS right after the installation. You can replace the self-signed certificate with a certificate issued by a Certificate Authority. If set to False, you can enable TLS later by importing a CA/Certificate and enabling 'dsconf <instance_name> config replace nsslapd-security=on'"
self._options['self_sign_cert_valid_months'] = 24
self._type['self_sign_cert_valid_months'] = int
| 0 |
96c8abba57bea957b5dfa944a95f339a970ccad7
|
389ds/389-ds-base
|
Issue 49731 - set and use db_home_directory by default
Description: New instances will automatically create and use db_home_dir
/dev/shm/dirsrv/slapd-INSTANCE.
relates: https://pagure.io/389-ds-base/issue/49731
Reviewed by: tbordaz & firstyear(Thanks!)
|
commit 96c8abba57bea957b5dfa944a95f339a970ccad7
Author: Mark Reynolds <[email protected]>
Date: Mon Mar 16 12:39:07 2020 -0400
Issue 49731 - set and use db_home_directory by default
Description: New instances will automatically create and use db_home_dir
/dev/shm/dirsrv/slapd-INSTANCE.
relates: https://pagure.io/389-ds-base/issue/49731
Reviewed by: tbordaz & firstyear(Thanks!)
diff --git a/ldap/admin/src/defaults.inf.in b/ldap/admin/src/defaults.inf.in
index 42fc09fe5..23ea09dfa 100644
--- a/ldap/admin/src/defaults.inf.in
+++ b/ldap/admin/src/defaults.inf.in
@@ -58,6 +58,7 @@ access_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/access
audit_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/audit
error_log = @localstatedir@/log/dirsrv/slapd-{instance_name}/errors
db_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name}/db
+db_home_dir = /dev/shm/dirsrv/slapd-{instance_name}
backup_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name}/bak
ldif_dir = @localstatedir@/lib/dirsrv/slapd-{instance_name}/ldif
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 9efff58c5..19abcf841 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -905,6 +905,7 @@ objectclass: extensibleObject
cn: config
nsslapd-mode: 600
nsslapd-directory: %db_dir%
+nsslapd-db-home-directory: %db_home_dir%
nsslapd-subtree-rename-switch: on
dn: cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 9a1d13936..60bcee905 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -430,6 +430,7 @@ popd
mkdir -p $RPM_BUILD_ROOT/var/log/%{pkgname}
mkdir -p $RPM_BUILD_ROOT/var/lib/%{pkgname}
mkdir -p $RPM_BUILD_ROOT/var/lock/%{pkgname}
+mkdir -p $RPM_BUILD_ROOT/dev/shm/%{pkgname}
# for systemd
mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/systemd/system/%{groupname}.wants
@@ -635,6 +636,7 @@ exit 0
%{_prefix}/lib/sysctl.d/*
%dir %{_localstatedir}/lib/%{pkgname}
%dir %{_localstatedir}/log/%{pkgname}
+%dir /dev/shm/%{pkgname}
%ghost %dir %{_localstatedir}/lock/%{pkgname}
%exclude %{_sbindir}/ldap-agent*
%exclude %{_mandir}/man1/ldap-agent.1.gz
diff --git a/src/lib389/lib389/instance/remove.py b/src/lib389/lib389/instance/remove.py
index e48b294df..bf2c6cff3 100644
--- a/src/lib389/lib389/instance/remove.py
+++ b/src/lib389/lib389/instance/remove.py
@@ -44,6 +44,7 @@ def remove_ds_instance(dirsrv, force=False):
remove_paths['cert_dir'] = dirsrv.ds_paths.cert_dir
remove_paths['config_dir'] = dirsrv.ds_paths.config_dir
remove_paths['db_dir'] = dirsrv.ds_paths.db_dir
+ remove_paths['db_home_dir'] = dirsrv.ds_paths.db_home_dir
remove_paths['db_dir_parent'] = dirsrv.ds_paths.db_dir + "/../"
### WARNING: The changelogdb isn't removed. we assume it's in:
# db_dir ../changelogdb. So remove that too!
diff --git a/src/lib389/lib389/instance/setup.py b/src/lib389/lib389/instance/setup.py
index 0a3daedf1..0cb8f4f54 100644
--- a/src/lib389/lib389/instance/setup.py
+++ b/src/lib389/lib389/instance/setup.py
@@ -281,6 +281,7 @@ class SetupDs(object):
'inst_dir': ds_paths.inst_dir,
'backup_dir': ds_paths.backup_dir,
'db_dir': ds_paths.db_dir,
+ 'db_home_dir': ds_paths.db_home_dir,
'ldif_dir': ds_paths.ldif_dir,
'lock_dir': ds_paths.lock_dir,
'log_dir': ds_paths.log_dir,
@@ -740,11 +741,12 @@ class SetupDs(object):
ds_suffix=ds_suffix,
config_dir=slapd['config_dir'],
db_dir=slapd['db_dir'],
+ db_home_dir=slapd['db_home_dir']
))
# Create all the needed paths
# we should only need to make bak_dir, cert_dir, config_dir, db_dir, ldif_dir, lock_dir, log_dir, run_dir?
- for path in ('backup_dir', 'cert_dir', 'db_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
+ for path in ('backup_dir', 'cert_dir', 'db_dir', 'db_home_dir', 'ldif_dir', 'lock_dir', 'log_dir', 'run_dir'):
self.log.debug("ACTION: creating %s", slapd[path])
try:
os.umask(0o007) # For parent dirs that get created -> sets 770 for perms
@@ -864,8 +866,9 @@ class SetupDs(object):
# Do selinux fixups
if general['selinux']:
- selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir', 'ldif_dir',
- 'lock_dir', 'log_dir', 'run_dir', 'schema_dir', 'tmp_dir')
+ selinux_paths = ('backup_dir', 'cert_dir', 'config_dir', 'db_dir',
+ 'db_home_dir', 'ldif_dir', 'lock_dir', 'log_dir',
+ 'run_dir', 'schema_dir', 'tmp_dir')
for path in selinux_paths:
selinux_restorecon(slapd[path])
diff --git a/src/lib389/lib389/paths.py b/src/lib389/lib389/paths.py
index 869df1f5f..8dcceadfb 100644
--- a/src/lib389/lib389/paths.py
+++ b/src/lib389/lib389/paths.py
@@ -57,6 +57,7 @@ MUST = [
'log_dir',
'inst_dir',
'db_dir',
+ 'db_home_dir',
'backup_dir',
'ldif_dir',
'initconfig_dir',
@@ -76,6 +77,7 @@ CONFIG_MAP = {
'lock_dir' : ('cn=config','nsslapd-lockdir'),
'inst_dir' : ('cn=config','nsslapd-instancedir'),
'db_dir' : ('cn=config,cn=ldbm database,cn=plugins,cn=config', 'nsslapd-directory'),
+ 'db_home_dir' : ('cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config', 'nsslapd-db-home-directory'),
'backup_dir': ('cn=config','nsslapd-bakdir'),
'ldif_dir': ('cn=config','nsslapd-ldifdir'),
'error_log' : ('cn=config', 'nsslapd-errorlog'),
@@ -128,7 +130,7 @@ class Paths(object):
for spath in search_paths:
if os.path.isfile(spath):
return spath
- raise IOError('defaults.inf not found in any wellknown location!')
+ raise IOError('defaults.inf not found in any well known location!')
def _read_defaults(self):
spath = self._get_defaults_loc(DEFAULTS_PATH)
| 0 |
5a61fc79325fa562141dcaa43cc32c1348226a80
|
389ds/389-ds-base
|
Bug 193297 - Call bind pre-op and post-op plug-ins for SASL binds
This patch makes SASL binds call the pre-op and post-op plug-ins.
The previous code was not calling the plug-ins for SASL binds.
This fix was contributed by Ulf Weltman of Hewlett Packard.
|
commit 5a61fc79325fa562141dcaa43cc32c1348226a80
Author: Nathan Kinder <[email protected]>
Date: Wed Dec 2 10:47:46 2009 -0800
Bug 193297 - Call bind pre-op and post-op plug-ins for SASL binds
This patch makes SASL binds call the pre-op and post-op plug-ins.
The previous code was not calling the plug-ins for SASL binds.
This fix was contributed by Ulf Weltman of Hewlett Packard.
diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index c6b009238..abb027a6b 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -32,8 +32,14 @@
*
*
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* All rights reserved.
+ *
+ * Contributors:
+ * Hewlett-Packard Development Company, L.P.
+ * Bugfix for bug #193297
+ *
* END COPYRIGHT BLOCK **/
#ifdef HAVE_CONFIG_H
@@ -370,7 +376,10 @@ do_bind( Slapi_PBlock *pb )
}
if (!pmech) {
/* now check the sasl library */
+ /* ids_sasl_check_bind takes care of calling bind
+ * pre-op plugins after it knows the target DN */
ids_sasl_check_bind(pb);
+ plugin_call_plugins( pb, SLAPI_PLUGIN_POST_BIND_FN );
goto free_and_return;
}
else {
@@ -378,6 +387,11 @@ do_bind( Slapi_PBlock *pb )
}
if (!strcasecmp (saslmech, LDAP_SASL_EXTERNAL)) {
+ /* call preop plugins */
+ if (plugin_call_plugins( pb, SLAPI_PLUGIN_PRE_BIND_FN ) != 0){
+ goto free_and_return;
+ }
+
#if defined(ENABLE_AUTOBIND)
if (1 == auto_bind) {
/* Already AUTO-BOUND */
@@ -392,6 +406,8 @@ do_bind( Slapi_PBlock *pb )
send_ldap_result( pb, LDAP_INAPPROPRIATE_AUTH, NULL,
"SASL EXTERNAL bind requires an SSL connection",
0, NULL );
+ /* call postop plugins */
+ plugin_call_plugins( pb, SLAPI_PLUGIN_POST_BIND_FN );
goto free_and_return;
}
@@ -403,6 +419,8 @@ do_bind( Slapi_PBlock *pb )
NULL == pb->pb_conn->c_external_dn ) {
send_ldap_result( pb, LDAP_INVALID_CREDENTIALS, NULL,
"client certificate mapping failed", 0, NULL );
+ /* call postop plugins */
+ plugin_call_plugins( pb, SLAPI_PLUGIN_POST_BIND_FN );
goto free_and_return;
}
@@ -417,6 +435,8 @@ do_bind( Slapi_PBlock *pb )
slapi_add_auth_response_control( pb, pb->pb_conn->c_external_dn );
}
send_ldap_result( pb, LDAP_SUCCESS, NULL, NULL, 0, NULL );
+ /* call postop plugins */
+ plugin_call_plugins( pb, SLAPI_PLUGIN_POST_BIND_FN );
goto free_and_return;
}
break;
diff --git a/ldap/servers/slapd/saslbind.c b/ldap/servers/slapd/saslbind.c
index 376bec132..0892c6709 100644
--- a/ldap/servers/slapd/saslbind.c
+++ b/ldap/servers/slapd/saslbind.c
@@ -32,8 +32,14 @@
*
*
* Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
* All rights reserved.
+ *
+ * Contributors:
+ * Hewlett-Packard Development Company, L.P.
+ * Bugfix for bug #193297
+ *
* END COPYRIGHT BLOCK **/
#ifdef HAVE_CONFIG_H
@@ -890,6 +896,11 @@ void ids_sasl_check_bind(Slapi_PBlock *pb)
break;
}
+ slapi_pblock_set( pb, SLAPI_BIND_TARGET, slapi_ch_strdup( dn ) );
+ if (plugin_call_plugins( pb, SLAPI_PLUGIN_PRE_BIND_FN ) != 0){
+ break;
+ }
+
isroot = slapi_dn_isroot(dn);
if (!isroot )
| 0 |
5c6ffae12ea1f503e1f032aebb73b15a338c8415
|
389ds/389-ds-base
|
Ticket 50439 - Update docker integration to work out of source directory
Bug Description: Docker did not function in some cases, and we had to wait for
releases via rpm.
Fix Description: This adds the support to build from source into the tree
so that we can build and test git master. This also resolves a var/run
issue in the image, as well as some other minor python cleaning such
as handling sigchld to act as init.
https://pagure.io/389-ds-base/issue/50439
Author: William Brown [email protected]
Review by: spichugi
|
commit 5c6ffae12ea1f503e1f032aebb73b15a338c8415
Author: William Brown <[email protected]>
Date: Fri Jun 14 10:33:08 2019 +0200
Ticket 50439 - Update docker integration to work out of source directory
Bug Description: Docker did not function in some cases, and we had to wait for
releases via rpm.
Fix Description: This adds the support to build from source into the tree
so that we can build and test git master. This also resolves a var/run
issue in the image, as well as some other minor python cleaning such
as handling sigchld to act as init.
https://pagure.io/389-ds-base/issue/50439
Author: William Brown [email protected]
Review by: spichugi
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..1905eb5ea
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,3 @@
+.git
+.gitignore
+./src/cockpit/389-console/node_modules
diff --git a/docker.mk b/docker.mk
index 528ab2f06..4f07ceca2 100644
--- a/docker.mk
+++ b/docker.mk
@@ -1,4 +1,3 @@
-
-poc:
- docker build -t 389-poc:latest -f docker/389ds_poc/Dockerfile .
+suse:
+ docker build -t 389-ds-suse:master -f docker/389-ds-suse/Dockerfile .
diff --git a/docker/389ds_poc/Dockerfile b/docker/389-ds-fedora/Dockerfile
similarity index 100%
rename from docker/389ds_poc/Dockerfile
rename to docker/389-ds-fedora/Dockerfile
diff --git a/docker/389-ds-suse/Dockerfile b/docker/389-ds-suse/Dockerfile
new file mode 100644
index 000000000..a28691205
--- /dev/null
+++ b/docker/389-ds-suse/Dockerfile
@@ -0,0 +1,78 @@
+#!BuildTag: 389-ds-container
+FROM opensuse/leap:15.1
+MAINTAINER [email protected]
+
+EXPOSE 3389 3636
+
+# RUN zypper ar -G obs://network:ldap network:ldap && \
+RUN zypper ar http://download.opensuse.org/update/leap/15.1/oss/ u && \
+ zypper ar http://download.opensuse.org/distribution/leap/15.1/repo/oss/ m && \
+ zypper ar http://download.opensuse.org/repositories/network:ldap/openSUSE_Leap_15.1/ "network:ldap" && \
+ zypper mr -p 97 "network:ldap" && \
+ zypper --gpg-auto-import-keys ref
+
+RUN zypper --non-interactive si --build-deps-only 389-ds && \
+ zypper in -y 389-ds rust cargo rust-std && \
+ zypper rm -y 389-ds
+
+# Install build dependencies
+# RUN zypper in -C -y autoconf automake cracklib-devel cyrus-sasl-devel db-devel doxygen gcc-c++ \
+# gdb krb5-devel libcmocka-devel libevent-devel libtalloc-devel libtevent-devel libtool \
+# net-snmp-devel openldap2-devel pam-devel pkgconfig python-rpm-macros "pkgconfig(icu-i18n)" \
+# "pkgconfig(icu-uc)" "pkgconfig(libcap)" "pkgconfig(libpcre)" "pkgconfig(libsystemd)" \
+# "pkgconfig(nspr)" "pkgconfig(nss)" rsync cargo rust rust-std acl cyrus-sasl-plain db-utils \
+# bind-utils krb5 fillup shadow openldap2-devel pkgconfig "pkgconfig(nspr)" "pkgconfig(nss)" \
+# "pkgconfig(systemd)" python3-argcomplete python3-argparse-manpage python3-ldap \
+# python3-pyasn1 python3-pyasn1-modules python3-python-dateutil python3-six krb5-client \
+# mozilla-nss-tools
+
+# Push source code to the container
+ADD ./ /usr/local/src/389-ds-base
+WORKDIR /usr/local/src/389-ds-base
+
+
+# Build and install
+# Derived from rpm --eval '%configure' on opensuse.
+RUN ./configure --host=x86_64-suse-linux-gnu --build=x86_64-suse-linux-gnu \
+ --program-prefix= \
+ --disable-dependency-tracking \
+ --prefix=/usr \
+ --exec-prefix=/usr \
+ --bindir=/usr/bin \
+ --sbindir=/usr/sbin \
+ --sysconfdir=/etc \
+ --datadir=/usr/share \
+ --includedir=/usr/include \
+ --libdir=/usr/lib64 \
+ --libexecdir=/usr/lib \
+ --localstatedir=/var \
+ --sharedstatedir=/var/lib \
+ --mandir=/usr/share/man \
+ --infodir=/usr/share/info \
+ --disable-dependency-tracking \
+ --enable-debug \
+ --enable-gcc-security --enable-autobind --enable-auto-dn-suffix --with-openldap \
+ --enable-cmocka --enable-rust --disable-perl --with-pythonexec="python3" --without-systemd \
+ --libexecdir=/usr/lib/dirsrv/ --prefix=/ && \
+ make -j 12 && \
+ make install && \
+ make lib389 && \
+ make lib389-install
+
+# Link some known static locations to point to /data
+RUN mkdir -p /data/config && \
+ mkdir -p /data/ssca && \
+ mkdir -p /data/run && \
+ mkdir -p /var/run/dirsrv && \
+ ln -s /data/config /etc/dirsrv/slapd-localhost && \
+ ln -s /data/ssca /etc/dirsrv/ssca && \
+ ln -s /data/run /var/run/dirsrv
+
+# Temporal volumes for each instance
+
+VOLUME /data
+
+# Set the userup correctly.
+# USER dirsrv
+
+CMD [ "/usr/sbin/dscontainer", "-r" ]
diff --git a/src/lib389/cli/dscontainer b/src/lib389/cli/dscontainer
index 81195c999..83a1f794b 100755
--- a/src/lib389/cli/dscontainer
+++ b/src/lib389/cli/dscontainer
@@ -45,6 +45,12 @@ from lib389.paths import Paths
# is always available!
log = setup_script_logger("container-init", True)
+# Handle any dead child process signals we receive. Wait for them to terminate, or
+# if they are not found, move on.
+def _sigchild_handler():
+ log.debug("Received SIGCHLD ...")
+ os.waitpid(-1, os.WNOHANG)
+
def begin_magic():
log.info("The 389 Directory Server Container Bootstrap")
# Leave this comment here: UofA let me take this code with me provided
@@ -158,8 +164,8 @@ def begin_magic():
# Create the marker to say we exist. This is also a good writable permissions
# test for the volume.
- with open('/data/config/container.inf', 'w'):
- pass
+ with open('/data/config/container.inf', 'w') as f:
+ f.write('allocated')
# TODO: All of this is contingent on the server starting *and*
# ldapi working ... Perhaps these are better inside ns-slapd core
@@ -206,6 +212,9 @@ def begin_magic():
# THE LETTER OF THE DAY IS C AND THE NUMBER IS 10
if __name__ == '__main__':
+ # Before all else, we are INIT so setup sigchild
+ signal.signal(signal.SIGCHLD, _sigchild_handler)
+
parser = argparse.ArgumentParser(allow_abbrev=True, description="""
dscontainer - this is a container entry point that will run a stateless
instance of 389-ds. You should not use this unless you are developing or
diff --git a/src/lib389/lib389/__init__.py b/src/lib389/lib389/__init__.py
index ed1c2594b..1c5741503 100644
--- a/src/lib389/lib389/__init__.py
+++ b/src/lib389/lib389/__init__.py
@@ -212,6 +212,8 @@ def pid_exists(pid):
return True
else:
raise
+ # Tell the OS to reap this please ...
+ os.waitpid(pid, os.WNOHANG)
return True
def pid_from_file(pidfile):
| 0 |
d9cac184c8d9bc4af8af3fd16b62a60a41fe5c88
|
389ds/389-ds-base
|
Bug 606439 - Creating server instance with LDAPI takes too long
The setup-ds.pl script expects ns-slapd to write a "slapd started"
message to the errors log when the server comes up. When you
create an instance that does not listen on a standard LDAP port,
the "slapd started" string is never printed. This causes the
installer to hang for 10 minutes, at which point in prints an error
that it could not create the instance.
This patch ensures that the "slapd started" message is printed
for servers that only listen on LDAPS or LDAPI interfaces.
|
commit d9cac184c8d9bc4af8af3fd16b62a60a41fe5c88
Author: Nathan Kinder <[email protected]>
Date: Fri Dec 10 14:11:10 2010 -0800
Bug 606439 - Creating server instance with LDAPI takes too long
The setup-ds.pl script expects ns-slapd to write a "slapd started"
message to the errors log when the server comes up. When you
create an instance that does not listen on a standard LDAP port,
the "slapd started" string is never printed. This causes the
installer to hang for 10 minutes, at which point in prints an error
that it could not create the instance.
This patch ensures that the "slapd started" message is printed
for servers that only listen on LDAPS or LDAPI interfaces.
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 705db50e7..0242902eb 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -2300,6 +2300,7 @@ unfurl_banners(Connection_Table *ct,daemon_ports_t *ports, PRFileDesc **n_tcps,
{
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
char addrbuf[ 256 ];
+ int isfirsttime = 1;
if ( ct->size <= slapdFrontendConfig->reservedescriptors ) {
#ifdef _WIN32
@@ -2340,7 +2341,6 @@ unfurl_banners(Connection_Table *ct,daemon_ports_t *ports, PRFileDesc **n_tcps,
#if !defined( XP_WIN32 )
if ( n_tcps != NULL ) { /* standard LDAP */
PRNetAddr **nap = NULL;
- int isfirsttime = 1;
for (nap = ports->n_listenaddr; nap && *nap; nap++) {
if (isfirsttime) {
@@ -2362,10 +2362,18 @@ unfurl_banners(Connection_Table *ct,daemon_ports_t *ports, PRFileDesc **n_tcps,
PRNetAddr **sap = NULL;
for (sap = ports->s_listenaddr; sap && *sap; sap++) {
- LDAPDebug( LDAP_DEBUG_ANY,
- "Listening on %s port %d for LDAPS requests\n",
- netaddr2string(*sap, addrbuf, sizeof(addrbuf)),
- ports->s_port, 0 );
+ if (isfirsttime) {
+ LDAPDebug( LDAP_DEBUG_ANY,
+ "slapd started. Listening on %s port %d for LDAPS requests\n",
+ netaddr2string(*sap, addrbuf, sizeof(addrbuf)),
+ ports->s_port, 0 );
+ isfirsttime = 0;
+ } else {
+ LDAPDebug( LDAP_DEBUG_ANY,
+ "Listening on %s port %d for LDAPS requests\n",
+ netaddr2string(*sap, addrbuf, sizeof(addrbuf)),
+ ports->s_port, 0 );
+ }
}
}
#else
@@ -2388,8 +2396,10 @@ unfurl_banners(Connection_Table *ct,daemon_ports_t *ports, PRFileDesc **n_tcps,
#if defined(ENABLE_LDAPI)
if ( i_unix != NULL ) { /* LDAPI */
PRNetAddr **iap = ports->i_listenaddr;
+
LDAPDebug( LDAP_DEBUG_ANY,
- "Listening on %s for LDAPI requests\n", (*iap)->local.path, 0, 0 );
+ "%sListening on %s for LDAPI requests\n", isfirsttime?"slapd started. ":"",
+ (*iap)->local.path, 0 );
}
#endif /* ENABLE_LDAPI */
#endif
| 0 |
fd851b3028bc3ef7bbc2394da8baa96764b2222b
|
389ds/389-ds-base
|
Ticket 39344 - changelog ldif import fails
fix: init buflen
reviewed by: Mark, thanks
|
commit fd851b3028bc3ef7bbc2394da8baa96764b2222b
Author: Ludwig Krispenz <[email protected]>
Date: Mon Aug 7 11:38:48 2017 +0200
Ticket 39344 - changelog ldif import fails
fix: init buflen
reviewed by: Mark, thanks
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index a0b7d597c..ec648c014 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -768,7 +768,7 @@ cl5ImportLDIF(const char *clDir, const char *ldifFile, Object **replicas)
{
#if defined(USE_OPENLDAP)
LDIFFP *file = NULL;
- int buflen;
+ int buflen = 0;
ldif_record_lineno_t lineno = 0;
#else
FILE *file = NULL;
| 0 |
b9723db9e5ff82712e46738fa810f728b2b6a333
|
389ds/389-ds-base
|
Ticket #48048 - Fix coverity issues - 2015/2/24
Coverity defect 13046 - Explicit null dereferenced (FORWARD_NULL)
Description: Added NULL check for op. If NULL, default connid and
opid are used in the accesslog.
modified: aclanom_match_profile in aclanom.c
|
commit b9723db9e5ff82712e46738fa810f728b2b6a333
Author: Noriko Hosoi <[email protected]>
Date: Fri Feb 27 15:31:28 2015 -0800
Ticket #48048 - Fix coverity issues - 2015/2/24
Coverity defect 13046 - Explicit null dereferenced (FORWARD_NULL)
Description: Added NULL check for op. If NULL, default connid and
opid are used in the accesslog.
modified: aclanom_match_profile in aclanom.c
diff --git a/ldap/servers/plugins/acl/aclanom.c b/ldap/servers/plugins/acl/aclanom.c
index d46b49803..118f95f48 100644
--- a/ldap/servers/plugins/acl/aclanom.c
+++ b/ldap/servers/plugins/acl/aclanom.c
@@ -537,43 +537,49 @@ aclanom_match_profile (Slapi_PBlock *pb, struct acl_pblock *aclpb, Slapi_Entry *
if ( slapi_is_loglevel_set(loglevel) ) {
char *ndn = NULL;
Slapi_Operation *op = NULL;
-
+ PRUint64 o_connid = 0xffffffffffffffff; /* no op */
+ int o_opid = -1; /* no op */
+
ndn = slapi_entry_get_ndn ( e ) ;
slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+ if (op) {
+ o_connid = op->o_connid;
+ o_opid = op->o_opid;
+ }
if ( result == LDAP_SUCCESS) {
const char *aci_ndn;
aci_ndn = slapi_sdn_get_ndn (acl_anom_profile->anom_targetinfo[i].anom_target);
- if (access & SLAPI_ACL_MODDN) {
- slapi_log_error(loglevel, plugin_name,
- "conn=%" NSPRIu64 " op=%d: Allow access on entry(%s).attr(%s) (from %s) to anonymous: acidn=\"%s\"\n",
- (long long unsigned int)op->o_connid, op->o_opid,
- ndn,
- attr ? attr:"NULL",
- aclpb->aclpb_moddn_source_sdn ? slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn) : "NULL",
- aci_ndn);
-
- } else {
- slapi_log_error(loglevel, plugin_name,
- "conn=%" NSPRIu64 " op=%d: Allow access on entry(%s).attr(%s) to anonymous: acidn=\"%s\"\n",
- (long long unsigned int)op->o_connid, op->o_opid,
- ndn,
- attr ? attr:"NULL",
- aci_ndn);
- }
+ if (access & SLAPI_ACL_MODDN) {
+ slapi_log_error(loglevel, plugin_name,
+ "conn=%" NSPRIu64 " op=%d: Allow access on entry(%s).attr(%s) (from %s) to anonymous: acidn=\"%s\"\n",
+ o_connid, o_opid,
+ ndn,
+ attr ? attr:"NULL",
+ aclpb->aclpb_moddn_source_sdn ? slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn) : "NULL",
+ aci_ndn);
+
+ } else {
+ slapi_log_error(loglevel, plugin_name,
+ "conn=%" NSPRIu64 " op=%d: Allow access on entry(%s).attr(%s) to anonymous: acidn=\"%s\"\n",
+ o_connid, o_opid,
+ ndn,
+ attr ? attr:"NULL",
+ aci_ndn);
+ }
} else {
- if (access & SLAPI_ACL_MODDN) {
- slapi_log_error(loglevel, plugin_name,
- "conn=%" NSPRIu64 " op=%d: Deny access on entry(%s).attr(%s) (from %s) to anonymous\n",
- (long long unsigned int)op->o_connid, op->o_opid,
- ndn, attr ? attr:"NULL" ,
- aclpb->aclpb_moddn_source_sdn ? slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn) : "NULL");
- } else {
- slapi_log_error(loglevel, plugin_name,
- "conn=%" NSPRIu64 " op=%d: Deny access on entry(%s).attr(%s) to anonymous\n",
- (long long unsigned int)op->o_connid, op->o_opid,
- ndn, attr ? attr:"NULL" );
- }
+ if (access & SLAPI_ACL_MODDN) {
+ slapi_log_error(loglevel, plugin_name,
+ "conn=%" NSPRIu64 " op=%d: Deny access on entry(%s).attr(%s) (from %s) to anonymous\n",
+ o_connid, o_opid,
+ ndn, attr ? attr:"NULL" ,
+ aclpb->aclpb_moddn_source_sdn ? slapi_sdn_get_dn(aclpb->aclpb_moddn_source_sdn) : "NULL");
+ } else {
+ slapi_log_error(loglevel, plugin_name,
+ "conn=%" NSPRIu64 " op=%d: Deny access on entry(%s).attr(%s) to anonymous\n",
+ o_connid, o_opid,
+ ndn, attr ? attr:"NULL" );
+ }
}
}
@@ -582,8 +588,8 @@ aclanom_match_profile (Slapi_PBlock *pb, struct acl_pblock *aclpb, Slapi_Entry *
return -1;
else
return result;
-
}
+
int
aclanom_is_client_anonymous ( Slapi_PBlock *pb )
{
| 0 |
06876946f77436f71013392becb0fb9d9e4111f6
|
389ds/389-ds-base
|
Bug 1347760 - Additional CI test case
Description: Information disclosure via repeated use of LDAP ADD operation, etc.
Case 1. When an account is inactivated, if a bind is successful (e.g.,
a correct password is given), it returns UNWILLING_TO_PERFORM.
Case 2. When an account is inactivated, if a bind is not successful,
it returns INVALID_CREDENTIALS.
|
commit 06876946f77436f71013392becb0fb9d9e4111f6
Author: Noriko Hosoi <[email protected]>
Date: Fri Jul 22 17:11:48 2016 -0700
Bug 1347760 - Additional CI test case
Description: Information disclosure via repeated use of LDAP ADD operation, etc.
Case 1. When an account is inactivated, if a bind is successful (e.g.,
a correct password is given), it returns UNWILLING_TO_PERFORM.
Case 2. When an account is inactivated, if a bind is not successful,
it returns INVALID_CREDENTIALS.
diff --git a/dirsrvtests/tests/tickets/ticket1347760_test.py b/dirsrvtests/tests/tickets/ticket1347760_test.py
index bf0356307..a54aea10b 100644
--- a/dirsrvtests/tests/tickets/ticket1347760_test.py
+++ b/dirsrvtests/tests/tickets/ticket1347760_test.py
@@ -12,6 +12,8 @@ import time
import ldap
import logging
import pytest
+
+from subprocess import Popen
from lib389 import DirSrv, Entry, tools, tasks
from lib389.tools import DirSrvTools
from lib389._constants import *
@@ -434,6 +436,27 @@ def test_ticket1347760(topology):
log.info('Delete case. the deleting entry does not exist, it should fail with %s' % rc.__name__)
check_op_result(topology.standalone, 'delete', BOGUSDN, None, exists, rc)
+ log.info('Inactivate %s' % BINDDN)
+ nsinactivate = '%s/sbin/ns-inactivate.pl' % topology.standalone.prefix
+ p = Popen([nsinactivate, '-Z', 'standalone', '-D', DN_DM, '-w', PASSWORD, '-I', BINDDN])
+ assert(p.wait() == 0)
+
+ log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, BUID, ldap.UNWILLING_TO_PERFORM.__name__))
+ try:
+ topology.standalone.simple_bind_s(BINDDN, BUID)
+ except ldap.LDAPError as e:
+ log.info("Exception (expected): %s" % type(e).__name__)
+ log.info('Desc ' + e.message['desc'])
+ assert isinstance(e, ldap.UNWILLING_TO_PERFORM)
+
+ log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, 'bogus', ldap.INVALID_CREDENTIALS.__name__))
+ try:
+ topology.standalone.simple_bind_s(BINDDN, 'bogus')
+ except ldap.LDAPError as e:
+ log.info("Exception (expected): %s" % type(e).__name__)
+ log.info('Desc ' + e.message['desc'])
+ assert isinstance(e, ldap.INVALID_CREDENTIALS)
+
log.info('SUCCESS')
| 0 |
6306fc4e8eb2fb5973360f550c83e3c9b220df5c
|
389ds/389-ds-base
|
Ticket 48119 - setup-ds.pl does not log invalid --file path errors the same way as other errors.
Bug Description: Errors occuring from Inf.pm are only written to STDERR
Fix Description: Write errors from Inf.pm using the debug function
https://fedorahosted.org/389/ticket/48119
Reviewed by: nhosoi(Thanks!)
|
commit 6306fc4e8eb2fb5973360f550c83e3c9b220df5c
Author: Mark Reynolds <[email protected]>
Date: Mon Jul 6 15:55:43 2015 -0400
Ticket 48119 - setup-ds.pl does not log invalid --file path errors the same way as other errors.
Bug Description: Errors occuring from Inf.pm are only written to STDERR
Fix Description: Write errors from Inf.pm using the debug function
https://fedorahosted.org/389/ticket/48119
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/admin/src/scripts/Inf.pm b/ldap/admin/src/scripts/Inf.pm
index 98649acc3..ec433e242 100644
--- a/ldap/admin/src/scripts/Inf.pm
+++ b/ldap/admin/src/scripts/Inf.pm
@@ -12,6 +12,7 @@
package Inf;
+use DSUtil;
use File::Temp qw(tempfile tempdir);
#require Exporter;
@@ -59,7 +60,7 @@ sub read {
$inffh = \*STDIN;
} else {
if (!open(INF, $filename)) {
- print STDERR "Error: could not open inf file $filename: $!\n";
+ debug(0, "Error: could not open inf file $filename: $!\n");
return;
}
$inffh = \*INF;
@@ -124,7 +125,7 @@ sub section {
my $key = shift;
if (!exists($self->{$key})) {
- print "Error: unknown inf section $key\n";
+ debug(0, "Error: unknown inf section $key\n");
return undef;
}
@@ -187,7 +188,7 @@ sub write {
my $savemask = umask(0077);
if (!$fh) {
if (!open(INF, ">$filename")) {
- print STDERR "Error: could not write inf file $filename: $!\n";
+ debug(0, "Error: could not write inf file $filename: $!\n");
umask($savemask);
return;
}
@@ -232,7 +233,7 @@ sub updateFromArgs {
$argsinf->{$sec}->{$parm} = $val;
}
} else { # error
- print STDERR "Error: unknown command line option $arg\n";
+ debug(0, "Error: unknown command line option $arg\n");
return;
}
}
| 0 |
ff997cd6fa5f2a0678721ba0b6a56fdce327feb0
|
389ds/389-ds-base
|
Ticket #48914 - db2bak.pl task enters infinitive loop when bak fs is almost full
Description: A backend helper function dblayer_copyfile returns an error
when any of the copy operation fails. But one of the caller functions
dblayer_backup ignored the error.
This patch checks the error returned from dblayer_copyfile and abort the
back-up.
Also, more error info is added to the log messages.
https://fedorahosted.org/389/ticket/48914
Reviewed by [email protected] (Thank you, Mark!!)
|
commit ff997cd6fa5f2a0678721ba0b6a56fdce327feb0
Author: Noriko Hosoi <[email protected]>
Date: Sat Jul 9 18:36:17 2016 -0700
Ticket #48914 - db2bak.pl task enters infinitive loop when bak fs is almost full
Description: A backend helper function dblayer_copyfile returns an error
when any of the copy operation fails. But one of the caller functions
dblayer_backup ignored the error.
This patch checks the error returned from dblayer_copyfile and abort the
back-up.
Also, more error info is added to the log messages.
https://fedorahosted.org/389/ticket/48914
Reviewed by [email protected] (Thank you, Mark!!)
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 93d42bed8..783d10423 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -5643,18 +5643,16 @@ dblayer_copyfile(char *source, char *destination, int overwrite, int mode)
source_fd = OPEN_FUNCTION(source,O_RDONLY,0);
if (-1 == source_fd)
{
- LDAPDebug1Arg(LDAP_DEBUG_ANY,
- "dblayer_copyfile: failed to open source file: %s\n",
- source);
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "dblayer_copyfile: failed to open source file %s by \"%s\"\n",
+ source, strerror(errno));
goto error;
}
/* Open destination file */
dest_fd = OPEN_FUNCTION(destination,O_CREAT | O_WRONLY, mode);
if (-1 == dest_fd)
{
- LDAPDebug1Arg(LDAP_DEBUG_ANY,
- "dblayer_copyfile: failed to open dest file: %s\n",
- destination);
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "dblayer_copyfile: failed to open dest file %s by \"%s\"\n",
+ destination, strerror(errno));
goto error;
}
LDAPDebug2Args(LDAP_DEBUG_BACKLDBM,
@@ -5662,24 +5660,38 @@ dblayer_copyfile(char *source, char *destination, int overwrite, int mode)
/* Loop round reading data and writing it */
while (1)
{
+ int i;
+ char *ptr = NULL;
return_value = read(source_fd,buffer,64*1024);
- if (return_value <= 0)
- {
+ if (return_value <= 0) {
/* means error or EOF */
- if (return_value < 0)
- {
- LDAPDebug1Arg(LDAP_DEBUG_ANY,
- "dblayer_copyfile: failed to read: %d\n", errno);
+ if (return_value < 0) {
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "dblayer_copyfile: failed to read by \"%s\": rval = %d\n",
+ strerror(errno), return_value);
}
break;
}
bytes_to_write = return_value;
- return_value = write(dest_fd,buffer,bytes_to_write);
- if (return_value != bytes_to_write)
- {
- /* means error */
- LDAPDebug1Arg(LDAP_DEBUG_ANY,
- "dblayer_copyfile: failed to write: %d\n", errno);
+ ptr = buffer;
+#define CPRETRY 4
+ for (i = 0; i < CPRETRY; i++) { /* retry twice */
+ return_value = write(dest_fd, ptr, bytes_to_write);
+ if (return_value == bytes_to_write) {
+ break;
+ } else {
+ /* means error */
+ LDAPDebug(LDAP_DEBUG_ANY, "dblayer_copyfile: failed to write by \"%s\"; real: %d bytes, exp: %d bytes\n",
+ strerror(errno), return_value, bytes_to_write);
+ if (return_value > 0) {
+ bytes_to_write -= return_value;
+ ptr += return_value;
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "dblayer_copyfile: retrying to write %d bytes\n", bytes_to_write);
+ } else {
+ break;
+ }
+ }
+ }
+ if ((CPRETRY == i) || (return_value < 0)) {
return_value = -1;
break;
}
@@ -5906,10 +5918,15 @@ dblayer_copy_directory(struct ldbminfo *li,
return_value = dblayer_copyfile(filename1, filename2,
0, priv->dblayer_file_mode);
}
+ if (return_value < 0) {
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "dblayer_copy_directory: Failed to copy file %s to %s\n",
+ filename1, filename2);
+ slapi_ch_free((void**)&filename1);
+ slapi_ch_free((void**)&filename2);
+ break;
+ }
slapi_ch_free((void**)&filename1);
slapi_ch_free((void**)&filename2);
- if (0 > return_value)
- break;
(*cnt)++;
}
@@ -6165,9 +6182,14 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
changelog_destdir, DBVERSION_FILENAME);
return_value = dblayer_copyfile(pathname1, pathname2,
0, priv->dblayer_file_mode);
- slapi_ch_free_string(&pathname1);
slapi_ch_free_string(&pathname2);
slapi_ch_free_string(&changelog_destdir);
+ if (0 > return_value) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "Backup: Failed to copy file %s\n", pathname1);
+ slapi_ch_free_string(&pathname1);
+ goto bail;
+ }
+ slapi_ch_free_string(&pathname1);
}
if (priv->dblayer_enable_transactions) {
/* now, get the list of logfiles that still exist */
@@ -6240,15 +6262,15 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
return_value = dblayer_copyfile(pathname1, pathname2,
0, priv->dblayer_file_mode);
if (0 > return_value) {
- LDAPDebug2Args(LDAP_DEBUG_ANY, "Backup: error in "
- "copying file '%s' (err=%d) -- Starting over...\n",
- pathname1, return_value);
+ LDAPDebug2Args(LDAP_DEBUG_ANY, "Backup: error in copying file '%s' (err=%d)\n",
+ pathname1, return_value);
if (task) {
- slapi_task_log_notice(task,
- "Error copying file '%s' (err=%d) -- Starting "
- "over...", pathname1, return_value);
+ slapi_task_log_notice(task, "Error copying file '%s' (err=%d)",
+ pathname1, return_value);
}
- ok = 0;
+ slapi_ch_free((void **)&pathname1);
+ slapi_ch_free((void **)&pathname2);
+ goto bail;
}
if ( g_get_shutdown() || c_get_shutdown() ) {
LDAPDebug0Args(LDAP_DEBUG_ANY, "Backup aborted\n");
@@ -6276,9 +6298,8 @@ dblayer_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
slapi_task_log_notice(task, "Backing up file %d (%s)", cnt, pathname2);
slapi_task_log_status(task, "Backing up file %d (%s)", cnt, pathname2);
}
- return_value =
- dblayer_copyfile(pathname1, pathname2, 0, priv->dblayer_file_mode);
- if (return_value) {
+ return_value = dblayer_copyfile(pathname1, pathname2, 0, priv->dblayer_file_mode);
+ if (0 > return_value) {
LDAPDebug(LDAP_DEBUG_ANY,
"Backup: error in copying version file "
"(%s -> %s): err=%d\n",
@@ -6458,11 +6479,12 @@ static int dblayer_copy_dirand_contents(char* src_dir, char* dst_dir, int mode,
slapi_task_log_status(task, "Moving file %s",
filename2);
}
- return_value = dblayer_copyfile(filename1, filename2, 0,
- mode);
+ return_value = dblayer_copyfile(filename1, filename2, 0, mode);
}
- if (0 > return_value)
+ if (0 > return_value) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "dblayer_copy_dirand_contents: failed to copy file %s\n", filename1);
break;
+ }
}
PR_CloseDir(dirhandle);
}
@@ -6838,6 +6860,10 @@ int dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task, char *
changelogdir, DBVERSION_FILENAME);
return_value = dblayer_copyfile(filename1, filename2,
0, priv->dblayer_file_mode);
+ if (0 > return_value) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "Restore: failed to copy file %s\n", filename1);
+ goto error_out;
+ }
}
continue;
}
@@ -6897,6 +6923,7 @@ int dblayer_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task, char *
return_value = dblayer_copyfile(filename1, filename2, 0,
priv->dblayer_file_mode);
if (0 > return_value) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "Restore: failed to copy file %s\n", filename1);
goto error_out;
}
cnt++;
| 0 |
02cdc0c0195301760400300469c258e9d268981b
|
389ds/389-ds-base
|
Issue 6319 - bdb subpackage has `%description` in the wrong place
Bug Description:
Currently, `%description` section comes before `Requires` and `Provides`.
This is a problem for rpmbuild because it is still reading lines for the
description when it encounters the `Requires` and `Provides` lines. So those
never end up in the subpackage RPM header which causes the rpminspect error.
Fix Description:
* The %description section needs to come after the Requires and Provides lines.
* Add `Requires` for 389-ds-base-libs
Fixes: https://github.com/389ds/389-ds-base/issues/6319
Relates: https://github.com/rpminspect/rpminspect/issues/1427
Reviewed by: @progier389, @droideck (Thanks!)
|
commit 02cdc0c0195301760400300469c258e9d268981b
Author: Viktor Ashirov <[email protected]>
Date: Thu Sep 5 15:41:09 2024 +0200
Issue 6319 - bdb subpackage has `%description` in the wrong place
Bug Description:
Currently, `%description` section comes before `Requires` and `Provides`.
This is a problem for rpmbuild because it is still reading lines for the
description when it encounters the `Requires` and `Provides` lines. So those
never end up in the subpackage RPM header which causes the rpminspect error.
Fix Description:
* The %description section needs to come after the Requires and Provides lines.
* Add `Requires` for 389-ds-base-libs
Fixes: https://github.com/389ds/389-ds-base/issues/6319
Relates: https://github.com/rpminspect/rpminspect/issues/1427
Reviewed by: @progier389, @droideck (Thanks!)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 3917ea0e7..cf8365248 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -284,19 +284,20 @@ SNMP Agent for the 389 Directory Server base package.
%if %{with bundle_libdb}
%package bdb
Summary: Berkeley Database backend for 389 Directory Server
-%description bdb
-Berkeley Database backend for 389 Directory Server
-Warning! This backend is deprecated in favor of lmdb and its support
-may be removed in future versions.
Requires: %{name} = %{version}-%{release}
+Requires: %{name}-libs = %{version}-%{release}
# Berkeley DB database libdb was marked as deprecated since F40:
# https://fedoraproject.org/wiki/Changes/389_Directory_Server_3.0.0
# because libdb was marked as deprecated since F33
# https://fedoraproject.org/wiki/Changes/Libdb_deprecated
Provides: deprecated()
-%endif
+%description bdb
+Berkeley Database backend for 389 Directory Server
+Warning! This backend is deprecated in favor of lmdb and its support
+may be removed in future versions.
+%endif
%package -n python%{python3_pkgversion}-lib389
Summary: A library for accessing, testing, and configuring the 389 Directory Server
| 0 |
7453f55416dd6e4904907598920605b610eccd67
|
389ds/389-ds-base
|
Resolves: #212098
Summary: Use autoconf to generate task perl script templates
Comment #10, #11, #14
|
commit 7453f55416dd6e4904907598920605b610eccd67
Author: Noriko Hosoi <[email protected]>
Date: Thu Oct 26 17:45:28 2006 +0000
Resolves: #212098
Summary: Use autoconf to generate task perl script templates
Comment #10, #11, #14
diff --git a/Makefile.am b/Makefile.am
index b71886644..422205913 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -51,6 +51,7 @@ propertydir = @sysconfdir@@propertydir@
schemadir = @sysconfdir@@schemadir@
serverdir = @prefix@@serverdir@
serverplugindir = @prefix@@serverplugindir@
+taskdir = @sysconfdir@@scripttemplatedir@
#------------------------
# Build Products
@@ -125,8 +126,39 @@ server_SCRIPTS = wrappers/dbscan \
wrappers/ldif \
wrappers/migratecred \
wrappers/mmldif \
- wrappers/pwdhash
-
+ wrappers/pwdhash \
+ $(srcdir)/ldap/admin/src/ds_newinst.pl
+
+task_SCRIPTS = ldap/admin/src/scripts/template-bak2db \
+ ldap/admin/src/scripts/template-db2bak \
+ ldap/admin/src/scripts/template-db2index \
+ ldap/admin/src/scripts/template-db2ldif \
+ ldap/admin/src/scripts/template-ldif2db \
+ ldap/admin/src/scripts/template-ldif2ldap \
+ ldap/admin/src/scripts/template-monitor \
+ ldap/admin/src/scripts/template-restoreconfig \
+ ldap/admin/src/scripts/template-saveconfig \
+ ldap/admin/src/scripts/template-start-slapd \
+ ldap/admin/src/scripts/template-stop-slapd \
+ ldap/admin/src/scripts/template-suffix2instance \
+ ldap/admin/src/scripts/template-vlvindex \
+ ldap/admin/src/scripts/template-bak2db.pl \
+ ldap/admin/src/scripts/template-cl-dump.pl \
+ ldap/admin/src/scripts/template-db2bak.pl \
+ ldap/admin/src/scripts/template-db2index.pl \
+ ldap/admin/src/scripts/template-db2ldif.pl \
+ ldap/admin/src/scripts/template-ldif2db.pl \
+ ldap/admin/src/scripts/template-ns-accountstatus.pl \
+ ldap/admin/src/scripts/template-ns-activate.pl \
+ ldap/admin/src/scripts/template-ns-inactivate.pl \
+ ldap/admin/src/scripts/template-ns-newpwpolicy.pl \
+ ldap/admin/src/scripts/template-repl-monitor-cgi.pl \
+ ldap/admin/src/scripts/template-repl-monitor.pl \
+ ldap/admin/src/scripts/template-verify-db.pl \
+ $(srcdir)/ldap/admin/src/scripts/template-migrate5to7 \
+ $(srcdir)/ldap/admin/src/scripts/template-migrate6to7 \
+ $(srcdir)/ldap/admin/src/scripts/template-migrateInstance7 \
+ $(srcdir)/ldap/admin/src/scripts/template-migrateTo7
#////////////////////////////////////////////////////////////////
#
@@ -584,7 +616,7 @@ libreferint_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS)
#------------------------
# libreplication-plugin
#------------------------
-libreplication_plugin_la_SOURCES = ldap/servers/plugins/replication/cl5_api.o \
+libreplication_plugin_la_SOURCES = ldap/servers/plugins/replication/cl5_api.c \
ldap/servers/plugins/replication/cl5_clcache.c \
ldap/servers/plugins/replication/cl5_config.c \
ldap/servers/plugins/replication/cl5_init.c \
diff --git a/Makefile.in b/Makefile.in
index cbbd9aea0..4cb2ee33d 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -52,6 +52,32 @@ noinst_PROGRAMS = makstrdb$(EXEEXT)
@SOLARIS_TRUE@am__append_1 = ldap/servers/slapd/tools/ldclt/opCheck.c
DIST_COMMON = $(am__configure_deps) $(srcdir)/Makefile.am \
$(srcdir)/Makefile.in $(top_srcdir)/configure \
+ $(top_srcdir)/ldap/admin/src/scripts/template-bak2db.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-bak2db.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-cl-dump.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-db2bak.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-db2bak.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-db2index.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-db2index.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-db2ldif.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-db2ldif.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-ldif2db.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-ldif2db.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-ldif2ldap.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-monitor.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-ns-accountstatus.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-ns-activate.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-ns-inactivate.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-ns-newpwpolicy.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-repl-monitor-cgi.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-repl-monitor.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-restoreconfig.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-saveconfig.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-start-slapd.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-stop-slapd.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-suffix2instance.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-verify-db.pl.in \
+ $(top_srcdir)/ldap/admin/src/scripts/template-vlvindex.in \
$(top_srcdir)/wrappers/dbscan.in \
$(top_srcdir)/wrappers/ds_newinst.in \
$(top_srcdir)/wrappers/dsktune.in \
@@ -78,7 +104,33 @@ mkinstalldirs = $(install_sh) -d
CONFIG_CLEAN_FILES = wrappers/dbscan wrappers/dsktune \
wrappers/ds_newinst wrappers/infadd wrappers/ldap-agent \
wrappers/ldclt wrappers/ldif wrappers/migratecred \
- wrappers/mmldif wrappers/pwdhash wrappers/rsearch
+ wrappers/mmldif wrappers/pwdhash wrappers/rsearch \
+ ldap/admin/src/scripts/template-bak2db \
+ ldap/admin/src/scripts/template-db2bak \
+ ldap/admin/src/scripts/template-db2index \
+ ldap/admin/src/scripts/template-db2ldif \
+ ldap/admin/src/scripts/template-ldif2db \
+ ldap/admin/src/scripts/template-ldif2ldap \
+ ldap/admin/src/scripts/template-monitor \
+ ldap/admin/src/scripts/template-restoreconfig \
+ ldap/admin/src/scripts/template-saveconfig \
+ ldap/admin/src/scripts/template-start-slapd \
+ ldap/admin/src/scripts/template-stop-slapd \
+ ldap/admin/src/scripts/template-suffix2instance \
+ ldap/admin/src/scripts/template-vlvindex \
+ ldap/admin/src/scripts/template-bak2db.pl \
+ ldap/admin/src/scripts/template-cl-dump.pl \
+ ldap/admin/src/scripts/template-db2bak.pl \
+ ldap/admin/src/scripts/template-db2index.pl \
+ ldap/admin/src/scripts/template-db2ldif.pl \
+ ldap/admin/src/scripts/template-ldif2db.pl \
+ ldap/admin/src/scripts/template-ns-accountstatus.pl \
+ ldap/admin/src/scripts/template-ns-activate.pl \
+ ldap/admin/src/scripts/template-ns-inactivate.pl \
+ ldap/admin/src/scripts/template-ns-newpwpolicy.pl \
+ ldap/admin/src/scripts/template-repl-monitor-cgi.pl \
+ ldap/admin/src/scripts/template-repl-monitor.pl \
+ ldap/admin/src/scripts/template-verify-db.pl
LIBRARIES = $(noinst_LIBRARIES)
ARFLAGS = cru
libavl_a_AR = $(AR) $(ARFLAGS)
@@ -107,9 +159,9 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
am__installdirs = "$(DESTDIR)$(serverdir)" \
"$(DESTDIR)$(serverplugindir)" "$(DESTDIR)$(bindir)" \
"$(DESTDIR)$(serverdir)" "$(DESTDIR)$(bindir)" \
- "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(configdir)" \
- "$(DESTDIR)$(datadir)" "$(DESTDIR)$(propertydir)" \
- "$(DESTDIR)$(schemadir)"
+ "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(taskdir)" \
+ "$(DESTDIR)$(configdir)" "$(DESTDIR)$(datadir)" \
+ "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(schemadir)"
serverLTLIBRARIES_INSTALL = $(INSTALL)
serverpluginLTLIBRARIES_INSTALL = $(INSTALL)
LTLIBRARIES = $(server_LTLIBRARIES) $(serverplugin_LTLIBRARIES)
@@ -364,7 +416,8 @@ libreferint_plugin_la_LIBADD =
am_libreferint_plugin_la_OBJECTS = ldap/servers/plugins/referint/libreferint_plugin_la-referint.lo
libreferint_plugin_la_OBJECTS = $(am_libreferint_plugin_la_OBJECTS)
libreplication_plugin_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
-am_libreplication_plugin_la_OBJECTS = ldap/servers/plugins/replication/libreplication_plugin_la-cl5_clcache.lo \
+am_libreplication_plugin_la_OBJECTS = ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.lo \
+ ldap/servers/plugins/replication/libreplication_plugin_la-cl5_clcache.lo \
ldap/servers/plugins/replication/libreplication_plugin_la-cl5_config.lo \
ldap/servers/plugins/replication/libreplication_plugin_la-cl5_init.lo \
ldap/servers/plugins/replication/libreplication_plugin_la-csnpl.lo \
@@ -679,7 +732,8 @@ rsearch_bin_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
binSCRIPT_INSTALL = $(INSTALL_SCRIPT)
serverSCRIPT_INSTALL = $(INSTALL_SCRIPT)
-SCRIPTS = $(bin_SCRIPTS) $(server_SCRIPTS)
+taskSCRIPT_INSTALL = $(INSTALL_SCRIPT)
+SCRIPTS = $(bin_SCRIPTS) $(server_SCRIPTS) $(task_SCRIPTS)
DEFAULT_INCLUDES = -I. -I$(srcdir)
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
@@ -853,6 +907,7 @@ build_os = @build_os@
build_vendor = @build_vendor@
configdir = @sysconfdir@@configdir@
datadir = @prefix@@datadir@
+db_bindir = @db_bindir@
db_inc = @db_inc@
db_incdir = @db_incdir@
db_lib = @db_lib@
@@ -870,6 +925,7 @@ icu_lib = @icu_lib@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
+ldapsdk_bindir = @ldapsdk_bindir@
ldapsdk_inc = @ldapsdk_inc@
ldapsdk_lib = @ldapsdk_lib@
ldapsdk_libdir = @ldapsdk_libdir@
@@ -898,6 +954,7 @@ sasl_lib = @sasl_lib@
sasl_libdir = @sasl_libdir@
sbindir = @sbindir@
schemadir = @sysconfdir@@schemadir@
+scripttemplatedir = @scripttemplatedir@
serverdir = @prefix@@serverdir@
serverplugindir = @prefix@@serverplugindir@
sharedstatedir = @sharedstatedir@
@@ -942,6 +999,7 @@ PAM_LINK = -lpam
#------------------------
BUILT_SOURCES = dirver.h dberrstrs.h
CLEANFILES = dirver.h dberrstrs.h ns-slapd.properties
+taskdir = @sysconfdir@@scripttemplatedir@
server_LTLIBRARIES = libslapd.la libback-ldbm.la libds_admin.la libns-dshttpd.la
serverplugin_LTLIBRARIES = libacl-plugin.la libattr-unique-plugin.la libchainingdb-plugin.la \
libcos-plugin.la libdes-plugin.la libdistrib-plugin.la \
@@ -1003,7 +1061,39 @@ server_SCRIPTS = wrappers/dbscan \
wrappers/ldif \
wrappers/migratecred \
wrappers/mmldif \
- wrappers/pwdhash
+ wrappers/pwdhash \
+ $(srcdir)/ldap/admin/src/ds_newinst.pl
+
+task_SCRIPTS = ldap/admin/src/scripts/template-bak2db \
+ ldap/admin/src/scripts/template-db2bak \
+ ldap/admin/src/scripts/template-db2index \
+ ldap/admin/src/scripts/template-db2ldif \
+ ldap/admin/src/scripts/template-ldif2db \
+ ldap/admin/src/scripts/template-ldif2ldap \
+ ldap/admin/src/scripts/template-monitor \
+ ldap/admin/src/scripts/template-restoreconfig \
+ ldap/admin/src/scripts/template-saveconfig \
+ ldap/admin/src/scripts/template-start-slapd \
+ ldap/admin/src/scripts/template-stop-slapd \
+ ldap/admin/src/scripts/template-suffix2instance \
+ ldap/admin/src/scripts/template-vlvindex \
+ ldap/admin/src/scripts/template-bak2db.pl \
+ ldap/admin/src/scripts/template-cl-dump.pl \
+ ldap/admin/src/scripts/template-db2bak.pl \
+ ldap/admin/src/scripts/template-db2index.pl \
+ ldap/admin/src/scripts/template-db2ldif.pl \
+ ldap/admin/src/scripts/template-ldif2db.pl \
+ ldap/admin/src/scripts/template-ns-accountstatus.pl \
+ ldap/admin/src/scripts/template-ns-activate.pl \
+ ldap/admin/src/scripts/template-ns-inactivate.pl \
+ ldap/admin/src/scripts/template-ns-newpwpolicy.pl \
+ ldap/admin/src/scripts/template-repl-monitor-cgi.pl \
+ ldap/admin/src/scripts/template-repl-monitor.pl \
+ ldap/admin/src/scripts/template-verify-db.pl \
+ $(srcdir)/ldap/admin/src/scripts/template-migrate5to7 \
+ $(srcdir)/ldap/admin/src/scripts/template-migrate6to7 \
+ $(srcdir)/ldap/admin/src/scripts/template-migrateInstance7 \
+ $(srcdir)/ldap/admin/src/scripts/template-migrateTo7
#////////////////////////////////////////////////////////////////
@@ -1448,7 +1538,7 @@ libreferint_plugin_la_CPPFLAGS = $(PLUGIN_CPPFLAGS)
#------------------------
# libreplication-plugin
#------------------------
-libreplication_plugin_la_SOURCES = ldap/servers/plugins/replication/cl5_api.o \
+libreplication_plugin_la_SOURCES = ldap/servers/plugins/replication/cl5_api.c \
ldap/servers/plugins/replication/cl5_clcache.c \
ldap/servers/plugins/replication/cl5_config.c \
ldap/servers/plugins/replication/cl5_init.c \
@@ -1764,6 +1854,58 @@ wrappers/pwdhash: $(top_builddir)/config.status $(top_srcdir)/wrappers/pwdhash.i
cd $(top_builddir) && $(SHELL) ./config.status $@
wrappers/rsearch: $(top_builddir)/config.status $(top_srcdir)/wrappers/rsearch.in
cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-bak2db: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-bak2db.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-db2bak: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-db2bak.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-db2index: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-db2index.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-db2ldif: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-db2ldif.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-ldif2db: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-ldif2db.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-ldif2ldap: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-ldif2ldap.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-monitor: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-monitor.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-restoreconfig: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-restoreconfig.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-saveconfig: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-saveconfig.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-start-slapd: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-start-slapd.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-stop-slapd: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-stop-slapd.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-suffix2instance: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-suffix2instance.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-vlvindex: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-vlvindex.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-bak2db.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-bak2db.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-cl-dump.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-cl-dump.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-db2bak.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-db2bak.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-db2index.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-db2index.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-db2ldif.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-db2ldif.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-ldif2db.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-ldif2db.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-ns-accountstatus.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-ns-accountstatus.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-ns-activate.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-ns-activate.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-ns-inactivate.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-ns-inactivate.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-ns-newpwpolicy.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-ns-newpwpolicy.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-repl-monitor-cgi.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-repl-monitor-cgi.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-repl-monitor.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-repl-monitor.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
+ldap/admin/src/scripts/template-verify-db.pl: $(top_builddir)/config.status $(top_srcdir)/ldap/admin/src/scripts/template-verify-db.pl.in
+ cd $(top_builddir) && $(SHELL) ./config.status $@
clean-noinstLIBRARIES:
-test -z "$(noinst_LIBRARIES)" || rm -f $(noinst_LIBRARIES)
@@ -2608,6 +2750,9 @@ ldap/servers/plugins/replication/$(am__dirstamp):
ldap/servers/plugins/replication/$(DEPDIR)/$(am__dirstamp):
@$(mkdir_p) ldap/servers/plugins/replication/$(DEPDIR)
@: > ldap/servers/plugins/replication/$(DEPDIR)/$(am__dirstamp)
+ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.lo: \
+ ldap/servers/plugins/replication/$(am__dirstamp) \
+ ldap/servers/plugins/replication/$(DEPDIR)/$(am__dirstamp)
ldap/servers/plugins/replication/libreplication_plugin_la-cl5_clcache.lo: \
ldap/servers/plugins/replication/$(am__dirstamp) \
ldap/servers/plugins/replication/$(DEPDIR)/$(am__dirstamp)
@@ -3519,6 +3664,25 @@ uninstall-serverSCRIPTS:
echo " rm -f '$(DESTDIR)$(serverdir)/$$f'"; \
rm -f "$(DESTDIR)$(serverdir)/$$f"; \
done
+install-taskSCRIPTS: $(task_SCRIPTS)
+ @$(NORMAL_INSTALL)
+ test -z "$(taskdir)" || $(mkdir_p) "$(DESTDIR)$(taskdir)"
+ @list='$(task_SCRIPTS)'; for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ if test -f $$d$$p; then \
+ f=`echo "$$p" | sed 's|^.*/||;$(transform)'`; \
+ echo " $(taskSCRIPT_INSTALL) '$$d$$p' '$(DESTDIR)$(taskdir)/$$f'"; \
+ $(taskSCRIPT_INSTALL) "$$d$$p" "$(DESTDIR)$(taskdir)/$$f"; \
+ else :; fi; \
+ done
+
+uninstall-taskSCRIPTS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(task_SCRIPTS)'; for p in $$list; do \
+ f=`echo "$$p" | sed 's|^.*/||;$(transform)'`; \
+ echo " rm -f '$(DESTDIR)$(taskdir)/$$f'"; \
+ rm -f "$(DESTDIR)$(taskdir)/$$f"; \
+ done
mostlyclean-compile:
-rm -f *.$(OBJEXT)
@@ -3685,6 +3849,8 @@ mostlyclean-compile:
-rm -f ldap/servers/plugins/pwdstorage/libpwdstorage_plugin_la-ssha_pwd.lo
-rm -f ldap/servers/plugins/referint/libreferint_plugin_la-referint.$(OBJEXT)
-rm -f ldap/servers/plugins/referint/libreferint_plugin_la-referint.lo
+ -rm -f ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.$(OBJEXT)
+ -rm -f ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.lo
-rm -f ldap/servers/plugins/replication/libreplication_plugin_la-cl5_clcache.$(OBJEXT)
-rm -f ldap/servers/plugins/replication/libreplication_plugin_la-cl5_clcache.lo
-rm -f ldap/servers/plugins/replication/libreplication_plugin_la-cl5_config.$(OBJEXT)
@@ -4415,6 +4581,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-sha_pwd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/pwdstorage/$(DEPDIR)/libpwdstorage_plugin_la-ssha_pwd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/referint/$(DEPDIR)/libreferint_plugin_la-referint.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_api.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_clcache.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_config.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_init.Plo@am__quote@
@@ -6057,6 +6224,13 @@ ldap/servers/plugins/referint/libreferint_plugin_la-referint.lo: ldap/servers/pl
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --mode=compile --tag=CC $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libreferint_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/referint/libreferint_plugin_la-referint.lo `test -f 'ldap/servers/plugins/referint/referint.c' || echo '$(srcdir)/'`ldap/servers/plugins/referint/referint.c
+ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.lo: ldap/servers/plugins/replication/cl5_api.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --mode=compile --tag=CC $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libreplication_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.lo -MD -MP -MF "ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_api.Tpo" -c -o ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.lo `test -f 'ldap/servers/plugins/replication/cl5_api.c' || echo '$(srcdir)/'`ldap/servers/plugins/replication/cl5_api.c; \
+@am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_api.Tpo" "ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_api.Plo"; else rm -f "ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_api.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/plugins/replication/cl5_api.c' object='ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --mode=compile --tag=CC $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libreplication_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/plugins/replication/libreplication_plugin_la-cl5_api.lo `test -f 'ldap/servers/plugins/replication/cl5_api.c' || echo '$(srcdir)/'`ldap/servers/plugins/replication/cl5_api.c
+
ldap/servers/plugins/replication/libreplication_plugin_la-cl5_clcache.lo: ldap/servers/plugins/replication/cl5_clcache.c
@am__fastdepCC_TRUE@ if $(LIBTOOL) --mode=compile --tag=CC $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libreplication_plugin_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/plugins/replication/libreplication_plugin_la-cl5_clcache.lo -MD -MP -MF "ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_clcache.Tpo" -c -o ldap/servers/plugins/replication/libreplication_plugin_la-cl5_clcache.lo `test -f 'ldap/servers/plugins/replication/cl5_clcache.c' || echo '$(srcdir)/'`ldap/servers/plugins/replication/cl5_clcache.c; \
@am__fastdepCC_TRUE@ then mv -f "ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_clcache.Tpo" "ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_clcache.Plo"; else rm -f "ldap/servers/plugins/replication/$(DEPDIR)/libreplication_plugin_la-cl5_clcache.Tpo"; exit 1; fi
@@ -8563,7 +8737,7 @@ distclean-tags:
distdir: $(DISTFILES)
$(am__remove_distdir)
mkdir $(distdir)
- $(mkdir_p) $(distdir)/m4 $(distdir)/wrappers
+ $(mkdir_p) $(distdir)/ldap/admin/src/scripts $(distdir)/m4 $(distdir)/wrappers
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
@@ -8693,7 +8867,7 @@ check: $(BUILT_SOURCES)
all-am: Makefile $(LIBRARIES) $(LTLIBRARIES) $(PROGRAMS) $(SCRIPTS) \
$(DATA)
installdirs:
- for dir in "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(serverplugindir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(datadir)" "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(schemadir)"; do \
+ for dir in "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(serverplugindir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(serverdir)" "$(DESTDIR)$(taskdir)" "$(DESTDIR)$(configdir)" "$(DESTDIR)$(datadir)" "$(DESTDIR)$(propertydir)" "$(DESTDIR)$(schemadir)"; do \
test -z "$$dir" || $(mkdir_p) "$$dir"; \
done
install: $(BUILT_SOURCES)
@@ -8820,7 +8994,8 @@ info-am:
install-data-am: install-configDATA install-dataDATA \
install-nodist_propertyDATA install-schemaDATA \
install-serverLTLIBRARIES install-serverPROGRAMS \
- install-serverSCRIPTS install-serverpluginLTLIBRARIES
+ install-serverSCRIPTS install-serverpluginLTLIBRARIES \
+ install-taskSCRIPTS
install-exec-am: install-binPROGRAMS install-binSCRIPTS
@@ -8854,7 +9029,8 @@ uninstall-am: uninstall-binPROGRAMS uninstall-binSCRIPTS \
uninstall-configDATA uninstall-dataDATA uninstall-info-am \
uninstall-nodist_propertyDATA uninstall-schemaDATA \
uninstall-serverLTLIBRARIES uninstall-serverPROGRAMS \
- uninstall-serverSCRIPTS uninstall-serverpluginLTLIBRARIES
+ uninstall-serverSCRIPTS uninstall-serverpluginLTLIBRARIES \
+ uninstall-taskSCRIPTS
.PHONY: CTAGS GTAGS all all-am am--refresh check check-am clean \
clean-binPROGRAMS clean-generic clean-libtool \
@@ -8871,15 +9047,16 @@ uninstall-am: uninstall-binPROGRAMS uninstall-binSCRIPTS \
install-nodist_propertyDATA install-schemaDATA \
install-serverLTLIBRARIES install-serverPROGRAMS \
install-serverSCRIPTS install-serverpluginLTLIBRARIES \
- install-strip installcheck installcheck-am installdirs \
- maintainer-clean maintainer-clean-generic mostlyclean \
- mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
- pdf pdf-am ps ps-am tags uninstall uninstall-am \
- uninstall-binPROGRAMS uninstall-binSCRIPTS \
+ install-strip install-taskSCRIPTS installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \
+ uninstall-am uninstall-binPROGRAMS uninstall-binSCRIPTS \
uninstall-configDATA uninstall-dataDATA uninstall-info-am \
uninstall-nodist_propertyDATA uninstall-schemaDATA \
uninstall-serverLTLIBRARIES uninstall-serverPROGRAMS \
- uninstall-serverSCRIPTS uninstall-serverpluginLTLIBRARIES
+ uninstall-serverSCRIPTS uninstall-serverpluginLTLIBRARIES \
+ uninstall-taskSCRIPTS
dirver.h: Makefile
diff --git a/configure b/configure
index ec3f91745..6bff0197c 100755
--- a/configure
+++ b/configure
@@ -465,7 +465,7 @@ ac_includes_default="\
#endif"
ac_default_prefix=/opt/fedora-ds
-ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar build build_cpu build_vendor build_os host host_cpu host_vendor host_os CXX CXXFLAGS LDFLAGS CPPFLAGS ac_ct_CXX EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE CC CFLAGS ac_ct_CC CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE EGREP LN_S ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB CPP CXXCPP F77 FFLAGS ac_ct_F77 LIBTOOL LIBOBJS platform_defs SOLARIS_TRUE SOLARIS_FALSE debug_defs PKG_CONFIG ICU_CONFIG NETSNMP_CONFIG nspr_inc nspr_lib nspr_libdir nss_inc nss_lib nss_libdir ldapsdk_inc ldapsdk_lib ldapsdk_libdir db_inc db_incdir db_lib db_libdir sasl_inc sasl_lib sasl_libdir svrcore_inc svrcore_lib icu_lib icu_inc icu_bin netsnmp_inc netsnmp_lib netsnmp_libdir netsnmp_link configdir propertydir schemadir serverdir serverplugindir WINNT_TRUE WINNT_FALSE LTLIBOBJS'
+ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datadir sysconfdir sharedstatedir localstatedir libdir includedir oldincludedir infodir mandir build_alias host_alias target_alias DEFS ECHO_C ECHO_N ECHO_T LIBS INSTALL_PROGRAM INSTALL_SCRIPT INSTALL_DATA CYGPATH_W PACKAGE VERSION ACLOCAL AUTOCONF AUTOMAKE AUTOHEADER MAKEINFO install_sh STRIP ac_ct_STRIP INSTALL_STRIP_PROGRAM mkdir_p AWK SET_MAKE am__leading_dot AMTAR am__tar am__untar build build_cpu build_vendor build_os host host_cpu host_vendor host_os CXX CXXFLAGS LDFLAGS CPPFLAGS ac_ct_CXX EXEEXT OBJEXT DEPDIR am__include am__quote AMDEP_TRUE AMDEP_FALSE AMDEPBACKSLASH CXXDEPMODE am__fastdepCXX_TRUE am__fastdepCXX_FALSE CC CFLAGS ac_ct_CC CCDEPMODE am__fastdepCC_TRUE am__fastdepCC_FALSE EGREP LN_S ECHO AR ac_ct_AR RANLIB ac_ct_RANLIB CPP CXXCPP F77 FFLAGS ac_ct_F77 LIBTOOL LIBOBJS platform_defs SOLARIS_TRUE SOLARIS_FALSE debug_defs PKG_CONFIG ICU_CONFIG NETSNMP_CONFIG nspr_inc nspr_lib nspr_libdir nss_inc nss_lib nss_libdir ldapsdk_inc ldapsdk_lib ldapsdk_libdir ldapsdk_bindir db_inc db_incdir db_lib db_libdir db_bindir sasl_inc sasl_lib sasl_libdir svrcore_inc svrcore_lib icu_lib icu_inc icu_bin netsnmp_inc netsnmp_lib netsnmp_libdir netsnmp_link configdir propertydir schemadir serverdir serverplugindir scripttemplatedir WINNT_TRUE WINNT_FALSE LTLIBOBJS'
ac_subst_files=''
# Initialize some variables set by options.
@@ -23904,6 +23904,8 @@ if test -z "$ldapsdk_inc" -o -z "$ldapsdk_lib"; then
echo "$as_me: error: LDAPSDK not found, specify with --with-ldapsdk-inc|-lib." >&2;}
{ (exit 1); exit 1; }; }
fi
+ldapsdk_bindir=/usr/lib/mozldap6
+
save_cppflags="$CPPFLAGS"
CPPFLAGS="$ldapsdk_inc $nss_inc $nspr_inc"
echo "$as_me:$LINENO: checking for ldap.h" >&5
@@ -24038,6 +24040,7 @@ else
echo "$as_me:$LINENO: result: no" >&5
echo "${ECHO_T}no" >&6
fi;
+db_bindir=/usr/bin
if test -z "$db_inc"; then
echo "$as_me:$LINENO: checking for db.h" >&5
@@ -24943,6 +24946,8 @@ fi
+
+
@@ -24954,6 +24959,8 @@ propertydir=/fedora-ds/property
schemadir=/fedora-ds/schema
serverdir=/usr/lib/fedora-ds
serverplugindir=/usr/lib/fedora-ds/plugins
+scripttemplatedir=/fedora-ds/script-templates
+
@@ -24999,6 +25006,61 @@ fi
ac_config_files="$ac_config_files wrappers/rsearch"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-bak2db"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-db2bak"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-db2index"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-db2ldif"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-ldif2db"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-ldif2ldap"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-monitor"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-restoreconfig"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-saveconfig"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-start-slapd"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-stop-slapd"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-suffix2instance"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-vlvindex"
+
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-bak2db.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-cl-dump.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-db2bak.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-db2index.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-db2ldif.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-ldif2db.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-ns-accountstatus.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-ns-activate.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-ns-inactivate.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-ns-newpwpolicy.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-repl-monitor-cgi.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-repl-monitor.pl"
+
+ ac_config_files="$ac_config_files ldap/admin/src/scripts/template-verify-db.pl"
+
+
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
@@ -25607,6 +25669,32 @@ do
"wrappers/mmldif" ) CONFIG_FILES="$CONFIG_FILES wrappers/mmldif" ;;
"wrappers/pwdhash" ) CONFIG_FILES="$CONFIG_FILES wrappers/pwdhash" ;;
"wrappers/rsearch" ) CONFIG_FILES="$CONFIG_FILES wrappers/rsearch" ;;
+ "ldap/admin/src/scripts/template-bak2db" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-bak2db" ;;
+ "ldap/admin/src/scripts/template-db2bak" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-db2bak" ;;
+ "ldap/admin/src/scripts/template-db2index" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-db2index" ;;
+ "ldap/admin/src/scripts/template-db2ldif" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-db2ldif" ;;
+ "ldap/admin/src/scripts/template-ldif2db" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-ldif2db" ;;
+ "ldap/admin/src/scripts/template-ldif2ldap" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-ldif2ldap" ;;
+ "ldap/admin/src/scripts/template-monitor" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-monitor" ;;
+ "ldap/admin/src/scripts/template-restoreconfig" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-restoreconfig" ;;
+ "ldap/admin/src/scripts/template-saveconfig" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-saveconfig" ;;
+ "ldap/admin/src/scripts/template-start-slapd" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-start-slapd" ;;
+ "ldap/admin/src/scripts/template-stop-slapd" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-stop-slapd" ;;
+ "ldap/admin/src/scripts/template-suffix2instance" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-suffix2instance" ;;
+ "ldap/admin/src/scripts/template-vlvindex" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-vlvindex" ;;
+ "ldap/admin/src/scripts/template-bak2db.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-bak2db.pl" ;;
+ "ldap/admin/src/scripts/template-cl-dump.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-cl-dump.pl" ;;
+ "ldap/admin/src/scripts/template-db2bak.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-db2bak.pl" ;;
+ "ldap/admin/src/scripts/template-db2index.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-db2index.pl" ;;
+ "ldap/admin/src/scripts/template-db2ldif.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-db2ldif.pl" ;;
+ "ldap/admin/src/scripts/template-ldif2db.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-ldif2db.pl" ;;
+ "ldap/admin/src/scripts/template-ns-accountstatus.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-ns-accountstatus.pl" ;;
+ "ldap/admin/src/scripts/template-ns-activate.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-ns-activate.pl" ;;
+ "ldap/admin/src/scripts/template-ns-inactivate.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-ns-inactivate.pl" ;;
+ "ldap/admin/src/scripts/template-ns-newpwpolicy.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-ns-newpwpolicy.pl" ;;
+ "ldap/admin/src/scripts/template-repl-monitor-cgi.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-repl-monitor-cgi.pl" ;;
+ "ldap/admin/src/scripts/template-repl-monitor.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-repl-monitor.pl" ;;
+ "ldap/admin/src/scripts/template-verify-db.pl" ) CONFIG_FILES="$CONFIG_FILES ldap/admin/src/scripts/template-verify-db.pl" ;;
"depfiles" ) CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
*) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
@@ -25774,10 +25862,12 @@ s,@nss_libdir@,$nss_libdir,;t t
s,@ldapsdk_inc@,$ldapsdk_inc,;t t
s,@ldapsdk_lib@,$ldapsdk_lib,;t t
s,@ldapsdk_libdir@,$ldapsdk_libdir,;t t
+s,@ldapsdk_bindir@,$ldapsdk_bindir,;t t
s,@db_inc@,$db_inc,;t t
s,@db_incdir@,$db_incdir,;t t
s,@db_lib@,$db_lib,;t t
s,@db_libdir@,$db_libdir,;t t
+s,@db_bindir@,$db_bindir,;t t
s,@sasl_inc@,$sasl_inc,;t t
s,@sasl_lib@,$sasl_lib,;t t
s,@sasl_libdir@,$sasl_libdir,;t t
@@ -25795,6 +25885,7 @@ s,@propertydir@,$propertydir,;t t
s,@schemadir@,$schemadir,;t t
s,@serverdir@,$serverdir,;t t
s,@serverplugindir@,$serverplugindir,;t t
+s,@scripttemplatedir@,$scripttemplatedir,;t t
s,@WINNT_TRUE@,$WINNT_TRUE,;t t
s,@WINNT_FALSE@,$WINNT_FALSE,;t t
s,@LTLIBOBJS@,$LTLIBOBJS,;t t
diff --git a/configure.ac b/configure.ac
index 46643b315..b4366f816 100644
--- a/configure.ac
+++ b/configure.ac
@@ -131,6 +131,7 @@ propertydir=/fedora-ds/property
schemadir=/fedora-ds/schema
serverdir=/usr/lib/fedora-ds
serverplugindir=/usr/lib/fedora-ds/plugins
+scripttemplatedir=/fedora-ds/script-templates
AC_SUBST(bindir)
AC_SUBST(configdir)
AC_SUBST(datadir)
@@ -138,6 +139,7 @@ AC_SUBST(propertydir)
AC_SUBST(schemadir)
AC_SUBST(serverdir)
AC_SUBST(serverplugindir)
+AC_SUBST(scripttemplatedir)
# WINNT should be true if building on Windows system not using
# cygnus, mingw, or the like and using cmd.exe as the shell
@@ -156,6 +158,20 @@ AC_CONFIG_FILES([wrappers/mmldif])
AC_CONFIG_FILES([wrappers/pwdhash])
AC_CONFIG_FILES([wrappers/rsearch])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-bak2db])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-db2bak])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-db2index])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-db2ldif])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-ldif2db])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-ldif2ldap])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-monitor])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-restoreconfig])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-saveconfig])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-start-slapd])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-stop-slapd])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-suffix2instance])
+AC_CONFIG_FILES([ldap/admin/src/scripts/template-vlvindex])
+
AC_CONFIG_FILES([ldap/admin/src/scripts/template-bak2db.pl])
AC_CONFIG_FILES([ldap/admin/src/scripts/template-cl-dump.pl])
AC_CONFIG_FILES([ldap/admin/src/scripts/template-db2bak.pl])
diff --git a/ldap/admin/src/create_instance.c b/ldap/admin/src/create_instance.c
index d07da2343..21bfad802 100644
--- a/ldap/admin/src/create_instance.c
+++ b/ldap/admin/src/create_instance.c
@@ -688,12 +688,12 @@ char *gen_script(char *s_root, char *name, char *fmt, ...)
return NULL;
}
-char *gen_perl_script_auto(char *s_root, char *cs_path, char *name,
- server_config_s *cf)
+char *gen_script_auto(char *s_root, char *cs_path,
+ char *name, server_config_s *cf)
{
char myperl[PATH_SIZE];
char fn[PATH_SIZE], ofn[PATH_SIZE];
- const char *table[12][2];
+ const char *table[16][2];
if (PR_FAILURE == PR_Access(cs_path, PR_ACCESS_EXISTS)) {
printf("Notice: %s does not exist, skipping %s . . .\n", cs_path, name);
@@ -738,9 +738,16 @@ char *gen_perl_script_auto(char *s_root, char *cs_path, char *name,
table[8][1] = cf->ldif_dir;
table[9][0] = "SERV-ID";
table[9][1] = cf->servid;
+
table[10][0] = "BAK-DIR";
table[10][1] = cf->bak_dir;
- table[11][0] = table[11][1] = NULL;
+ table[11][0] = "SERVER-DIR";
+ table[11][1] = cf->sroot;
+ table[12][0] = "CONFIG-DIR";
+ table[12][1] = cf->config_dir;
+ table[13][0] = "RUN-DIR";
+ table[13][1] = cf->run_dir;
+ table[14][0] = table[14][1] = NULL;
if (generate_script(ofn, fn, NEWSCRIPT_MODE, table) != 0) {
return make_error("Could not write %s to %s (%s).", ofn, fn,
@@ -755,7 +762,7 @@ char *gen_perl_script_auto_for_migration(char *s_root, char *cs_path, char *name
{
char myperl[PATH_SIZE];
char fn[PATH_SIZE], ofn[PATH_SIZE];
- const char *table[12][2];
+ const char *table[16][2];
char *fnp = NULL;
int fnlen = 0;
@@ -802,9 +809,16 @@ char *gen_perl_script_auto_for_migration(char *s_root, char *cs_path, char *name
table[8][1] = cf->ldif_dir;
table[9][0] = "SERV-ID";
table[9][1] = cf->servid;
+
table[10][0] = "BAK-DIR";
table[10][1] = cf->bak_dir;
- table[11][0] = table[11][1] = NULL;
+ table[11][0] = "SERVER-DIR";
+ table[11][1] = cf->sroot;
+ table[12][0] = "CONFIG-DIR";
+ table[12][1] = cf->config_dir;
+ table[13][0] = "RUN-DIR";
+ table[13][1] = cf->run_dir;
+ table[14][0] = table[14][1] = NULL;
if (generate_script(ofn, fn, NEWSCRIPT_MODE, table) != 0) {
return make_error("Could not write %s to %s (%s).", ofn, fn,
@@ -1026,243 +1040,6 @@ create_scripts(server_config_s *cf, char *param_name)
PR_snprintf(subdir, sizeof(subdir), "%s%c"PRODUCT_NAME"-%s",
sroot, FILE_PATHSEP, cf->servid);
#ifdef XP_UNIX
- /* Start/stop/rotate/restart scripts */
- if (getenv("USE_DEBUGGER"))
- {
- char *debugger = getenv("DSINST_DEBUGGER");
- char *debugger_command = getenv("DSINST_DEBUGGER_CMD");
- if (! debugger) {
- debugger = "gdb";
- }
- if (! debugger_command) {
- debugger_command = "echo"; /* e.g. do nothing */
- }
-#ifdef OSF1
- printf("-D %s -i %s/pid -d %s -z\n", cf->config_dir, cf->run_dir,
- cf->loglevel ? cf->loglevel : "0");
- t = gen_script(cf->inst_dir, START_SCRIPT,
- "\n"
- "# Script that starts the %s.\n"
- "# Exit status can be:\n"
- "# 0: Server started successfully\n"
- "# 1: Server could not be started\n"
- "# 2: Server already running\n"
- "\n"
- "NETSITE_ROOT=%s\n"
- "export NETSITE_ROOT\n"
- "%s=%s\n"
- "export %s\n"
- "PIDFILE=%s/pid\n"
- "if test -f $PIDFILE ; then\n"
- " PID=`cat $PIDFILE`\n"
- " if kill -0 $PID > /dev/null 2>&1 ; then\n"
- " echo There is an %s process already running: $PID\n"
- " exit 2;\n"
- " else\n"
- " rm -f $PIDFILE\n"
- " fi\n"
- "fi\n"
- "cd %s; ./%s -D %s -i %s/pid -d %s -z \"$@\" &\n"
- "loop_counter=1\n"
- "max_count=120\n"
- "while test $loop_counter -le $max_count; do\n"
- " loop_counter=`expr $loop_counter + 1`\n"
- " if test ! -f $PIDFILE ; then\n"
- " sleep 1;\n"
- " else\n"
- " PID=`cat $PIDFILE`\n"
- /* rbyrne: setuputil takes any message here as an error:
- " echo Server has been started. ns-slapd process started: $PID\n"*/
- " exit 0;\n"
- " fi\n"
- "done\n"
- "echo Server not running!! Failed to start ns-slapd process.\n"
- "exit 1\n",
- PRODUCT_BIN,
- sroot, DS_CONFIG_DIR, cf->config_dir, DS_CONFIG_DIR, cf->run_dir,
- PRODUCT_BIN,
- sroot, PRODUCT_BIN, cf->config_dir, cf->run_dir,
- cf->loglevel ? cf->loglevel : "0"
- );
-#else
- t = gen_script(cf->inst_dir, START_SCRIPT,
- "\n"
- "# Script that starts the ns-slapd server.\n"
- "# Exit status can be:\n"
- "# 0: Server started successfully\n"
- "# 1: Server could not be started\n"
- "# 2: Server already running\n"
- "\n"
- "NETSITE_ROOT=%s\n"
- "export NETSITE_ROOT\n"
- "%s=%s\n"
- "export %s\n"
- "PIDFILE=%s/pid\n"
- "if test -f $PIDFILE ; then\n"
- " PID=`cat $PIDFILE`\n"
- " if kill -0 $PID > /dev/null 2>&1 ; then\n"
- " echo There is an ns-slapd process already running: $PID\n"
- " exit 2;\n"
- " else\n"
- " rm -f $PIDFILE\n"
- " fi\n"
- "fi\n"
- "if [ -x /usr/bin/xterm ]; then\n"
- " xterm=/usr/bin/xterm\n"
- "else\n"
- " xterm=/usr/openwin/bin/xterm\n"
- "fi\n"
- "cd %s; $xterm -title debugger -e %s -c \"dbxenv follow_fork_mode child ; break main ; %s ; run -D %s -i %s/pid -d %s -z $*\" %s &\n"
- "loop_counter=1\n"
- "max_count=120\n"
- "while test $loop_counter -le $max_count; do\n"
- " loop_counter=`expr $loop_counter + 1`\n"
- " if test ! -f $PIDFILE ; then\n"
- " sleep 1;\n"
- " else\n"
- " PID=`cat $PIDFILE`\n"
- /* rbyrne: setuputil takes any message here as an error:
- " echo Server has been started. ns-slapd process started: $PID\n"*/
- " exit 0;\n"
- " fi\n"
- "done\n"
- "echo Server not running!! Failed to start ns-slapd process.\n"
- "exit 1\n",
- sroot, DS_CONFIG_DIR, cf->config_dir, DS_CONFIG_DIR, cf->run_dir,
- sroot, debugger, debugger_command, cf->config_dir, cf->run_dir,
- cf->loglevel ? cf->loglevel : "0", PRODUCT_BIN
- );
-#endif
- }
- else
- {
- t = gen_script(cf->inst_dir, START_SCRIPT,
- "\n"
- "# Script that starts the ns-slapd server.\n"
- "# Exit status can be:\n"
- "# 0: Server started successfully\n"
- "# 1: Server could not be started\n"
- "# 2: Server already running\n"
- "\n"
- "NETSITE_ROOT=%s\n"
- "export NETSITE_ROOT\n"
- "%s=%s\n"
- "export %s\n"
- "PIDFILE=%s/pid\n"
- "STARTPIDFILE=%s/startpid\n"
- "if test -f $STARTPIDFILE ; then\n"
- " PID=`cat $STARTPIDFILE`\n"
- " if kill -0 $PID > /dev/null 2>&1 ; then\n"
- " echo There is an %s process already running: $PID\n"
- " exit 2;\n"
- " else\n"
- " rm -f $STARTPIDFILE\n"
- " fi\n"
- "fi\n"
- "if test -f $PIDFILE ; then\n"
- " PID=`cat $PIDFILE`\n"
- " if kill -0 $PID > /dev/null 2>&1 ; then\n"
- " echo There is an %s running: $PID\n"
- " exit 2;\n"
- " else\n"
- " rm -f $PIDFILE\n"
- " fi\n"
- "fi\n"
- "cd %s; ./%s -D %s -i %s/pid -w $STARTPIDFILE \"$@\"\n"
- "if [ $? -ne 0 ]; then\n"
- " exit 1\n"
- "fi\n"
- "\n"
- "loop_counter=1\n"
- "# wait for 10 seconds for the start pid file to appear\n"
- "max_count=10\n"
- "while test $loop_counter -le $max_count; do\n"
- " loop_counter=`expr $loop_counter + 1`\n"
- " if test ! -f $STARTPIDFILE ; then\n"
- " sleep 1;\n"
- " else\n"
- " PID=`cat $STARTPIDFILE`\n"
- " fi\n"
- "done\n"
- "if test ! -f $STARTPIDFILE ; then\n"
- " echo Server failed to start !!! Please check errors log for problems\n"
- " exit 1\n"
- "fi\n"
- "loop_counter=1\n"
- "# wait for 10 minutes (600 times 1 seconds)\n"
- "max_count=600\n" /* 10 minutes */
- "while test $loop_counter -le $max_count; do\n"
- " loop_counter=`expr $loop_counter + 1`\n"
- " if test ! -f $PIDFILE ; then\n"
- " if kill -0 $PID > /dev/null 2>&1 ; then\n"
- " sleep 1\n"
- " else\n"
- " echo Server failed to start !!! Please check errors log for problems\n"
- " exit 1\n"
- " fi\n"
- " else\n"
- " PID=`cat $PIDFILE`\n"
- /* rbyrne: setuputil takes any message here as an error:
- " echo Server has been started. ns-slapd process started: $PID\n"*/
- " exit 0;\n"
- " fi\n"
- "done\n"
- "echo Server not running!! Failed to start ns-slapd process. Please check the errors log for problems.\n"
- "exit 1\n",
- sroot, DS_CONFIG_DIR, cf->config_dir, DS_CONFIG_DIR, cf->run_dir,
- cf->run_dir, PRODUCT_BIN, PRODUCT_BIN,
- cf->sroot, PRODUCT_BIN, cf->config_dir, cf->run_dir
- );
- }
- if(t) return t;
-
- t = gen_script(cf->inst_dir, STOP_SCRIPT,
- "\n"
- "# Script that stops the ns-slapd server.\n"
- "# Exit status can be:\n"
- "# 0: Server stopped successfully\n"
- "# 1: Server could not be stopped\n"
- "# 2: Server was not running\n"
- "\n"
- "PIDFILE=%s/pid\n"
- "if test ! -f $PIDFILE ; then\n"
- " echo No ns-slapd PID file found. Server is probably not running\n"
- " exit 2\n"
- "fi\n"
- "PID=`cat $PIDFILE`\n"
- "# see if the server is already stopped\n"
- "kill -0 $PID > /dev/null 2>&1 || {\n"
- " echo Server not running\n"
- " if test -f $PIDFILE ; then\n"
- " rm -f $PIDFILE\n"
- " fi\n"
- " exit 2\n"
- "}\n"
- "# server is running - kill it\n"
- "kill $PID\n"
- "loop_counter=1\n"
- "# wait for 10 minutes (600 times 1 second)\n"
- "max_count=600\n" /* 10 minutes */
- "while test $loop_counter -le $max_count; do\n"
- " loop_counter=`expr $loop_counter + 1`\n"
- " if kill -0 $PID > /dev/null 2>&1 ; then\n"
- " sleep 1;\n"
- " else\n"
- " if test -f $PIDFILE ; then\n"
- " rm -f $PIDFILE\n"
- " fi\n"
- /* rbyrne: setuputil takes any message here as an error:
- " echo Server has been stopped. ns-slapd process stopped: $PID\n"*/
- " exit 0\n"
- " fi\n"
- "done\n"
- "if test -f $PIDFILE ; then\n"
- " echo Server still running!! Failed to stop the ns-slapd process: $PID. Please check the errors log for problems.\n"
- "fi\n"
- "exit 1\n",
- cf->run_dir);
- if(t) return t;
-
t = gen_script(cf->inst_dir, RESTART_SCRIPT,
"\n"
"# Script that restarts the ns-slapd server.\n"
@@ -1991,50 +1768,29 @@ ds_cre_subdirs(server_config_s *cf, struct passwd* pw)
}
#define CREATE_LDIF2DB() \
- gen_perl_script_auto(mysroot, mycs_path, "ldif2db.pl", cf)
+ gen_script_auto(mysroot, mycs_path, "ldif2db.pl", cf)
#define CREATE_DB2INDEX() \
- gen_perl_script_auto(mysroot, mycs_path, "db2index.pl", cf)
+ gen_script_auto(mysroot, mycs_path, "db2index.pl", cf)
#define CREATE_DB2LDIF() \
- gen_perl_script_auto(mysroot, mycs_path, "db2ldif.pl", cf)
+ gen_script_auto(mysroot, mycs_path, "db2ldif.pl", cf)
#define CREATE_DB2BAK() \
- gen_perl_script_auto(mysroot, mycs_path, "db2bak.pl", cf)
+ gen_script_auto(mysroot, mycs_path, "db2bak.pl", cf)
#define CREATE_BAK2DB() \
- gen_perl_script_auto(mysroot, mycs_path, "bak2db.pl", cf)
+ gen_script_auto(mysroot, mycs_path, "bak2db.pl", cf)
#define CREATE_VERIFYDB() \
- gen_perl_script_auto(mysroot, mycs_path, "verify-db.pl", cf)
+ gen_script_auto(mysroot, mycs_path, "verify-db.pl", cf)
/* tentatively moved to mycs_path */
#define CREATE_REPL_MONITOR_CGI() \
- gen_perl_script_auto(mysroot, mycs_path, "repl-monitor-cgi.pl", cf)
+ gen_script_auto(mysroot, mycs_path, "repl-monitor-cgi.pl", cf)
#define CREATE_ACCOUNT_INACT(_commandName) \
- gen_perl_script_auto(mysroot, cs_path, _commandName, cf)
-
-#define CREATE_DSML() \
- gen_perl_script_auto(mysroot, mycs_path, "dsml-activate.pl", cf)
-
-#define CREATE_MIGRATETO5() \
- gen_perl_script_auto_for_migration(mysroot, mycs_path, "migrateTo5", cf)
-
-#define CREATE_MIGRATE50TO51() \
- gen_perl_script_auto_for_migration(mysroot, mycs_path, "migrate50to51", cf)
-
-#define CREATE_MIGRATEINSTANCE5() \
- gen_perl_script_auto_for_migration(mysroot, mycs_path, "migrateInstance5", cf)
-
-#define CREATE_MIGRATE5TO6() \
- gen_perl_script_auto_for_migration(mysroot, mycs_path, "migrate5to6", cf)
-
-#define CREATE_MIGRATEINSTANCE6() \
- gen_perl_script_auto_for_migration(mysroot, mycs_path, "migrateInstance6", cf)
-
-#define CREATE_MIGRATETO6() \
- gen_perl_script_auto_for_migration(mysroot, mycs_path, "migrateTo6", cf)
+ gen_script_auto(mysroot, cs_path, _commandName, cf)
#define CREATE_MIGRATE5TO7() \
gen_perl_script_auto_for_migration(mysroot, mycs_path, "migrate5to7", cf)
@@ -2049,7 +1805,46 @@ ds_cre_subdirs(server_config_s *cf, struct passwd* pw)
gen_perl_script_auto_for_migration(mysroot, mycs_path, "migrateTo7", cf)
#define CREATE_NEWPWPOLICY() \
- gen_perl_script_auto(mysroot, mycs_path, "ns-newpwpolicy.pl", cf)
+ gen_script_auto(mysroot, mycs_path, "ns-newpwpolicy.pl", cf)
+
+#define CREATE_BAK2DB_SH() \
+ gen_script_auto(mysroot, mycs_path, "bak2db", cf)
+
+#define CREATE_DB2BAK_SH() \
+ gen_script_auto(mysroot, mycs_path, "db2bak", cf)
+
+#define CREATE_DB2INDEX_SH() \
+ gen_script_auto(mysroot, mycs_path, "db2index", cf)
+
+#define CREATE_DB2LDIF_SH() \
+ gen_script_auto(mysroot, mycs_path, "db2ldif", cf)
+
+#define CREATE_LDIF2DB_SH() \
+ gen_script_auto(mysroot, mycs_path, "ldif2db", cf)
+
+#define CREATE_LDIF2LDAP_SH() \
+ gen_script_auto(mysroot, mycs_path, "ldif2ldap", cf)
+
+#define CREATE_MONITOR_SH() \
+ gen_script_auto(mysroot, mycs_path, "monitor", cf)
+
+#define CREATE_RESTORECONFIG_SH() \
+ gen_script_auto(mysroot, mycs_path, "restoreconfig", cf)
+
+#define CREATE_SAVECONFIG_SH() \
+ gen_script_auto(mysroot, mycs_path, "saveconfig", cf)
+
+#define CREATE_START_SLAPD_SH() \
+ gen_script_auto(mysroot, mycs_path, "start-slapd", cf)
+
+#define CREATE_STOP_SLAPD_SH() \
+ gen_script_auto(mysroot, mycs_path, "stop-slapd", cf)
+
+#define CREATE_SUFFIX2INSTANCE_SH() \
+ gen_script_auto(mysroot, mycs_path, "suffix2instance", cf)
+
+#define CREATE_VLVINDEX_SH() \
+ gen_script_auto(mysroot, mycs_path, "vlvindex", cf)
#ifdef XP_UNIX
char *ds_gen_scripts(char *sroot, server_config_s *cf, char *cs_path)
@@ -2073,164 +1868,61 @@ char *ds_gen_scripts(char *sroot, server_config_s *cf, char *cs_path)
mysroot = sroot;
mycs_path = cs_path;
- t = gen_script(cs_path, "monitor",
- "if [ \"x$1\" != \"x\" ];\nthen MDN=\"$1\";\nelse MDN=\"cn=monitor\";\n fi\n"
-
- "cd %s\nPATH=%s:$PATH;export PATH\n"
- "ldapsearch -p %s -b \"$MDN\" -s base \"objectClass=*\"\n",
- tools, tools, cf->servport);
+ t = CREATE_LDIF2DB();
if(t) return t;
-
- t = gen_script(cs_path, "saveconfig",
- "cd %s\n"
- "echo saving configuration ...\n"
- "conf_ldif=%s/confbak/%s-`date +%%Y_%%m_%%d_%%H%%M%%S`.ldif\n"
- "./%s db2ldif -N -D %s "
- "-s \"%s\" -a $conf_ldif -n NetscapeRoot 2>&1\n"
- "if [ \"$?\" -ge 1 ] \nthen\n"
- " echo Error occurred while saving configuration\n"
- " exit 1\n"
- "fi\n"
- "exit 0\n",
- server,
- cf->config_dir, cf->servid,
- PRODUCT_BIN, cf->config_dir,
- cf->netscaperoot);
+
+ t = CREATE_DB2INDEX();
if(t) return t;
-
- t = gen_script(cs_path, "restoreconfig",
- "cd %s\n"
- "conf_ldif=`ls -1t %s/confbak/%s-*.ldif | head -1`\n"
- "if [ -z \"$conf_ldif\" ]\n"
- "then\n"
- " echo No configuration to restore in %s/confbak ; exit 1\n"
- "fi\n"
- "echo Restoring $conf_ldif\n"
- "./%s ldif2db -D %s"
- " -i $conf_ldif -n NetscapeRoot 2>&1\n"
- "exit $?\n",
- server,
- cf->config_dir, cf->servid,
- cf->config_dir,
- PRODUCT_BIN, cf->config_dir);
+
+ t = CREATE_MIGRATE5TO7();
if(t) return t;
-
- t = gen_script(cs_path, "ldif2db",
- "cd %s\n"
- "if [ $# -lt 4 ]\nthen\n"
- "\techo \"Usage: ldif2db -n backend_instance | {-s includesuffix}* [{-x excludesuffix}*]\"\n"
- "\techo \" {-i ldiffile}* [-O]\"\n"
- "\techo \"Note: either \\\"-n backend_instance\\\" or \\\"-s includesuffix\\\" and \\\"-i ldiffile\\\" are required.\"\n"
- "\texit 1\n"
- "fi\n\n"
- "echo importing data ...\n"
- "./%s ldif2db -D %s \"$@\" 2>&1\n"
- "exit $?\n",
- server, PRODUCT_BIN, cf->config_dir);
+
+ t = CREATE_MIGRATE6TO7();
if(t) return t;
-#if defined(UPGRADEDB)
- t = gen_script(cs_path, "upgradedb",
- "cd %s\n"
- "if [ \"$#\" -eq 1 ]\nthen\n"
- "\tbak_dir=$1\nelse\n"
- "\tbak_dir=%s/upgradedb_`date +%%Y_%%m_%%d_%%H_%%M_%%S`\nfi\n\n"
- "echo upgrade index files ...\n"
- "./%s upgradedb -D %s -a $bak_dir\n",
- server, cf->bak_dir, PRODUCT_BIN, cf->config_dir);
+ t = CREATE_MIGRATEINSTANCE7();
if(t) return t;
-#endif
- /* new code for dsml import */
- /* OBSOLETE??? */
- t = gen_script(cs_path, "dsml2db",
- "cd %s\n"
- "if [ $# -lt 4 ]\nthen\n"
- "\techo \"Usage: dsml2db -n backend_instance | {-s includesuffix}* [{-x excludesuffix}*]\"\n"
- "\techo \" {-i dsmlfile}\"\n"
- "\techo \"Note: either \\\"-n backend_instance\\\" or \\\"-s includesuffix\\\" and \\\"-i dsmlfile\\\" are required.\"\n"
- "\texit 1\n"
- "fi\n\n"
- "set_dsml=0\n"
- "dsml_file=\"mydummy\"\n"
- "space=\" \"\n"
- "i=0\n"
- "for arg in \"$@\"\ndo\n"
- "\tif [ \"$arg\" = '-i' ];\n\tthen\n"
- "\t\tset_dsml=1\n"
- "\telif [ $set_dsml -eq 1 ];\n\tthen\n"
- "\t\tdsml_file=$arg\n"
- "\t\tset_dsml=2\n"
- "\telse\n"
- "\t\teval a$i=\\\"$arg\\\"\n"
- "\t\ti=`expr $i + 1`\n"
- "\tfi\n"
- "done\n"
- "max=$i; i=0;\n"
- "shift $#\n"
- "while [ $i -lt $max ]; do\n"
- "\teval arg=\\$a$i\n"
- "\tset -- \"$@\" \"$arg\"\n"
- "\ti=`expr $i + 1`\n"
- "done\n"
- "\tif [ $dsml_file = \"mydummy\" ]\n\tthen\n\t"
- "echo \"Need a DSML file as input\""
- "\n\t\t exit 1"
- "\n\tfi\n"
- "\tif [ -f $dsml_file ] && [ -r $dsml_file ]\n\tthen\n"
- "\t\t%s/bin/base/jre/bin/java -Dverify=true -classpath %s/java/jars/crimson.jar:%s/java/ldapjdk.jar:%s/java/jars/xmltools.jar com.netscape.xmltools.DSML2LDIF $dsml_file\n"
- "\t\tif [ $? = 0 ]; then\n"
- "\t\techo importing data ...\n"
- "\t\t%s/bin/base/jre/bin/java -classpath %s/java/jars/crimson.jar:%s/java/ldapjdk.jar:%s/java/jars/xmltools.jar com.netscape.xmltools.DSML2LDIF $dsml_file | ./ns-slapd ldif2db -D %s \"$@\" -i -\n"
- "\t\texit $?\n"
- "\t\tfi\n"
- "\telse\n"
- "\t\techo \"File $dsml_file invalid. Absolute path is required.\"\n\t\texit 1\n"
- "\tfi\n",
- server,sroot,sroot,sroot,sroot,sroot,sroot,sroot,sroot,cs_path);
+ t = CREATE_MIGRATETO7();
if(t) return t;
-
- t = gen_script(cs_path, "ldif2ldap",
- "cd %s\n"
- "./ldapmodify -a -p %s -D \"$1\" -w \"$2\" -f $3\n",
- tools, cf->servport);
+
+ t = CREATE_BAK2DB_SH();
if(t) return t;
-
- t = CREATE_LDIF2DB();
+
+ t = CREATE_DB2BAK_SH();
if(t) return t;
- t = CREATE_DB2INDEX();
+ t = CREATE_DB2INDEX_SH();
if(t) return t;
-/*
- t = CREATE_MIGRATETO5();
+
+ t = CREATE_DB2LDIF_SH();
if(t) return t;
-
- t = CREATE_MIGRATE50TO51();
+
+ t = CREATE_LDIF2DB_SH();
if(t) return t;
- t = CREATE_MIGRATEINSTANCE5();
+ t = CREATE_LDIF2LDAP_SH();
if(t) return t;
- t = CREATE_MIGRATE5TO6();
+ t = CREATE_MONITOR_SH();
if(t) return t;
- t = CREATE_MIGRATEINSTANCE6();
+ t = CREATE_RESTORECONFIG_SH();
if(t) return t;
- t = CREATE_MIGRATETO6();
+ t = CREATE_SAVECONFIG_SH();
if(t) return t;
-*/
- t = CREATE_MIGRATE5TO7();
+ t = CREATE_START_SLAPD_SH();
if(t) return t;
- t = CREATE_MIGRATE6TO7();
+ t = CREATE_STOP_SLAPD_SH();
if(t) return t;
- t = CREATE_MIGRATEINSTANCE7();
+ t = CREATE_SUFFIX2INSTANCE_SH();
if(t) return t;
- t = CREATE_MIGRATETO7();
+ t = CREATE_VLVINDEX_SH();
if(t) return t;
t = gen_script(cs_path, "getpwenc",
@@ -2245,146 +1937,9 @@ char *ds_gen_scripts(char *sroot, server_config_s *cf, char *cs_path)
server, cf->config_dir, cs_path);
if(t) return t;
- t = gen_script(cs_path, "db2ldif",
- "cd %s\n"
- "if [ \"$#\" -lt 2 ];\nthen\n"
- "\techo \"Usage: db2ldif {-n backend_instance}* | {-s includesuffix}*\"\n"
- "\techo \" [{-x excludesuffix}*] [-a outputfile]\"\n"
- "\techo \" [-N] [-r] [-C] [-u] [-U] [-m] [-M] [-1]\"\n"
- "\techo \"Note: either \\\"-n backend_instance\\\" or \\\"-s includesuffix\\\" is required.\"\n"
- "\texit 1\n"
- "fi\n\n"
- "set_ldif=0\n"
- "ldif_file=\"mydummy\"\n"
- "for arg in \"$@\"\ndo\n"
- "\tif [ \"$arg\" = '-a' ];\n\tthen\n"
- "\t\tset_ldif=1\n"
- "\telif [ $set_ldif -eq 1 ];\n\tthen\n"
- "\t\tldif_file=$arg\n"
- "\t\tset_ldif=2\n"
- "\tfi\n"
- "done\n"
- "if [ $ldif_file = \"mydummy\" ]\nthen\n"
- "\tldif_file=%s/%s-`date +%%Y_%%m_%%d_%%H%%M%%S`.ldif\nfi\n"
- "if [ $set_ldif -eq 2 ]\nthen\n"
- "./%s db2ldif -D %s \"$@\"\nelse\n"
- "./%s db2ldif -D %s -a $ldif_file \"$@\"\nfi\n",
- server,
- cf->ldif_dir, cf->servid,
- PRODUCT_BIN, cf->config_dir,
- PRODUCT_BIN, cf->config_dir);
- if(t) return t;
-
- /* new code for dsml export */
- t = gen_script(cs_path, "db2dsml",
- "cd %s\n"
- "if [ \"$#\" -lt 2 ];\nthen\n"
- "\techo \"Usage: db2dsml {-n backend_instance} | {-s includesuffix}*\"\n"
- "\techo \" [{-x excludesuffix}*] [-a outputfile]\"\n"
- "\techo \" [-u]\"\n"
- "\techo \"Note: either \\\"-n backend_instance\\\" or \\\"-s includesuffix\\\" is required.\"\n"
- "\texit 1\n"
- "fi\n\n"
- "set_dsml=0\n"
- "dsml_file=\"mydummy\"\n"
- "arg_list=\"\"\n"
- "space=\" \"\n"
- "for arg in \"$@\"\ndo\n"
- "\tif [ \"$arg\" = '-a' ];\n\tthen\n"
- "\t\tset_dsml=1\n"
- "\telif [ $set_dsml -eq 1 ];\n\tthen\n"
- "\t\tdsml_file=$arg\n"
- "\t\tset_dsml=2\n"
- "\telse\n"
- "\t\targ_list=$arg_list$space$arg\n"
- "\tfi\n"
- "done\n"
- "if [ $dsml_file = \"mydummy\" ]\nthen\n"
- "\tdsml_file=%s/dsml/`date +%%Y_%%m_%%d_%%H%%M%%S`.dsml\n"
- "\techo dsmlfile: $dsml_file\n"
- "fi\n"
- "%s/bin/base/jre/bin/java -Dverify=true -classpath %s/java/ldapjdk.jar:%s/java/jars/xmltools.jar com.netscape.xmltools.LDIF2DSML -s -o $dsml_file \n"
- "if [ $? = 0 ]; then\n"
- "\t./ns-slapd db2ldif -D %s \"$@\" -a - | %s/bin/base/jre/bin/java -classpath %s/java/ldapjdk.jar:%s/java/jars/xmltools.jar com.netscape.xmltools.LDIF2DSML -s -o $dsml_file \n"
- "fi\n",
- server, cs_path, sroot, sroot, sroot, cs_path, sroot, sroot, sroot);
- if(t) return t;
-
t = CREATE_DB2LDIF();
if(t) return t;
-#if defined(UPGRADEDB)
- t = gen_script(cs_path, "db2index",
- "cd %s\n"
- "if [ $# -eq 0 ]\n"
- "then\n"
- "\tbak_dir=%s/bak/reindex_`date +%%Y_%%m_%%d_%%H_%%M_%%S`\n"
- "\t./%s upgradedb -D %s -f -a \"$bak_dir\"\n"
- "elif [ $# -lt 4 ]\n"
- "then\n"
- "\techo \"Usage: db2index [-n backend_instance | {-s includesuffix}* -t attribute[:indextypes[:matchingrules]] -T vlvattribute]\"\n"
- "\texit 1\n"
- "else\n"
- "\t./%s db2index -D %s \"$@\"\n"
- "fi\n\n",
- server, cf->ldif_dir,
- PRODUCT_BIN, cf->config_dir,
- PRODUCT_BIN, cf->config_dir);
- if(t) return t;
-#endif
-
- t = gen_script(cs_path, "vlvindex",
- "cd %s\n"
- "if [ $# -lt 4 ]\n"
- "then\n"
- "\techo \"Usage: vlvindex -n backend_instance | {-s includesuffix}* -T attribute\"\n"
- "\techo Note: either \\\"-n backend_instance\\\" or \\\"-s includesuffix\\\" are required.\n"
- "\texit 1\n"
- "fi\n\n"
- "./%s db2index -D %s \"$@\"\n",
- server, PRODUCT_BIN, cf->config_dir);
- if(t) return t;
-
- t = gen_script(cs_path, "db2bak",
- "cd %s\n"
- "if [ \"$#\" -eq 1 ]\nthen\n"
- "\tbak_dir=$1\nelse\n"
- "\tbak_dir=%s/`date +%%Y_%%m_%%d_%%H_%%M_%%S`\nfi\n\n"
- "./%s db2archive -D %s -a $bak_dir\n",
- server, cf->bak_dir, PRODUCT_BIN, cf->config_dir);
- if(t) return t;
-
- t = CREATE_DB2BAK();
- if(t) return t;
-
- t = gen_script(cs_path, "bak2db",
- "if [ $# -lt 1 ] || [ $# -gt 3 ]\nthen\n"
- " echo \"Usage: bak2db archivedir [-n backendname]\"\n"
- " exit 1\n"
- "else\n"
- " archivedir=$1\n"
- " shift\n"
- "fi\n"
- "while getopts \"n:\" flag\ndo\n"
- " case $flag in\n"
- " n) bename=$OPTARG;;\n"
- " *) echo \"Usage: bak2db archivedir [-n backendname]\"; exit 2;;\n"
- " esac\n"
- "done\n\n"
- "if [ 1 = `expr $archivedir : \"\\/\"` ]\nthen\n"
- " archivedir=$archivedir\n"
- "else\n"
- " # relative\n"
- " archivedir=`pwd`/$archivedir\nfi\n\n"
- "cd %s\n"
- "if [ \"$#\" -eq 2 ]\nthen\n"
- " ./%s archive2db -D %s -a $archivedir -n $bename\n"
- "else\n"
- " ./%s archive2db -D %s -a $archivedir\n"
- "fi\n",
- server, PRODUCT_BIN, cf->config_dir, PRODUCT_BIN, cf->config_dir);
- if(t) return t;
-
t = CREATE_BAK2DB();
if(t) return t;
@@ -2403,50 +1958,9 @@ char *ds_gen_scripts(char *sroot, server_config_s *cf, char *cs_path)
t = CREATE_ACCOUNT_INACT("ns-accountstatus.pl");
if(t) return t;
- t = CREATE_DSML();
- if(t) return t;
-
t = CREATE_NEWPWPOLICY();
if(t) return t;
- t = gen_script(cs_path, "suffix2instance",
- "cd %s\n"
- "if [ $# -lt 2 ]\n"
- "then\n"
- "\techo Usage: suffix2instance {-s includesuffix}*\n"
- "\texit 1\n"
- "fi\n\n"
- "./%s suffix2instance -D %s \"$@\" 2>&1\n",
- server, PRODUCT_BIN, cf->config_dir);
- if(t) return t;
-
- /*Generate the java commandline tools in bin/slapd/server*/
- for (cls = 0; cls < 7; cls++) {
- t = gen_script(server, cl_scripts[cls],
- "cd %s\n\n"
- "lang=${LANG:=en}\n"
- "while [ $# -ge 1 ]\n"
- "do\n"
- " if [ $1 = '-l' ]\n"
- " then\n"
- " shift\n"
- " lang=$1\n"
- " else\n"
- " arg=\"$arg $1\"\n"
- " fi\n"
- " shift\n"
- "done\n"
- "./bin/base/jre/bin/jre -classpath ./bin/base/jre/lib:"
- "./bin/base/jre/lib/rt.jar:./bin/base/jre/lib/i18n.jar:"
- "./java/base.jar:./java/jars/ds40.jar:./java/jars/ds40_${lang}.jar:"
- "./java/swingall.jar:./java/ssl.zip:"
- "./java/ldapjdk.jar:./java/mcc40.jar:./java/mcc40_${lang}.jar:"
- "./java/nmclf40.jar:./java/nmclf40_${lang}.jar"
- " com.netscape.admin.dirserv.cmdln.%s $arg\n",
- sroot, cl_javafiles[cls]);
- if(t) return t;
- }
-
return (t);
}
#else
@@ -2655,25 +2169,6 @@ char *ds_gen_scripts(char *sroot, server_config_s *cf, char *cs_path)
t = CREATE_DB2INDEX();
if(t) return t;
-/*
- t = CREATE_MIGRATETO5();
- if(t) return t;
-
- t = CREATE_MIGRATE50TO51();
- if(t) return t;
-
- t = CREATE_MIGRATEINSTANCE5();
- if(t) return t;
-
- t = CREATE_MIGRATETO6();
- if(t) return t;
-
- t = CREATE_MIGRATE5TO6();
- if(t) return t;
-
- t = CREATE_MIGRATEINSTANCE6();
- if(t) return t;
-*/
t = CREATE_MIGRATE5TO7();
if(t) return t;
@@ -3010,9 +2505,6 @@ char *ds_gen_scripts(char *sroot, server_config_s *cf, char *cs_path)
t = CREATE_ACCOUNT_INACT("ns-accountstatus.pl");
if(t) return t;
- t = CREATE_DSML();
- if(t) return t;
-
t = gen_script(cs_path, "dsml-activate.bat",
"@echo off\n"
"setlocal\n"
diff --git a/ldap/admin/src/scripts/template-bak2db.in b/ldap/admin/src/scripts/template-bak2db.in
new file mode 100755
index 000000000..e0e394087
--- /dev/null
+++ b/ldap/admin/src/scripts/template-bak2db.in
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+if [ $# -lt 1 ] || [ $# -gt 3 ]
+then
+ echo "Usage: bak2db archivedir [-n backendname]"
+ exit 1
+else
+ archivedir=$1
+ shift
+fi
+while getopts "n:" flag
+do
+ case $flag in
+ n) bename=$OPTARG;;
+ *) echo "Usage: bak2db archivedir [-n backendname]"; exit 2;;
+ esac
+done
+
+if [ 1 = `expr $archivedir : "\/"` ]
+then
+ archivedir=$archivedir
+else
+ # relative
+ archivedir=`pwd`/$archivedir
+fi
+
+cd {{SERVER-DIR}}
+if [ "$#" -eq 2 ]
+then
+ ./ns-slapd archive2db -D {{CONFIG-DIR}} -a $archivedir -n $bename
+else
+ ./ns-slapd archive2db -D {{CONFIG-DIR}} -a $archivedir
+fi
diff --git a/ldap/admin/src/scripts/template-db2bak.in b/ldap/admin/src/scripts/template-db2bak.in
new file mode 100755
index 000000000..73508d3df
--- /dev/null
+++ b/ldap/admin/src/scripts/template-db2bak.in
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+if [ "$#" -eq 1 ]
+then
+ bak_dir=$1
+else
+ bak_dir={{BAK-DIR}}/`date +%Y_%m_%d_%H_%M_%S`
+fi
+
+./ns-slapd db2archive -D {{CONFIG-DIR}} -a $bak_dir
diff --git a/ldap/admin/src/scripts/template-db2index.in b/ldap/admin/src/scripts/template-db2index.in
new file mode 100755
index 000000000..b13c7965c
--- /dev/null
+++ b/ldap/admin/src/scripts/template-db2index.in
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+if [ $# -eq 0 ]
+then
+ bak_dir={{BAK-DIR}}/reindex_`date +%Y_%m_%d_%H_%M_%S`
+ ./ns-slapd upgradedb -D {{CONFIG-DIR}} -f -a "$bak_dir"
+elif [ $# -lt 4 ]
+then
+ echo "Usage: db2index [-n backend_instance | {-s includesuffix}* -t attribute[:indextypes[:matchingrules]] -T vlvattribute]"
+ exit 1
+else
+ ./ns-slapd db2index -D {{CONFIG-DIR}} "$@"
+fi
+
diff --git a/ldap/admin/src/scripts/template-db2ldif.in b/ldap/admin/src/scripts/template-db2ldif.in
new file mode 100755
index 000000000..846655ade
--- /dev/null
+++ b/ldap/admin/src/scripts/template-db2ldif.in
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+if [ "$#" -lt 2 ];
+then
+ echo "Usage: db2ldif {-n backend_instance}* | {-s includesuffix}*"
+ echo " [{-x excludesuffix}*] [-a outputfile]"
+ echo " [-N] [-r] [-C] [-u] [-U] [-m] [-M] [-1]"
+ echo "Note: either \"-n backend_instance\" or \"-s includesuffix\" is required."
+ exit 1
+fi
+
+set_ldif=0
+ldif_file="mydummy"
+for arg in "$@"
+do
+ if [ "$arg" = '-a' ];
+ then
+ set_ldif=1
+ elif [ $set_ldif -eq 1 ];
+ then
+ ldif_file=$arg
+ set_ldif=2
+ fi
+done
+if [ $ldif_file = "mydummy" ]
+then
+ ldif_file={{LDIF-DIR}}/laputa-`date +%Y_%m_%d_%H%M%S`.ldif
+fi
+if [ $set_ldif -eq 2 ]
+then
+./ns-slapd db2ldif -D {{CONFIG-DIR}} "$@"
+else
+./ns-slapd db2ldif -D {{CONFIG-DIR}} -a $ldif_file "$@"
+fi
diff --git a/ldap/admin/src/scripts/template-ldif2db.in b/ldap/admin/src/scripts/template-ldif2db.in
new file mode 100755
index 000000000..65a2e215f
--- /dev/null
+++ b/ldap/admin/src/scripts/template-ldif2db.in
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+if [ $# -lt 4 ]
+then
+ echo "Usage: ldif2db -n backend_instance | {-s includesuffix}* [{-x excludesuffix}*]"
+ echo " {-i ldiffile}* [-O]"
+ echo "Note: either \"-n backend_instance\" or \"-s includesuffix\" and \"-i ldiffile\" are required."
+ exit 1
+fi
+
+echo importing data ...
+./ns-slapd ldif2db -D {{CONFIG-DIR}} "$@" 2>&1
+exit $?
diff --git a/ldap/admin/src/scripts/template-ldif2ldap.in b/ldap/admin/src/scripts/template-ldif2ldap.in
new file mode 100755
index 000000000..0983396a9
--- /dev/null
+++ b/ldap/admin/src/scripts/template-ldif2ldap.in
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+PATH=$prefix@ldapsdk_bindir@:@ldapsdk_bindir@
+LD_LIBRARY_PATH=$prefix@ldapsdk_libdir@:@ldapsdk_libdir@:$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@ldapsdk_libdir@:@ldapsdk_libdir@:$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+ldapmodify -a -p {{SERVER-PORT}} -D "$1" -w "$2" -f $3
diff --git a/ldap/admin/src/scripts/template-monitor.in b/ldap/admin/src/scripts/template-monitor.in
new file mode 100755
index 000000000..487aac49b
--- /dev/null
+++ b/ldap/admin/src/scripts/template-monitor.in
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+PATH=$prefix@ldapsdk_bindir@:@ldapsdk_bindir@
+LD_LIBRARY_PATH=$prefix@ldapsdk_libdir@:@ldapsdk_libdir@:$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@ldapsdk_libdir@:@ldapsdk_libdir@:$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+if [ "x$1" != "x" ];
+then MDN="$1";
+else MDN="cn=monitor";
+ fi
+ldapsearch -p {{SERVER-PORT}} -b "$MDN" -s base "objectClass=*"
diff --git a/ldap/admin/src/scripts/template-restoreconfig.in b/ldap/admin/src/scripts/template-restoreconfig.in
new file mode 100755
index 000000000..9f7be2d23
--- /dev/null
+++ b/ldap/admin/src/scripts/template-restoreconfig.in
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+conf_ldif=`ls -1t {{BAK-DIR}}/{{SERV-ID}}-*.ldif | head -1`
+if [ -z "$conf_ldif" ]
+then
+ echo No configuration to restore in {{BAK-DIR}} ; exit 1
+fi
+echo Restoring $conf_ldif
+./ns-slapd ldif2db -D {{CONFIG-DIR}} -i $conf_ldif -n NetscapeRoot 2>&1
+exit $?
diff --git a/ldap/admin/src/scripts/template-saveconfig.in b/ldap/admin/src/scripts/template-saveconfig.in
new file mode 100755
index 000000000..8d9f7c115
--- /dev/null
+++ b/ldap/admin/src/scripts/template-saveconfig.in
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+echo saving configuration ...
+conf_ldif={{BAK-DIR}}/{{SERV-ID}}-`date +%Y_%m_%d_%H%M%S`.ldif
+./ns-slapd db2ldif -N -D {{CONFIG-DIR}} -s "o=NetscapeRoot" -a $conf_ldif -n NetscapeRoot 2>&1
+if [ "$?" -ge 1 ]
+then
+ echo Error occurred while saving configuration
+ exit 1
+fi
+exit 0
diff --git a/ldap/admin/src/scripts/template-start-slapd.in b/ldap/admin/src/scripts/template-start-slapd.in
new file mode 100755
index 000000000..b408fd8f5
--- /dev/null
+++ b/ldap/admin/src/scripts/template-start-slapd.in
@@ -0,0 +1,77 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+# Script that starts the ns-slapd server.
+# Exit status can be:
+# 0: Server started successfully
+# 1: Server could not be started
+# 2: Server already running
+
+NETSITE_ROOT={{SERVER-DIR}}
+export NETSITE_ROOT
+DS_CONFIG_DIR={{CONFIG-DIR}}
+export DS_CONFIG_DIR
+PIDFILE={{RUN-DIR}}/pid
+STARTPIDFILE={{RUN-DIR}}/startpid
+if test -f $STARTPIDFILE ; then
+ PID=`cat $STARTPIDFILE`
+ if kill -0 $PID > /dev/null 2>&1 ; then
+ echo There is an ns-slapd process already running: $PID
+ exit 2;
+ else
+ rm -f $STARTPIDFILE
+ fi
+fi
+if test -f $PIDFILE ; then
+ PID=`cat $PIDFILE`
+ if kill -0 $PID > /dev/null 2>&1 ; then
+ echo There is an ns-slapd running: $PID
+ exit 2;
+ else
+ rm -f $PIDFILE
+ fi
+fi
+cd {{SERVER-DIR}}; ./ns-slapd -D {{CONFIG-DIR}} -i $PIDFILE -w $STARTPIDFILE "$@"
+if [ $? -ne 0 ]; then
+ exit 1
+fi
+
+loop_counter=1
+# wait for 10 seconds for the start pid file to appear
+max_count=10
+while test $loop_counter -le $max_count; do
+ loop_counter=`expr $loop_counter + 1`
+ if test ! -f $STARTPIDFILE ; then
+ sleep 1;
+ else
+ PID=`cat $STARTPIDFILE`
+ fi
+done
+if test ! -f $STARTPIDFILE ; then
+ echo Server failed to start !!! Please check errors log for problems
+ exit 1
+fi
+loop_counter=1
+# wait for 10 minutes (600 times 1 seconds)
+max_count=600
+while test $loop_counter -le $max_count; do
+ loop_counter=`expr $loop_counter + 1`
+ if test ! -f $PIDFILE ; then
+ if kill -0 $PID > /dev/null 2>&1 ; then
+ sleep 1
+ else
+ echo Server failed to start !!! Please check errors log for problems
+ exit 1
+ fi
+ else
+ PID=`cat $PIDFILE`
+ exit 0;
+ fi
+done
+echo Server not running!! Failed to start ns-slapd process. Please check the errors log for problems.
+exit 1
diff --git a/ldap/admin/src/scripts/template-stop-slapd.in b/ldap/admin/src/scripts/template-stop-slapd.in
new file mode 100755
index 000000000..f7c3e9aed
--- /dev/null
+++ b/ldap/admin/src/scripts/template-stop-slapd.in
@@ -0,0 +1,48 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+# Script that stops the ns-slapd server.
+# Exit status can be:
+# 0: Server stopped successfully
+# 1: Server could not be stopped
+# 2: Server was not running
+
+PIDFILE={{RUN-DIR}}/pid
+if test ! -f $PIDFILE ; then
+ echo No ns-slapd PID file found. Server is probably not running
+ exit 2
+fi
+PID=`cat $PIDFILE`
+# see if the server is already stopped
+kill -0 $PID > /dev/null 2>&1 || {
+ echo Server not running
+ if test -f $PIDFILE ; then
+ rm -f $PIDFILE
+ fi
+ exit 2
+}
+# server is running - kill it
+kill $PID
+loop_counter=1
+# wait for 10 minutes (600 times 1 second)
+max_count=600
+while test $loop_counter -le $max_count; do
+ loop_counter=`expr $loop_counter + 1`
+ if kill -0 $PID > /dev/null 2>&1 ; then
+ sleep 1;
+ else
+ if test -f $PIDFILE ; then
+ rm -f $PIDFILE
+ fi
+ exit 0
+ fi
+done
+if test -f $PIDFILE ; then
+ echo Server still running!! Failed to stop the ns-slapd process: $PID. Please check the errors log for problems.
+fi
+exit 1
diff --git a/ldap/admin/src/scripts/template-suffix2instance.in b/ldap/admin/src/scripts/template-suffix2instance.in
new file mode 100755
index 000000000..1a2ce3250
--- /dev/null
+++ b/ldap/admin/src/scripts/template-suffix2instance.in
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+if [ $# -lt 2 ]
+then
+ echo Usage: suffix2instance {-s includesuffix}*
+ exit 1
+fi
+
+./ns-slapd suffix2instance -D {{CONFIG-DIR}} "$@" 2>&1
diff --git a/ldap/admin/src/scripts/template-upgradedb.in b/ldap/admin/src/scripts/template-upgradedb.in
new file mode 100755
index 000000000..621d562cc
--- /dev/null
+++ b/ldap/admin/src/scripts/template-upgradedb.in
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+if [ "$#" -eq 1 ]
+then
+ bak_dir=$1
+else
+ bak_dir={{BAK-DIR}}/upgradedb_`date +%Y_%m_%d_%H_%M_%S`
+fi
+
+echo upgrade index files ...
+./ns-slapd upgradedb -D {{CONFIG-DIR}} -a $bak_dir
diff --git a/ldap/admin/src/scripts/template-verify-db.pl.in b/ldap/admin/src/scripts/template-verify-db.pl.in
index 8cf45afa0..756a10502 100644
--- a/ldap/admin/src/scripts/template-verify-db.pl.in
+++ b/ldap/admin/src/scripts/template-verify-db.pl.in
@@ -111,7 +111,6 @@ print("*****************************************************************\n");
# get dirs having DBVERSION
my $dbdirs = getDbDir(".");
-my $brand_ds = {{DS-BRAND}};
my $prefix = "{{DS-ROOT}}";
$ENV{'PATH'} = '$prefix@db_bindir@:$prefix/usr/lib:@db_bindir@:/usr/lib';
diff --git a/ldap/admin/src/scripts/template-vlvindex.in b/ldap/admin/src/scripts/template-vlvindex.in
new file mode 100755
index 000000000..472fc15a5
--- /dev/null
+++ b/ldap/admin/src/scripts/template-vlvindex.in
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+prefix="{{DS-ROOT}}"
+LD_LIBRARY_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export LD_LIBRARY_PATH
+SHLIB_PATH=$prefix@nss_libdir@:$prefix/usr/lib:@nss_libdir@
+export SHLIB_PATH
+
+cd {{SERVER-DIR}}
+if [ $# -lt 4 ]
+then
+ echo "Usage: vlvindex -n backend_instance | {-s includesuffix}* -T attribute"
+ echo Note: either \"-n backend_instance\" or \"-s includesuffix\" are required.
+ exit 1
+fi
+
+./ns-slapd db2index -D {{CONFIG-DIR}} "$@"
| 0 |
0ae39ce5b3050d49005d632ed420d8c80e834b7e
|
389ds/389-ds-base
|
Bug 750625 - Fix Coverity (11055) Explicit null dereferenced
https://bugzilla.redhat.com/show_bug.cgi?id=750625
plugins/automember/automember.c (automember_pre_op)
Bug Description: Passing null variable "e" to function
"automember_parse_config_entry", which dereferences it.
Missing a check of the NULL possibility of target entry "e".
Fix Description: Check if "e" is NULL or not. If it is, goto "bail".
|
commit 0ae39ce5b3050d49005d632ed420d8c80e834b7e
Author: Noriko Hosoi <[email protected]>
Date: Tue Nov 1 17:49:39 2011 -0700
Bug 750625 - Fix Coverity (11055) Explicit null dereferenced
https://bugzilla.redhat.com/show_bug.cgi?id=750625
plugins/automember/automember.c (automember_pre_op)
Bug Description: Passing null variable "e" to function
"automember_parse_config_entry", which dereferences it.
Missing a check of the NULL possibility of target entry "e".
Fix Description: Check if "e" is NULL or not. If it is, goto "bail".
diff --git a/ldap/servers/plugins/automember/automember.c b/ldap/servers/plugins/automember/automember.c
index 89ed1dabb..010b5ac97 100644
--- a/ldap/servers/plugins/automember/automember.c
+++ b/ldap/servers/plugins/automember/automember.c
@@ -1593,6 +1593,11 @@ automember_pre_op(Slapi_PBlock * pb, int modop)
if (LDAP_CHANGETYPE_ADD == modop) {
slapi_pblock_get(pb, SLAPI_ADD_ENTRY, &e);
+ /* If the entry doesn't exist, just bail and
+ * let the server handle it. */
+ if (e == NULL) {
+ goto bail;
+ }
} else if (LDAP_CHANGETYPE_MODIFY == modop) {
/* Fetch the entry being modified so we can
* create the resulting entry for validation. */
| 0 |
1c4d25369dd749a13a71f60af4c3d43326e9e4a0
|
389ds/389-ds-base
|
Bug 688341 - (cov#10704,10705) Fix Coverity code maintainability issues
We are computing the size of memory needed by using the incorrect
pointer type.
|
commit 1c4d25369dd749a13a71f60af4c3d43326e9e4a0
Author: Nathan Kinder <[email protected]>
Date: Thu Mar 17 09:17:22 2011 -0700
Bug 688341 - (cov#10704,10705) Fix Coverity code maintainability issues
We are computing the size of memory needed by using the incorrect
pointer type.
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
index ceea42939..c628ba58c 100644
--- a/ldap/servers/slapd/entry.c
+++ b/ldap/servers/slapd/entry.c
@@ -2419,11 +2419,11 @@ slapi_entry_vattrcache_find_values_and_type_ex( const Slapi_Entry *e,
char *vattr_type=NULL;
r= SLAPI_ENTRY_VATTR_RESOLVED_EXISTS;
- *results = (Slapi_ValueSet**)slapi_ch_calloc(1, sizeof(*results));
+ *results = (Slapi_ValueSet**)slapi_ch_calloc(1, sizeof(**results));
**results = valueset_dup(&(tmp_attr->a_present_values));
*actual_type_name =
- (char**)slapi_ch_malloc(sizeof(*actual_type_name));
+ (char**)slapi_ch_malloc(sizeof(**actual_type_name));
slapi_attr_get_type( tmp_attr, &vattr_type );
**actual_type_name = slapi_ch_strdup(vattr_type);
| 0 |
5462a0698e1a2174de908889696c032e581f02b6
|
389ds/389-ds-base
|
[176293] repl_chain_on_update() logs a message for every modify operation
Checking in Ulf's proposal; should not get connid from pid if the op is internal. (plus the value is just for debug print which is disabled now)
|
commit 5462a0698e1a2174de908889696c032e581f02b6
Author: Noriko Hosoi <[email protected]>
Date: Mon Jan 16 19:06:03 2006 +0000
[176293] repl_chain_on_update() logs a message for every modify operation
Checking in Ulf's proposal; should not get connid from pid if the op is internal. (plus the value is just for debug print which is disabled now)
diff --git a/ldap/servers/plugins/replication/replutil.c b/ldap/servers/plugins/replication/replutil.c
index 55e6e0ef9..280aa7e2b 100644
--- a/ldap/servers/plugins/replication/replutil.c
+++ b/ldap/servers/plugins/replication/replutil.c
@@ -827,9 +827,19 @@ repl_chain_on_update(Slapi_PBlock *pb, Slapi_DN * target_dn,
PRBool local_online = PR_FALSE; /* true if the local db is online */
PRBool chain_online = PR_FALSE; /* true if the chain db is online */
int ii;
- int opid, connid;
+ int opid;
+#ifdef DEBUG_CHAIN_ON_UPDATE
+ int connid;
+#endif
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+#ifdef DEBUG_CHAIN_ON_UPDATE
+ if (operation_is_flag_set(op, OP_FLAG_INTERNAL)) {
+ connid=-1; /* -1: internal op in a log msg */
+ } else {
+ slapi_pblock_get(pb, SLAPI_CONN_ID, &connid);
+ }
+#endif
- slapi_pblock_get(pb, SLAPI_CONN_ID, &connid);
slapi_pblock_get(pb, SLAPI_OPERATION_ID, &opid);
/* first, we have to decide which backend is the local backend
* and which is the chaining one
@@ -854,13 +864,13 @@ repl_chain_on_update(Slapi_PBlock *pb, Slapi_DN * target_dn,
local_online = PR_TRUE;
}
}
-/*
+#ifdef DEBUG_CHAIN_ON_UPDATE
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "repl_chain_on_update: conn=%d op=%d be "
- "%s is the %s backend and is %s\n",
- connid, opid,
- mtn_be_names[ii], (chaining_backend == ii) ? "chaining" : "local",
- (mtn_be_states[ii] == SLAPI_BE_STATE_ON) ? "online" : "offline");
-*/
+ "%s is the %s backend and is %s\n",
+ connid, opid,
+ mtn_be_names[ii], (chaining_backend == ii) ? "chaining" : "local",
+ (mtn_be_states[ii] == SLAPI_BE_STATE_ON) ? "online" : "offline");
+#endif
}
/* if no chaining backends are defined, just use the local one */
@@ -868,8 +878,6 @@ repl_chain_on_update(Slapi_PBlock *pb, Slapi_DN * target_dn,
return local_backend;
}
- slapi_pblock_get(pb, SLAPI_OPERATION, &op);
-
/* All internal operations go to the local backend */
if (operation_is_flag_set(op, OP_FLAG_INTERNAL)) {
return local_backend;
@@ -884,11 +892,11 @@ repl_chain_on_update(Slapi_PBlock *pb, Slapi_DN * target_dn,
(op_type == SLAPI_OPERATION_BIND) ||
(op_type == SLAPI_OPERATION_UNBIND) ||
(op_type == SLAPI_OPERATION_COMPARE))) {
-/*
+#ifdef DEBUG_CHAIN_ON_UPDATE
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "repl_chain_on_update: conn=%d op=%d op is "
"%d: using local backend\n",
connid, opid, op_type);
-*/
+#endif
return local_backend;
}
@@ -903,10 +911,10 @@ repl_chain_on_update(Slapi_PBlock *pb, Slapi_DN * target_dn,
*/
slapi_pblock_get(pb, SLAPI_REQUESTOR_DN, &requestor_dn);
if (slapi_dn_isroot(requestor_dn)) {
-/*
+#ifdef DEBUG_CHAIN_ON_UPDATE
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "repl_chain_on_update: conn=%d op=%d requestor "
"is root: using local backend\n", connid, opid);
-*/
+#endif
return local_backend;
}
@@ -915,10 +923,10 @@ repl_chain_on_update(Slapi_PBlock *pb, Slapi_DN * target_dn,
*/
slapi_pblock_get(pb, SLAPI_IS_REPLICATED_OPERATION, &repl_op);
if (repl_op) {
-/*
+#ifdef DEBUG_CHAIN_ON_UPDATE
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "repl_chain_on_update: conn=%d op=%d op is "
"replicated: using local backend\n", connid, opid);
-*/
+#endif
return local_backend;
}
@@ -926,10 +934,10 @@ repl_chain_on_update(Slapi_PBlock *pb, Slapi_DN * target_dn,
* or any normal non replicated client operation while local is disabled (import) :
* use the chaining backend
*/
-/*
+#ifdef DEBUG_CHAIN_ON_UPDATE
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name, "repl_chain_on_update: conn=%d op=%d using "
"chaining backend\n", connid, opid);
-*/
+#endif
return chaining_backend;
}
| 0 |
066a7b49df9c9addfa97e115f483a4f7d5f37c02
|
389ds/389-ds-base
|
Issue 50260 - Fix test according to #51222 fix
Description: Managed Entry plugin behaviour was fixed and
returned codes were cleaned up. Now we allow to continue
modrdn and delete managing entry operations execution
even when managed entry doesn't exists.
Also allow 'cn=directory manager' to delete managed entry
on direct update.
Make the updates fail using another way.
https://pagure.io/389-ds-base/issue/50260
https://pagure.io/389-ds-base/issue/51222
Reviewed by: mreynolds (Thanks!)
|
commit 066a7b49df9c9addfa97e115f483a4f7d5f37c02
Author: Simon Pichugin <[email protected]>
Date: Wed Aug 5 14:35:06 2020 +0200
Issue 50260 - Fix test according to #51222 fix
Description: Managed Entry plugin behaviour was fixed and
returned codes were cleaned up. Now we allow to continue
modrdn and delete managing entry operations execution
even when managed entry doesn't exists.
Also allow 'cn=directory manager' to delete managed entry
on direct update.
Make the updates fail using another way.
https://pagure.io/389-ds-base/issue/50260
https://pagure.io/389-ds-base/issue/51222
Reviewed by: mreynolds (Thanks!)
diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py
index aab2392bc..b5ca01047 100644
--- a/dirsrvtests/tests/suites/betxns/betxn_test.py
+++ b/dirsrvtests/tests/suites/betxns/betxn_test.py
@@ -18,12 +18,14 @@ from lib389.plugins import (SevenBitCheckPlugin, AttributeUniquenessPlugin,
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.group import Groups, Group
+from lib389.idm.domain import Domain
from lib389._constants import DEFAULT_SUFFIX
pytestmark = pytest.mark.tier1
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
+USER_PASSWORD = 'password'
def test_betxt_7bit(topology_st):
@@ -253,6 +255,15 @@ def test_ri_and_mep_cache_corruption(topology_st):
5. Success
"""
+ # Add ACI so we can test that non-DM user can't delete managed entry
+ domain = Domain(topology_st.standalone, DEFAULT_SUFFIX)
+ ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")"
+ ACI_TARGETATTR = "(targetattr = *)"
+ ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) "
+ ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
+ ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
+ domain.add('aci', ACI_BODY)
+
# Start plugins
topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on')
mep_plugin = ManagedEntriesPlugin(topology_st.standalone)
@@ -270,15 +281,15 @@ def test_ri_and_mep_cache_corruption(topology_st):
mep_template1 = mep_templates.create(properties={
'cn': 'MEP template',
'mepRDNAttr': 'cn',
- 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
+ 'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'),
'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
})
mep_configs = MEPConfigs(topology_st.standalone)
mep_configs.create(properties={'cn': 'config',
- 'originScope': ou_people.dn,
- 'originFilter': 'objectclass=posixAccount',
- 'managedBase': ou_groups.dn,
- 'managedTemplate': mep_template1.dn})
+ 'originScope': ou_people.dn,
+ 'originFilter': 'objectclass=posixAccount',
+ 'managedBase': ou_groups.dn,
+ 'managedTemplate': mep_template1.dn})
# Add an entry that meets the MEP scope
users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX,
@@ -291,6 +302,8 @@ def test_ri_and_mep_cache_corruption(topology_st):
'gidNumber': '20011',
'homeDirectory': '/home/test-user1'
})
+ user.reset_password(USER_PASSWORD)
+ user_bound_conn = user.bind(USER_PASSWORD)
# Add group
groups = Groups(topology_st.standalone, DEFAULT_SUFFIX)
@@ -304,22 +317,25 @@ def test_ri_and_mep_cache_corruption(topology_st):
# Test MEP be txn pre op failure does not corrupt entry cache
# Should get the same exception for both rename attempts
+ # Try to remove the entry while bound as Admin (non-DM)
+ managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None)
+ managed_entry_user_conn = managed_groups_user_conn.get(user.rdn)
with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- mep_group.rename("cn=modrdn group")
-
+ managed_entry_user_conn.rename("cn=modrdn group")
with pytest.raises(ldap.UNWILLING_TO_PERFORM):
- mep_group.rename("cn=modrdn group")
+ managed_entry_user_conn.rename("cn=modrdn group")
# Mess with MEP so it fails
mep_plugin.disable()
- mep_group.delete()
+ users_mep_group = UserAccounts(topology_st.standalone, mep_group.dn, rdn=None)
+ users_mep_group.create_test_user(1001)
mep_plugin.enable()
# Add another group to verify entry cache is not corrupted
test_group = groups.create(properties={'cn': 'test_group'})
- # Delete user, should fail in MEP be txn post op, and user should still be a member
- with pytest.raises(ldap.NO_SUCH_OBJECT):
+ # Try to delete user - it fails because managed entry can't be deleted
+ with pytest.raises(ldap.NOT_ALLOWED_ON_NONLEAF):
user.delete()
# Verify membership is intact
| 0 |
553d7dd74884a0d8cb7b21974e008ef564805e38
|
389ds/389-ds-base
|
Issue 49204 - Need to update function declaration
Description: util_is_cachesize_sane had conflicting types
https://pagure.io/389-ds-base/issue/49204
Reviewed by: mreynolds (one line commit rule)
|
commit 553d7dd74884a0d8cb7b21974e008ef564805e38
Author: Mark Reynolds <[email protected]>
Date: Tue Apr 18 10:01:23 2017 -0400
Issue 49204 - Need to update function declaration
Description: util_is_cachesize_sane had conflicting types
https://pagure.io/389-ds-base/issue/49204
Reviewed by: mreynolds (one line commit rule)
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 0c765809a..d9547d87c 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1392,7 +1392,7 @@ typedef enum _util_cachesize_result {
* \return util_cachesize_result.
* \sa util_cachesize_result, spal_meminfo_get
*/
-util_cachesize_result util_is_cachesize_sane(slapi_pal_meminfo *mi, size_t *cachesize);
+util_cachesize_result util_is_cachesize_sane(slapi_pal_meminfo *mi, uint64_t *cachesize);
/**
* Retrieve the number of threads the server should run with based on this hardware.
| 0 |
de8fd7d0e596e4de885b4dda6bf5329469880c45
|
389ds/389-ds-base
|
Ticket 328 - make sure all internal search filters are properly escaped
Fix description:
Fixing double-free issues introduced with commit 3cf9a521fa43183c657a5dca507dec3a4379e383.
1) If ctx.buf is realloc'ed in filter_stuff_func, slapi_filter_sprintf
returns already freed buf. And the buf is freed by caller, again.
2) If filter escape occurs in get_substring_filter, freed val is
returned to caller via "struct slapi_filter *f" and it is freed
again.
|
commit de8fd7d0e596e4de885b4dda6bf5329469880c45
Author: Noriko Hosoi <[email protected]>
Date: Tue Aug 7 12:21:04 2012 -0700
Ticket 328 - make sure all internal search filters are properly escaped
Fix description:
Fixing double-free issues introduced with commit 3cf9a521fa43183c657a5dca507dec3a4379e383.
1) If ctx.buf is realloc'ed in filter_stuff_func, slapi_filter_sprintf
returns already freed buf. And the buf is freed by caller, again.
2) If filter escape occurs in get_substring_filter, freed val is
returned to caller via "struct slapi_filter *f" and it is freed
again.
diff --git a/ldap/servers/slapd/filter.c b/ldap/servers/slapd/filter.c
index 65e9de43a..2b3593689 100644
--- a/ldap/servers/slapd/filter.c
+++ b/ldap/servers/slapd/filter.c
@@ -516,6 +516,7 @@ get_substring_filter(
if(eval){
slapi_ch_free_string(&val);
val = eval;
+ f->f_sub_initial = val;
}
*fstr = slapi_ch_realloc( *fstr, strlen( *fstr ) +
strlen( val ) + 1 );
@@ -546,6 +547,7 @@ get_substring_filter(
if(eval){
slapi_ch_free_string(&val);
val = eval;
+ f->f_sub_final = val;
}
*fstr = slapi_ch_realloc( *fstr, strlen( *fstr ) +
strlen( val ) + 2 );
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
index fc399e764..a70910fd0 100644
--- a/ldap/servers/slapd/util.c
+++ b/ldap/servers/slapd/util.c
@@ -431,7 +431,7 @@ slapi_filter_sprintf(const char *fmt, ...)
}
va_end(args);
- return buf;
+ return ctx.buf;
}
/*
| 0 |
8da07138e5011bf7f4fab53fe47152bc12b8239b
|
389ds/389-ds-base
|
510147 clean up the replication log msg
_cl5DBOpen removes a changelog db if there is no matching replica for the file.
The manner to remove the changelog db file was not good -- not using the API
that Berkeley DB provided, but removing it with NSPR delete function PR_Delete.
This fix replaces PR_Delete with the Berkeley DB API dbremove.
|
commit 8da07138e5011bf7f4fab53fe47152bc12b8239b
Author: Noriko Hosoi <[email protected]>
Date: Mon Jul 20 15:40:10 2009 -0700
510147 clean up the replication log msg
_cl5DBOpen removes a changelog db if there is no matching replica for the file.
The manner to remove the changelog db file was not good -- not using the API
that Berkeley DB provided, but removing it with NSPR delete function PR_Delete.
This fix replaces PR_Delete with the Berkeley DB API dbremove.
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index dfc5765d1..94b80fda2 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -2514,14 +2514,15 @@ static int _cl5DBOpen ()
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5DBOpen: "
"file %s has no matching replica; removing\n", entry->name);
- PR_snprintf(fullpathname, MAXPATHLEN, "%s/%s", s_cl5Desc.dbDir, entry->name);
- if (PR_Delete(fullpathname) != PR_SUCCESS)
- {
- slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, "_cl5DBOpen: "
- "failed to remove (%s) file; NSPR error - %d\n",
- entry->name, PR_GetError ());
-
- }
+ rc = s_cl5Desc.dbEnv->dbremove(s_cl5Desc.dbEnv,
+ 0, fullpathname, 0, 0);
+ if (rc != 0)
+ {
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl,
+ "_cl5DBOpen: failed to remove (%s) file; "
+ "libdb error - %d (%s)\n",
+ fullpathname, rc, db_strerror(rc));
+ }
}
}
}
| 0 |
3845c2d1663eb1b570e09ad84eac70a48333d20a
|
389ds/389-ds-base
|
Resolves: 439628
Summary: Check for indirect memberships when removing memberOf attributes.
|
commit 3845c2d1663eb1b570e09ad84eac70a48333d20a
Author: Nathan Kinder <[email protected]>
Date: Mon Apr 21 17:45:15 2008 +0000
Resolves: 439628
Summary: Check for indirect memberships when removing memberOf attributes.
diff --git a/ldap/servers/plugins/memberof/memberof.c b/ldap/servers/plugins/memberof/memberof.c
index 2555045b4..c9341571b 100644
--- a/ldap/servers/plugins/memberof/memberof.c
+++ b/ldap/servers/plugins/memberof/memberof.c
@@ -87,7 +87,7 @@ static Slapi_Mutex *memberof_operation_lock = 0;
typedef struct _memberofstringll
{
- char *dn;
+ const char *dn;
void *next;
} memberofstringll;
@@ -135,8 +135,9 @@ static int memberof_is_legit_member(Slapi_PBlock *pb, char *group_dn,
static int memberof_del_dn_from_groups(Slapi_PBlock *pb, char *dn);
static int memberof_call_foreach_dn(Slapi_PBlock *pb, char *dn,
char *type, plugin_search_entry_callback callback, void *callback_data);
-static int memberof_is_group_member(Slapi_Value *groupdn, Slapi_Value *memberdn);
-static int memberof_test_membership(Slapi_PBlock *pb, char *dn);
+static int memberof_is_direct_member(Slapi_Value *groupdn, Slapi_Value *memberdn);
+static int memberof_is_member(Slapi_Value *groupdn, Slapi_Value *memberdn);
+static int memberof_test_membership(Slapi_PBlock *pb, char *group_dn);
static int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data);
static int memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data);
static int memberof_replace_dn_type_callback(Slapi_Entry *e, void *callback_data);
@@ -378,6 +379,12 @@ int memberof_del_dn_type_callback(Slapi_Entry *e, void *callback_data)
return rc;
}
+/*
+ * Does a callback search of "type=dn" under the db suffix that "dn" is in.
+ * If "dn" is a user, you'd want "type" to be "member". If "dn" is a group,
+ * you could want type to be either "member" or "memberOf" depending on the
+ * case.
+ */
int memberof_call_foreach_dn(Slapi_PBlock *pb, char *dn,
char *type, plugin_search_entry_callback callback, void *callback_data)
{
@@ -399,7 +406,6 @@ int memberof_call_foreach_dn(Slapi_PBlock *pb, char *dn,
base_sdn = (Slapi_DN*)slapi_be_getsuffix(be,0);
}
-
if(base_sdn)
{
int filter_size =
@@ -804,6 +810,9 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, int mod_op, char *group_dn,
Slapi_Entry *e = 0;
memberofstringll *ll = 0;
char *op_str = 0;
+ Slapi_Value *to_dn_val = slapi_value_new_string(op_to);
+ Slapi_Value *this_dn_val = slapi_value_new_string(op_this);
+
/* determine if this is a group op or single entry */
op_to_sdn = slapi_sdn_new_dn_byref(op_to);
@@ -853,7 +862,6 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, int mod_op, char *group_dn,
{
/* group */
Slapi_Value *ll_dn_val = 0;
- Slapi_Value *to_dn_val = slapi_value_new_string(op_to);
Slapi_Attr *members = 0;
ll = stack;
@@ -865,7 +873,6 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, int mod_op, char *group_dn,
if(0 == memberof_compare(&ll_dn_val, &to_dn_val))
{
- slapi_value_free(&to_dn_val);
slapi_value_free(&ll_dn_val);
/* someone set up infinitely
@@ -882,8 +889,6 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, int mod_op, char *group_dn,
ll = ll->next;
}
- slapi_value_free(&to_dn_val);
-
/* do op on group */
slapi_log_error( SLAPI_LOG_PLUGIN,
MEMBEROF_PLUGIN_SUBSYSTEM,
@@ -913,9 +918,6 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, int mod_op, char *group_dn,
}
/* continue with operation */
{
- Slapi_Value *to_dn_val = slapi_value_new_string(op_to);
- Slapi_Value *this_dn_val = slapi_value_new_string(op_this);
-
/* We want to avoid listing a group as a memberOf itself
* in case someone set up a circular grouping.
*/
@@ -925,15 +927,12 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, int mod_op, char *group_dn,
MEMBEROF_PLUGIN_SUBSYSTEM,
"memberof_modop_one_r: not processing memberOf "
"operations on self entry: %s\n", this_dn_val);
- slapi_value_free(&to_dn_val);
- slapi_value_free(&this_dn_val);
goto bail;
}
- /* We don't need the Slapi_Value copies of the DN's anymore */
- slapi_value_free(&to_dn_val);
- slapi_value_free(&this_dn_val);
-
+ /* We need to deal with delete cases separately. We may not
+ * want to remove a memberof attribute from an entry since
+ * it could still be a member in some other indirect manner. */
if(stack && LDAP_MOD_DELETE == mod_op)
{
if(memberof_is_legit_member(pb, group_dn,
@@ -948,49 +947,64 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, int mod_op, char *group_dn,
}
}
- /* single entry - do mod */
- mod_pb = slapi_pblock_new();
+ /* Check if the entry is still an indirect member. If it is, we
+ * don't want to remove the memberOf value. */
+ if((LDAP_MOD_DELETE != mod_op) || (0 == memberof_is_member(this_dn_val, to_dn_val))) {
+ /* If we're about to add a memberOf value to an entry, we should first check
+ * if the value already exists. */
+ if((LDAP_MOD_ADD == mod_op) && (slapi_entry_attr_has_syntax_value(e,
+ MEMBEROF_ATTR, this_dn_val)))
+ {
+ slapi_log_error( SLAPI_LOG_PLUGIN, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_modop_one_r: memberOf value %s already exists in "
+ "entry %s\n", op_this, op_to);
+ goto bail;
+ }
- mods[0] = &mod;
- if(LDAP_MOD_REPLACE == mod_op)
- {
- mods[1] = &replace_mod;
- mods[2] = 0;
- }
- else
- {
- mods[1] = 0;
- }
+ /* single entry - do mod */
+ mod_pb = slapi_pblock_new();
- val[0] = op_this;
- val[1] = 0;
+ mods[0] = &mod;
+ if(LDAP_MOD_REPLACE == mod_op)
+ {
+ mods[1] = &replace_mod;
+ mods[2] = 0;
+ }
+ else
+ {
+ mods[1] = 0;
+ }
- mod.mod_op = LDAP_MOD_REPLACE == mod_op?LDAP_MOD_DELETE:mod_op;
- mod.mod_type = MEMBEROF_ATTR;
- mod.mod_values = val;
+ val[0] = op_this;
+ val[1] = 0;
- if(LDAP_MOD_REPLACE == mod_op)
- {
- replace_val[0] = replace_with;
- replace_val[1] = 0;
+ mod.mod_op = LDAP_MOD_REPLACE == mod_op?LDAP_MOD_DELETE:mod_op;
+ mod.mod_type = MEMBEROF_ATTR;
+ mod.mod_values = val;
- replace_mod.mod_op = LDAP_MOD_ADD;
- replace_mod.mod_type = MEMBEROF_ATTR;
- replace_mod.mod_values = replace_val;
- }
+ if(LDAP_MOD_REPLACE == mod_op)
+ {
+ replace_val[0] = replace_with;
+ replace_val[1] = 0;
- slapi_modify_internal_set_pb(
- mod_pb, op_to,
- mods, 0, 0,
- memberof_get_plugin_id(), 0);
+ replace_mod.mod_op = LDAP_MOD_ADD;
+ replace_mod.mod_type = MEMBEROF_ATTR;
+ replace_mod.mod_values = replace_val;
+ }
+
+ slapi_modify_internal_set_pb(
+ mod_pb, op_to,
+ mods, 0, 0,
+ memberof_get_plugin_id(), 0);
- slapi_modify_internal_pb(mod_pb);
+ slapi_modify_internal_pb(mod_pb);
- slapi_pblock_get(mod_pb,
- SLAPI_PLUGIN_INTOP_RESULT,
- &rc);
+ slapi_pblock_get(mod_pb,
+ SLAPI_PLUGIN_INTOP_RESULT,
+ &rc);
- slapi_pblock_destroy(mod_pb);
+ slapi_pblock_destroy(mod_pb);
+ }
if(LDAP_MOD_DELETE == mod_op)
{
@@ -1010,6 +1024,8 @@ int memberof_modop_one_replace_r(Slapi_PBlock *pb, int mod_op, char *group_dn,
}
bail:
+ slapi_value_free(&to_dn_val);
+ slapi_value_free(&this_dn_val);
slapi_entry_free(e);
return rc;
}
@@ -1284,11 +1300,12 @@ int memberof_add_groups_search_callback(Slapi_Entry *e, void *callback_data)
((memberof_add_groups*)callback_data)->target_dn);
}
-/* memberof_is_group_member()
- * tests membership of memberdn in group groupdn
+/* memberof_is_direct_member()
+ *
+ * tests for direct membership of memberdn in group groupdn
* returns non-zero when true, zero otherwise
*/
-int memberof_is_group_member(Slapi_Value *groupdn, Slapi_Value *memberdn)
+int memberof_is_direct_member(Slapi_Value *groupdn, Slapi_Value *memberdn)
{
int rc = 0;
Slapi_DN *sdn = 0;
@@ -1316,9 +1333,164 @@ int memberof_is_group_member(Slapi_Value *groupdn, Slapi_Value *memberdn)
return rc;
}
+/* memberof_is_member()
+ *
+ * tests for membership of memberdn in group groupdn. This
+ * will check for both direct and indirect membership.
+ * returns non-zero when true, zero otherwise
+ */
+int memberof_is_member(Slapi_Value *groupdn, Slapi_Value *memberdn)
+{
+ memberofstringll *stack = 0;
+
+ /* Do a quick check to see if the entry is a direct
+ * member before tracing through nested groups. */
+ if(memberof_is_direct_member(groupdn, memberdn))
+ {
+ /* entry is a direct member */
+ return 1;
+ }
+
+ return memberof_is_member_r(groupdn, memberdn, stack);
+}
+
+/* memberof_is_member_r()
+ *
+ * Recursive function to do the work for the memberof_is_member()
+ * function. This will basically check if "memberdn" is a member
+ * of the group represented by "groupdn". Only "member" attribute
+ * values will be used to make this determination, not "memberOf"
+ * attribute values.
+ *
+ * returns non-zero when true, zero otherwise
+ */
+int memberof_is_member_r(Slapi_Value *groupdn, Slapi_Value *memberdn, memberofstringll *stack)
+{
+ Slapi_DN *member_sdn = 0;
+ Slapi_DN *base_sdn = 0;
+ Slapi_PBlock *search_pb = slapi_pblock_new();
+ Slapi_Backend *be = 0;
+ Slapi_Value *ll_dn_val = 0;
+ memberofstringll *ll = stack;
+ char *filter_str = 0;
+ int rc = 0;
+
+ /* Check if we've processed memberdn already to detect looped
+ * groupings. We want to do this right away to avoid any
+ * unecessary processing. */
+ while(ll)
+ {
+ ll_dn_val = slapi_value_new_string(ll->dn);
+
+ if(0 == memberof_compare(&ll_dn_val, &memberdn))
+ {
+ slapi_value_free(&ll_dn_val);
+
+ /* someone set up infinitely
+ * recursive groups - bail out */
+ slapi_log_error( SLAPI_LOG_FATAL,
+ MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_is_member_r: group recursion"
+ " detected in %s\n"
+ ,slapi_value_get_string(memberdn));
+ goto bail;
+ }
+
+ slapi_value_free(&ll_dn_val);
+ ll = ll->next;
+ }
+
+ /* push memberdn onto the stack to detect loops */
+ ll = (memberofstringll*)slapi_ch_malloc(sizeof(memberofstringll));
+ ll->dn = slapi_value_get_string(memberdn);
+ ll->next = stack;
+
+ /* Find the backend suffix that memberdn is in so we can
+ * use it as a search base. */
+ member_sdn = slapi_sdn_new_dn_byref(slapi_value_get_string(memberdn));
+ be = slapi_be_select(member_sdn);
+ if(be)
+ {
+ base_sdn = (Slapi_DN*)slapi_be_getsuffix(be,0);
+ }
+
+ /* Do a search for "member=<memberdn>". Go through matches to
+ * see if it is our group. If not, search for "member=<matchdn>"
+ * and keep looping until we've exhausted it. */
+ if(base_sdn)
+ {
+ int filter_size =
+ (strlen(MEMBEROF_GROUP_ATTR) +
+ strlen(slapi_value_get_string(memberdn)) + 4); /* 4 for (=) + null */
+ filter_str = (char*)slapi_ch_malloc(filter_size);
+ sprintf(filter_str, "(%s=%s)", MEMBEROF_GROUP_ATTR, slapi_value_get_string(memberdn));
+ }
+
+ if(filter_str)
+ {
+ slapi_search_internal_set_pb(search_pb, slapi_sdn_get_dn(base_sdn),
+ LDAP_SCOPE_SUBTREE, filter_str, 0, 0,
+ 0, 0,
+ memberof_get_plugin_id(),
+ 0);
+
+ if(slapi_search_internal_pb(search_pb))
+ {
+ /* get result and log an error */
+ int res = 0;
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &res);
+ slapi_log_error( SLAPI_LOG_FATAL, MEMBEROF_PLUGIN_SUBSYSTEM,
+ "memberof_is_member_r: error searching for groups: %d",
+ res);
+ goto bail;
+ } else {
+ Slapi_Entry **entries = NULL;
+ slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &entries);
+ if ( NULL != entries && NULL != entries[0])
+ {
+ int i;
+
+ for(i = 0; entries[i] != NULL; i++)
+ {
+ /* Iterate through the matches checking if the dn is our groupdn. */
+ if(strcasecmp(slapi_entry_get_ndn(entries[i]), slapi_value_get_string(groupdn)) == 0)
+ {
+ /* This is the group we've been searching for, so
+ * set rc and bail. */
+ rc = 1;
+ break;
+ } else {
+ /* This is not the group you're looking for...
+ * Find all of the groups that this group is a member of to
+ * see if any of them are the group we are trying to find.
+ * We do this by doing a recursive call on this function. */
+ Slapi_Value *entrydn = slapi_value_new_string(slapi_entry_get_ndn(entries[i]));
+ rc = memberof_is_member_r(groupdn, entrydn, ll);
+ slapi_value_free(&entrydn);
+ }
+ }
+ }
+ }
+ }
+
+ bail:
+ slapi_ch_free((void **)&ll);
+ slapi_ch_free_string(&filter_str);
+ slapi_sdn_free(&member_sdn);
+ slapi_free_search_results_internal(search_pb);
+ slapi_pblock_destroy(search_pb);
+
+ return rc;
+}
+
/* memberof_test_membership()
+ *
+ * Finds all entries who are a "memberOf" the group
+ * represented by "group_dn". For each matching entry, we
+ * call memberof_test_membership_callback().
+ *
* for each attribute in the memberof attribute
- * determine if the entry is still a member
+ * determine if the entry is still a member.
*
* test each for direct membership
* move groups entry is memberof to member group
@@ -1326,12 +1498,19 @@ int memberof_is_group_member(Slapi_Value *groupdn, Slapi_Value *memberdn)
* iterate until a pass fails to move a group over to member groups
* remaining groups should be deleted
*/
-int memberof_test_membership(Slapi_PBlock *pb, char *dn)
+int memberof_test_membership(Slapi_PBlock *pb, char *group_dn)
{
- return memberof_call_foreach_dn(pb, dn, MEMBEROF_ATTR,
+ return memberof_call_foreach_dn(pb, group_dn, MEMBEROF_ATTR,
memberof_test_membership_callback ,0);
}
+/*
+ * memberof_test_membership_callback()
+ *
+ * A callback function to do the work of memberof_test_membership().
+ * Note that this not only tests membership, but updates the memberOf
+ * attributes in the entry to be correct.
+ */
int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data)
{
int rc = 0;
@@ -1375,8 +1554,8 @@ int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data)
while(val)
{
- /* test for membership */
- if(memberof_is_group_member(val, entry_dn))
+ /* test for direct membership */
+ if(memberof_is_direct_member(val, entry_dn))
{
/* it is a member */
member_array[m_index] = val;
@@ -1401,12 +1580,19 @@ int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data)
{
member_found = 0;
+ /* For each group that this entry is a verified member of, see if
+ * any of the candidate groups are members. If they are, add them
+ * to the list of verified groups that this entry is a member of.
+ */
while(outer_index < m_index)
{
int inner_index = 0;
while(inner_index < c_index)
{
+ /* Check for a special value in this position
+ * that indicates that the candidate was moved
+ * to the member array. */
if((void*)1 ==
candidate_array[inner_index])
{
@@ -1415,7 +1601,7 @@ int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data)
continue;
}
- if(memberof_is_group_member(
+ if(memberof_is_direct_member(
candidate_array[inner_index],
member_array[outer_index]))
{
@@ -1443,6 +1629,9 @@ int memberof_test_membership_callback(Slapi_Entry *e, void *callback_data)
outer_index = 0;
while(outer_index < c_index)
{
+ /* Check for a special value in this position
+ * that indicates that the candidate was moved
+ * to the member array. */
if((void*)1 == candidate_array[outer_index])
{
/* item moved, skip */
@@ -1708,7 +1897,7 @@ int memberof_is_legit_member(Slapi_PBlock *pb, char *group_dn,
Slapi_Attr *memberof = 0;
Slapi_Value *memberdn = 0;
int hint = 0;
- char *delete_group_dn = 0;
+ const char *delete_group_dn = 0;
slapi_log_error( SLAPI_LOG_TRACE, MEMBEROF_PLUGIN_SUBSYSTEM,
"--> memberof_is_legit_member\n" );
| 0 |
d317718a124d2759eafbaa761914cfc9b8ffdf8b
|
389ds/389-ds-base
|
Bug 697027 - 7 - minor memory leaks found by Valgrind + TET
https://bugzilla.redhat.com/show_bug.cgi?id=697027
[Case 7]
Description: Adding slapi_sdn_done to free memories held in sdn.
|
commit d317718a124d2759eafbaa761914cfc9b8ffdf8b
Author: Noriko Hosoi <[email protected]>
Date: Fri Apr 15 13:10:01 2011 -0700
Bug 697027 - 7 - minor memory leaks found by Valgrind + TET
https://bugzilla.redhat.com/show_bug.cgi?id=697027
[Case 7]
Description: Adding slapi_sdn_done to free memories held in sdn.
diff --git a/ldap/servers/slapd/bind.c b/ldap/servers/slapd/bind.c
index 679cbff9e..9670de2bd 100644
--- a/ldap/servers/slapd/bind.c
+++ b/ldap/servers/slapd/bind.c
@@ -316,6 +316,7 @@ do_bind( Slapi_PBlock *pb )
{
auto_bind = 1; /* flag the bind method */
dn = slapi_ch_strdup(pb->pb_conn->c_dn);
+ slapi_sdn_done(&sdn);
slapi_sdn_init_dn_passin(&sdn,dn);
}
}
| 0 |
23a75834fbf3c67419cb52896544f80fb98e06c8
|
389ds/389-ds-base
|
Issue 4593 - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value
Description:
Added a test to check if additional message is present in the error log
if nsSSLPersonalitySSL value does not match the certificate nickname.
Also brought back changes to ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c,
because they were removed in commit 07b5a79a3a9ec9c6d5575f2a893fd48bdcdd3c81
Relates: https://github.com/389ds/389-ds-base/issues/4593
Reviewed by: @vashirov, @Firstyear, @droideck (Thanks!)
|
commit 23a75834fbf3c67419cb52896544f80fb98e06c8
Author: Barbora Simonova <[email protected]>
Date: Mon May 31 15:48:00 2021 +0200
Issue 4593 - Log an additional message if the server certificate nickname doesn't match nsSSLPersonalitySSL value
Description:
Added a test to check if additional message is present in the error log
if nsSSLPersonalitySSL value does not match the certificate nickname.
Also brought back changes to ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c,
because they were removed in commit 07b5a79a3a9ec9c6d5575f2a893fd48bdcdd3c81
Relates: https://github.com/389ds/389-ds-base/issues/4593
Reviewed by: @vashirov, @Firstyear, @droideck (Thanks!)
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index 9fec213ee..e7d2f2eb0 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -10,6 +10,7 @@ from decimal import *
import os
import logging
import pytest
+import subprocess
from lib389._mapped_object import DSLdapObject
from lib389.topologies import topology_st
from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions
@@ -18,6 +19,7 @@ from lib389.idm.group import Groups
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD
from lib389.utils import ds_is_older, ds_is_newer
+from lib389.config import RSA
import ldap
import glob
import re
@@ -1114,6 +1116,42 @@ def test_enable_external_libs_debug_log(topology_st):
assert not standalone.ds_error_log.match('.*libldap/libber.*')
[email protected](ds_is_older('1.4.3'), reason="Might fail because of bug 1895460")
[email protected]
[email protected]
+def test_cert_personality_log_help(topology_st):
+ """Test changing the nsSSLPersonalitySSL attribute will raise help message in log
+
+ :id: d6f17f64-d784-438e-89b6-8595bdf6defb
+ :customerscenario: True
+ :setup: Standalone
+ :steps:
+ 1. Create instance
+ 2. Change nsSSLPersonalitySSL to wrong certificate nickname
+ 3. Check there is a help message in error log
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ WRONG_NICK = 'otherNick'
+ standalone = topology_st.standalone
+ standalone.enable_tls()
+
+ log.info('Change nsSSLPersonalitySSL to wrong certificate nickname')
+ config_RSA = RSA(standalone)
+ config_RSA.set('nsSSLPersonalitySSL', WRONG_NICK)
+
+ with pytest.raises(subprocess.CalledProcessError):
+ standalone.restart()
+
+ assert standalone.ds_error_log.match(r".*Please, make sure that nsSSLPersonalitySSL value "
+ r"is correctly set to the certificate from NSS database "
+ r"\(currently, nsSSLPersonalitySSL attribute "
+ r"is set to '{}'\)\..*".format(WRONG_NICK))
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c
index 9d399004b..93a70705a 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c
@@ -350,6 +350,9 @@ attrcrypt_fetch_public_key(SECKEYPublicKey **public_key)
errorCode = PR_GetError();
slapi_log_err(SLAPI_LOG_ERR, "attrcrypt_fetch_public_key", "Can't find certificate %s: %d - %s\n",
cert_name, errorCode, slapd_pr_strerror(errorCode));
+ if (PR_FILE_NOT_FOUND_ERROR == errorCode) {
+ slapd_cert_not_found_error_help(cert_name);
+ }
}
if (cert != NULL) {
key = slapd_CERT_ExtractPublicKey(cert);
@@ -397,6 +400,9 @@ attrcrypt_fetch_private_key(SECKEYPrivateKey **private_key)
errorCode = PR_GetError();
slapi_log_err(SLAPI_LOG_ERR, "attrcrypt_fetch_private_key", "Can't find certificate %s: %d - %s\n",
cert_name, errorCode, slapd_pr_strerror(errorCode));
+ if (PR_FILE_NOT_FOUND_ERROR == errorCode) {
+ slapd_cert_not_found_error_help(cert_name);
+ }
}
if (cert != NULL) {
key = slapd_get_unlocked_key_for_cert(cert, NULL);
| 0 |
457cab95d09b17539fc34029a73de2fe85f55321
|
389ds/389-ds-base
|
Issue 50955 - Fix memory leaks in chaining plugin(part 2)
Description: Fix compiler warning
relates: https://pagure.io/389-ds-base/issue/50955
Reviewed by: mreynolds(one line commit rule)
|
commit 457cab95d09b17539fc34029a73de2fe85f55321
Author: Mark Reynolds <[email protected]>
Date: Mon Mar 23 13:20:06 2020 -0400
Issue 50955 - Fix memory leaks in chaining plugin(part 2)
Description: Fix compiler warning
relates: https://pagure.io/389-ds-base/issue/50955
Reviewed by: mreynolds(one line commit rule)
diff --git a/ldap/servers/plugins/chainingdb/cb.h b/ldap/servers/plugins/chainingdb/cb.h
index 7927715cd..9af6dab64 100644
--- a/ldap/servers/plugins/chainingdb/cb.h
+++ b/ldap/servers/plugins/chainingdb/cb.h
@@ -435,7 +435,7 @@ void cb_send_ldap_result(Slapi_PBlock *pb, int err, char *m, char *t, int ne, st
void cb_stale_all_connections(cb_backend_instance *be);
int cb_config_add_instance_check_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *e, int *returncode, char *returntext, void *arg);
int cb_instance_modify_config_check_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *e, int *returncode, char *returntext, void *arg);
-
+void cb_instance_free(cb_backend_instance *inst);
int chaining_back_add(Slapi_PBlock *pb);
int chaining_back_delete(Slapi_PBlock *pb);
| 0 |
790e723e4f30b3d245535ce7a9d5d00477878341
|
389ds/389-ds-base
|
Ticket #48967 - passwordMinAge attribute doesn't limit the minimum age of the password
Description: There was a logic error in check_pw_minage. Password-
MinAge was ignored by the error. This patch fixes the logic error.
https://fedorahosted.org/389/ticket/48967
Reviewed and tested by [email protected] and [email protected].
(Thank you, William and Simon!)
|
commit 790e723e4f30b3d245535ce7a9d5d00477878341
Author: Noriko Hosoi <[email protected]>
Date: Tue Aug 23 14:18:32 2016 -0700
Ticket #48967 - passwordMinAge attribute doesn't limit the minimum age of the password
Description: There was a logic error in check_pw_minage. Password-
MinAge was ignored by the error. This patch fixes the logic error.
https://fedorahosted.org/389/ticket/48967
Reviewed and tested by [email protected] and [email protected].
(Thank you, William and Simon!)
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index 6b865ece8..7469b9e13 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -729,7 +729,7 @@ check_pw_minage ( Slapi_PBlock *pb, const Slapi_DN *sdn, struct berval **vals)
pwpolicy = new_passwdPolicy(pb, dn);
slapi_pblock_get ( pb, SLAPI_PWPOLICY, &pwresponse_req );
- if (!pb->pb_op->o_isroot && !pwpolicy->pw_minage) {
+ if (!pb->pb_op->o_isroot && pwpolicy->pw_minage) {
Slapi_Entry *e;
char *passwordAllowChangeTime;
| 0 |
660629d99dd270dc26472c10d3e9f7e2dea899b5
|
389ds/389-ds-base
|
Bug 630094 - (cov#15520) Fix unreachable code issue if perfctrs code
The final frees of priv->memory and priv will never be reached since
the function returns prior to these calls. It looks as if an
"error:" label was removed at some point, as the WIN32 code in this
function has goto statements using that label, but the label is not
defined.
The fix is to add the "error:" label in ifdef blocks for WIN32 that
calls the free of priv. The free of priv->memory is not necessary
since WIN32 doesn't use it and non-WIN32 builds don't use the error
label at all.
|
commit 660629d99dd270dc26472c10d3e9f7e2dea899b5
Author: Nathan Kinder <[email protected]>
Date: Thu Sep 9 08:25:23 2010 -0700
Bug 630094 - (cov#15520) Fix unreachable code issue if perfctrs code
The final frees of priv->memory and priv will never be reached since
the function returns prior to these calls. It looks as if an
"error:" label was removed at some point, as the WIN32 code in this
function has goto statements using that label, but the label is not
defined.
The fix is to add the "error:" label in ifdef blocks for WIN32 that
calls the free of priv. The free of priv->memory is not necessary
since WIN32 doesn't use it and non-WIN32 builds don't use the error
label at all.
diff --git a/ldap/servers/slapd/back-ldbm/perfctrs.c b/ldap/servers/slapd/back-ldbm/perfctrs.c
index bb5ba00fe..27efb7ddf 100644
--- a/ldap/servers/slapd/back-ldbm/perfctrs.c
+++ b/ldap/servers/slapd/back-ldbm/perfctrs.c
@@ -211,10 +211,11 @@ void perfctrs_init(struct ldbminfo *li, perfctrs_private **ret_priv)
*ret_priv = priv;
return;
-#if !defined(_WIN32)
- if (priv) slapi_ch_free((void**)&priv->memory);
-#endif
+#if defined(_WIN32)
+error:
slapi_ch_free((void**)&priv);
+ return;
+#endif
}
/* Terminate perf ctrs */
| 0 |
92b319a832c74288b21aff2bfbd57abdae0e514c
|
389ds/389-ds-base
|
Ticket 48832 - Add DirSrvTools.getLocalhost() function
Description: Some tests require us to know the exact localhost value
that is on the first place after 127.0.0.1.
(some Directory Server attributes is sensible at this matter)
https://fedorahosted.org/389/ticket/48832
Reviewed by: mreynolds (Thanks!)
|
commit 92b319a832c74288b21aff2bfbd57abdae0e514c
Author: Simon Pichugin <[email protected]>
Date: Fri Jul 29 17:34:48 2016 +0200
Ticket 48832 - Add DirSrvTools.getLocalhost() function
Description: Some tests require us to know the exact localhost value
that is on the first place after 127.0.0.1.
(some Directory Server attributes is sensible at this matter)
https://fedorahosted.org/389/ticket/48832
Reviewed by: mreynolds (Thanks!)
diff --git a/src/lib389/lib389/tools.py b/src/lib389/lib389/tools.py
index 59649c882..351714304 100644
--- a/src/lib389/lib389/tools.py
+++ b/src/lib389/lib389/tools.py
@@ -1029,6 +1029,19 @@ class DirSrvTools(object):
"Error: /etc/hosts does not contain '%s' as first host for %s"
% (expectedHost, ipPattern))
+ @staticmethod
+ def getLocalhost():
+ """Get the first host value after 127.0.0.1
+ from /etc/hosts file
+ """
+
+ with open('/etc/hosts', 'r') as f:
+ for line in f.readlines():
+ if line.startswith('127.0.0.1'):
+ localhost = line.split()[1]
+ return localhost
+ return None
+
@staticmethod
def testLocalhost():
'''
| 0 |
11ddce5b98f70173ab2d437c1467e4e784ce6afa
|
389ds/389-ds-base
|
Bug 611790 - fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166
https://bugzilla.redhat.com/show_bug.cgi?id=611790
Resolves: bug 611790
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166
Fix description: Catch possible NULL pointer in sendGetReq(), sendPostReq(), and parseAtPath().
|
commit 11ddce5b98f70173ab2d437c1467e4e784ce6afa
Author: Endi S. Dewata <[email protected]>
Date: Tue Jul 6 11:55:22 2010 -0500
Bug 611790 - fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166
https://bugzilla.redhat.com/show_bug.cgi?id=611790
Resolves: bug 611790
Bug description: Fix coverify Defect Type: Null pointer dereferences issues 11940 - 12166
Fix description: Catch possible NULL pointer in sendGetReq(), sendPostReq(), and parseAtPath().
diff --git a/ldap/servers/plugins/http/http_impl.c b/ldap/servers/plugins/http/http_impl.c
index c9e82be31..f96d75ad1 100644
--- a/ldap/servers/plugins/http/http_impl.c
+++ b/ldap/servers/plugins/http/http_impl.c
@@ -605,6 +605,10 @@ static PRStatus sendGetReq(PRFileDesc *fd, const char *path)
int buflen = (HTTP_GET_STD_LEN + strlen(path));
reqBUF = (char *)PR_Calloc(1, buflen);
+ if (!reqBUF) {
+ status = PR_FAILURE;
+ goto out;
+ }
strcpy(reqBUF, HTTP_GET);
strcat(reqBUF, " ");
@@ -615,7 +619,7 @@ static PRStatus sendGetReq(PRFileDesc *fd, const char *path)
http_connection_time_out = httpConfig->connectionTimeOut;
status = sendFullData( fd, reqBUF, http_connection_time_out);
-
+out:
if (reqBUF) {
PR_Free(reqBUF);
reqBUF = 0;
@@ -651,10 +655,10 @@ static PRStatus sendFullData( PRFileDesc *fd, char *buf, int timeOut)
static PRStatus sendPostReq(PRFileDesc *fd, const char *path, httpheader **httpheaderArray, char *body)
{
- PRStatus status = PR_SUCCESS;
+ PRStatus status = PR_SUCCESS;
char body_len_str[20];
- char *reqBUF = NULL;
- PRInt32 http_connection_time_out = 0;
+ char *reqBUF = NULL;
+ PRInt32 http_connection_time_out = 0;
int i = 0;
int body_len, buflen = 0;
@@ -665,9 +669,9 @@ static PRStatus sendPostReq(PRFileDesc *fd, const char *path, httpheader **httph
}
PR_snprintf(body_len_str, sizeof(body_len_str), "%d", body_len);
- buflen = (HTTP_POST_STD_LEN + strlen(path) + body_len + strlen(body_len_str));
+ buflen = (HTTP_POST_STD_LEN + strlen(path) + body_len + strlen(body_len_str));
- for (i = 0; httpheaderArray[i] != NULL; i++) {
+ for (i = 0; httpheaderArray[i] != NULL; i++) {
if (httpheaderArray[i]->name != NULL)
{
@@ -676,22 +680,26 @@ static PRStatus sendPostReq(PRFileDesc *fd, const char *path, httpheader **httph
buflen += strlen(httpheaderArray[i]->value) + 2;
}
- }
+ }
- reqBUF = (char *)PR_Calloc(1, buflen);
-
- strcpy(reqBUF, HTTP_POST);
- strcat(reqBUF, " ");
- strcat(reqBUF, path);
- strcat(reqBUF, " ");
- strcat(reqBUF, HTTP_PROTOCOL);
- strcat(reqBUF, "\r\n");
- strcat(reqBUF, HTTP_CONTENT_LENGTH);
- strcat(reqBUF, " ");
- strcat(reqBUF, body_len_str);
- strcat(reqBUF, "\r\n");
- strcat(reqBUF, HTTP_CONTENT_TYPE_URL_ENCODED);
- strcat(reqBUF, "\r\n");
+ reqBUF = (char *)PR_Calloc(1, buflen);
+ if (!reqBUF) {
+ status = PR_FAILURE;
+ goto out;
+ }
+
+ strcpy(reqBUF, HTTP_POST);
+ strcat(reqBUF, " ");
+ strcat(reqBUF, path);
+ strcat(reqBUF, " ");
+ strcat(reqBUF, HTTP_PROTOCOL);
+ strcat(reqBUF, "\r\n");
+ strcat(reqBUF, HTTP_CONTENT_LENGTH);
+ strcat(reqBUF, " ");
+ strcat(reqBUF, body_len_str);
+ strcat(reqBUF, "\r\n");
+ strcat(reqBUF, HTTP_CONTENT_TYPE_URL_ENCODED);
+ strcat(reqBUF, "\r\n");
for (i = 0; httpheaderArray[i] != NULL; i++) {
@@ -704,22 +712,22 @@ static PRStatus sendPostReq(PRFileDesc *fd, const char *path, httpheader **httph
}
- strcat(reqBUF, "\r\n");
+ strcat(reqBUF, "\r\n");
if (body) {
strcat(reqBUF, body);
}
- strcat(reqBUF, "\0");
+ strcat(reqBUF, "\0");
LDAPDebug( LDAP_DEBUG_PLUGIN, "---------->reqBUF is %s \n",reqBUF,0,0);
- http_connection_time_out = httpConfig->connectionTimeOut;
+ http_connection_time_out = httpConfig->connectionTimeOut;
status = sendFullData( fd, reqBUF, http_connection_time_out);
-
- if (reqBUF) {
+out:
+ if (reqBUF) {
PR_Free(reqBUF);
reqBUF = 0;
- }
- return status;
+ }
+ return status;
}
@@ -871,19 +879,22 @@ static PRStatus parseAtPath(const char *url, char **path)
{
PRStatus status = PR_SUCCESS;
char *dir = "%s%s";
+
*path = (char *)PR_Calloc(1, strlen(dir) + strlen(url) + 2);
+ if (!*path) {
+ /* Error : HTTP_BAD_URL */
+ status = PR_FAILURE;
+ goto out;
+ }
- /* Just write the path and check for a starting / */
- if ('/' != *url) {
+ /* Just write the path and check for a starting / */
+ if ('/' != *url) {
sprintf(*path, dir, "/", url);
} else {
strcpy(*path, url);
}
- if (!*path) {
- /* Error : HTTP_BAD_URL */
- status = PR_FAILURE;
- }
- return status;
+out:
+ return status;
}
static void toLowerCase(char* str)
| 0 |
214c7a725c8f2872ede5905fa7ea7904976f060b
|
389ds/389-ds-base
|
Added sasl link to ldap-agent binary.
|
commit 214c7a725c8f2872ede5905fa7ea7904976f060b
Author: Nathan Kinder <[email protected]>
Date: Mon Aug 20 18:33:32 2007 +0000
Added sasl link to ldap-agent binary.
diff --git a/Makefile.am b/Makefile.am
index b0c943d8f..e4f231374 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -859,7 +859,7 @@ ldap_agent_bin_SOURCES = ldap/servers/snmp/main.c \
ldap/servers/slapd/agtmmap.c
ldap_agent_bin_CPPFLAGS = $(AM_CPPFLAGS) @netsnmp_inc@ @ldapsdk_inc@ @nss_inc@ @nspr_inc@
-ldap_agent_bin_LDADD = $(LDAPSDK_LINK) $(NSS_LINK) $(NSPR_LINK) $(NETSNMP_LINK)
+ldap_agent_bin_LDADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(NETSNMP_LINK)
#------------------------
# ldclt
diff --git a/Makefile.in b/Makefile.in
index 403ad13f1..bb15f2088 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -561,7 +561,7 @@ am_ldap_agent_bin_OBJECTS = \
ldap_agent_bin_OBJECTS = $(am_ldap_agent_bin_OBJECTS)
ldap_agent_bin_DEPENDENCIES = $(am__DEPENDENCIES_1) \
$(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) \
- $(am__DEPENDENCIES_1)
+ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
am__ldclt_bin_SOURCES_DIST = ldap/servers/slapd/tools/ldaptool-sasl.c \
ldap/servers/slapd/tools/ldclt/data.c \
ldap/servers/slapd/tools/ldclt/ldapfct.c \
@@ -1752,7 +1752,7 @@ ldap_agent_bin_SOURCES = ldap/servers/snmp/main.c \
ldap/servers/slapd/agtmmap.c
ldap_agent_bin_CPPFLAGS = $(AM_CPPFLAGS) @netsnmp_inc@ @ldapsdk_inc@ @nss_inc@ @nspr_inc@
-ldap_agent_bin_LDADD = $(LDAPSDK_LINK) $(NSS_LINK) $(NSPR_LINK) $(NETSNMP_LINK)
+ldap_agent_bin_LDADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(NETSNMP_LINK)
#------------------------
# ldclt
| 0 |
d559d4665b18702b51161a25737b62799d8ef430
|
389ds/389-ds-base
|
Ticket #584 - Existence of an entry is not checked when its password is to be deleted
Bug description: When attempting to delete a password from an
entry, a password syntax checking api check_pw_syntax_ext missed
a check if the target entry exists or not. Note: add and replace
checks it and handles the case correctly.
Fix description: In this patch the check is added to the delete
case, as well.
Reviewed by Rich (Thank you!!)
|
commit d559d4665b18702b51161a25737b62799d8ef430
Author: Noriko Hosoi <[email protected]>
Date: Wed Feb 13 14:13:56 2013 -0800
Ticket #584 - Existence of an entry is not checked when its password is to be deleted
Bug description: When attempting to delete a password from an
entry, a password syntax checking api check_pw_syntax_ext missed
a check if the target entry exists or not. Note: add and replace
checks it and handles the case correctly.
Fix description: In this patch the check is added to the delete
case, as well.
Reviewed by Rich (Thank you!!)
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index 671a230de..b95a19dbd 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -770,7 +770,7 @@ int
check_pw_syntax_ext ( Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals,
char **old_pw, Slapi_Entry *e, int mod_op, Slapi_Mods *smods)
{
- Slapi_Attr *attr;
+ Slapi_Attr *attr;
int i, pwresponse_req = 0;
int is_replication = 0;
int internal_op = 0;
@@ -787,6 +787,12 @@ check_pw_syntax_ext ( Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals,
* PASS == 0.
*/
if (LDAP_MOD_DELETE == (mod_op & LDAP_MOD_OP)) {
+ /* check if the entry exists or not */
+ e = get_entry(pb, dn);
+ if (e == NULL) {
+ return -1;
+ }
+ slapi_entry_free(e);
return 0;
}
if (NULL == vals) {
| 0 |
1eec89b8486f1e5d710f73e494557cab2670acd7
|
389ds/389-ds-base
|
189176 - Fixed aci for directory administrators group.
|
commit 1eec89b8486f1e5d710f73e494557cab2670acd7
Author: Nathan Kinder <[email protected]>
Date: Tue Apr 18 17:39:57 2006 +0000
189176 - Fixed aci for directory administrators group.
diff --git a/ldap/ldif/template.ldif b/ldap/ldif/template.ldif
index e0c54d0bd..a0ab7914d 100644
--- a/ldap/ldif/template.ldif
+++ b/ldap/ldif/template.ldif
@@ -44,7 +44,7 @@ aci: (targetattr ="*")(version 3.0;
cn=Configuration Administrators, ou=Groups, ou=TopologyManagement, o=NetscapeRoot");)
aci: (targetattr ="*")(version 3.0;
acl "Directory Administrators Group";allow (all) (groupdn = "ldap:///
- ou=Directory Administrators, %%%SUFFIX%%%");)
+ cn=Directory Administrators, %%%SUFFIX%%%");)
dn: cn=Directory Administrators, %%%SUFFIX%%%
objectClass: top
| 0 |
4beed0d0584c8b17d8b48a03320e46bd89aa5211
|
389ds/389-ds-base
|
Add Simple Paged Results
For more details, see the design doc at http://directory.fedoraproject.org/wiki/Simple_Paged_Results_Design
|
commit 4beed0d0584c8b17d8b48a03320e46bd89aa5211
Author: Noriko Hosoi <[email protected]>
Date: Fri May 15 16:10:32 2009 -0700
Add Simple Paged Results
For more details, see the design doc at http://directory.fedoraproject.org/wiki/Simple_Paged_Results_Design
diff --git a/Makefile.am b/Makefile.am
index 8a6139e1d..ffc27ddfd 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -509,6 +509,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/objset.c \
ldap/servers/slapd/operation.c \
ldap/servers/slapd/opshared.c \
+ ldap/servers/slapd/pagedresults.c \
ldap/servers/slapd/pblock.c \
ldap/servers/slapd/plugin.c \
ldap/servers/slapd/plugin_acl.c \
@@ -532,6 +533,7 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/slapi_counter.c \
ldap/servers/slapd/slapi2nspr.c \
ldap/servers/slapd/snmp_collator.c \
+ ldap/servers/slapd/sort.c \
ldap/servers/slapd/ssl.c \
ldap/servers/slapd/str2filter.c \
ldap/servers/slapd/subentry.c \
diff --git a/Makefile.in b/Makefile.in
index 42b0c56c0..04be11150 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -525,8 +525,8 @@ am__libslapd_la_SOURCES_DIST = ldap/servers/slapd/add.c \
ldap/servers/slapd/modutil.c ldap/servers/slapd/ntuserpin.c \
ldap/servers/slapd/object.c ldap/servers/slapd/objset.c \
ldap/servers/slapd/operation.c ldap/servers/slapd/opshared.c \
- ldap/servers/slapd/pblock.c ldap/servers/slapd/plugin.c \
- ldap/servers/slapd/plugin_acl.c \
+ ldap/servers/slapd/pagedresults.c ldap/servers/slapd/pblock.c \
+ ldap/servers/slapd/plugin.c ldap/servers/slapd/plugin_acl.c \
ldap/servers/slapd/plugin_internal_op.c \
ldap/servers/slapd/plugin_mr.c \
ldap/servers/slapd/plugin_role.c \
@@ -540,10 +540,11 @@ am__libslapd_la_SOURCES_DIST = ldap/servers/slapd/add.c \
ldap/servers/slapd/slapd_plhash.c \
ldap/servers/slapd/slapi_counter.c \
ldap/servers/slapd/slapi2nspr.c \
- ldap/servers/slapd/snmp_collator.c ldap/servers/slapd/ssl.c \
- ldap/servers/slapd/str2filter.c ldap/servers/slapd/subentry.c \
- ldap/servers/slapd/task.c ldap/servers/slapd/time.c \
- ldap/servers/slapd/uniqueid.c ldap/servers/slapd/uniqueidgen.c \
+ ldap/servers/slapd/snmp_collator.c ldap/servers/slapd/sort.c \
+ ldap/servers/slapd/ssl.c ldap/servers/slapd/str2filter.c \
+ ldap/servers/slapd/subentry.c ldap/servers/slapd/task.c \
+ ldap/servers/slapd/time.c ldap/servers/slapd/uniqueid.c \
+ ldap/servers/slapd/uniqueidgen.c \
ldap/servers/slapd/utf8compare.c ldap/servers/slapd/util.c \
ldap/servers/slapd/uuid.c ldap/servers/slapd/value.c \
ldap/servers/slapd/valueset.c ldap/servers/slapd/vattr.c \
@@ -603,6 +604,7 @@ am_libslapd_la_OBJECTS = ldap/servers/slapd/libslapd_la-add.lo \
ldap/servers/slapd/libslapd_la-objset.lo \
ldap/servers/slapd/libslapd_la-operation.lo \
ldap/servers/slapd/libslapd_la-opshared.lo \
+ ldap/servers/slapd/libslapd_la-pagedresults.lo \
ldap/servers/slapd/libslapd_la-pblock.lo \
ldap/servers/slapd/libslapd_la-plugin.lo \
ldap/servers/slapd/libslapd_la-plugin_acl.lo \
@@ -626,6 +628,7 @@ am_libslapd_la_OBJECTS = ldap/servers/slapd/libslapd_la-add.lo \
ldap/servers/slapd/libslapd_la-slapi_counter.lo \
ldap/servers/slapd/libslapd_la-slapi2nspr.lo \
ldap/servers/slapd/libslapd_la-snmp_collator.lo \
+ ldap/servers/slapd/libslapd_la-sort.lo \
ldap/servers/slapd/libslapd_la-ssl.lo \
ldap/servers/slapd/libslapd_la-str2filter.lo \
ldap/servers/slapd/libslapd_la-subentry.lo \
@@ -1541,8 +1544,8 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/modutil.c ldap/servers/slapd/ntuserpin.c \
ldap/servers/slapd/object.c ldap/servers/slapd/objset.c \
ldap/servers/slapd/operation.c ldap/servers/slapd/opshared.c \
- ldap/servers/slapd/pblock.c ldap/servers/slapd/plugin.c \
- ldap/servers/slapd/plugin_acl.c \
+ ldap/servers/slapd/pagedresults.c ldap/servers/slapd/pblock.c \
+ ldap/servers/slapd/plugin.c ldap/servers/slapd/plugin_acl.c \
ldap/servers/slapd/plugin_internal_op.c \
ldap/servers/slapd/plugin_mr.c \
ldap/servers/slapd/plugin_role.c \
@@ -1556,10 +1559,11 @@ libslapd_la_SOURCES = ldap/servers/slapd/add.c \
ldap/servers/slapd/slapd_plhash.c \
ldap/servers/slapd/slapi_counter.c \
ldap/servers/slapd/slapi2nspr.c \
- ldap/servers/slapd/snmp_collator.c ldap/servers/slapd/ssl.c \
- ldap/servers/slapd/str2filter.c ldap/servers/slapd/subentry.c \
- ldap/servers/slapd/task.c ldap/servers/slapd/time.c \
- ldap/servers/slapd/uniqueid.c ldap/servers/slapd/uniqueidgen.c \
+ ldap/servers/slapd/snmp_collator.c ldap/servers/slapd/sort.c \
+ ldap/servers/slapd/ssl.c ldap/servers/slapd/str2filter.c \
+ ldap/servers/slapd/subentry.c ldap/servers/slapd/task.c \
+ ldap/servers/slapd/time.c ldap/servers/slapd/uniqueid.c \
+ ldap/servers/slapd/uniqueidgen.c \
ldap/servers/slapd/utf8compare.c ldap/servers/slapd/util.c \
ldap/servers/slapd/uuid.c ldap/servers/slapd/value.c \
ldap/servers/slapd/valueset.c ldap/servers/slapd/vattr.c \
@@ -3475,6 +3479,9 @@ ldap/servers/slapd/libslapd_la-operation.lo: \
ldap/servers/slapd/libslapd_la-opshared.lo: \
ldap/servers/slapd/$(am__dirstamp) \
ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
+ldap/servers/slapd/libslapd_la-pagedresults.lo: \
+ ldap/servers/slapd/$(am__dirstamp) \
+ ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
ldap/servers/slapd/libslapd_la-pblock.lo: \
ldap/servers/slapd/$(am__dirstamp) \
ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
@@ -3544,6 +3551,9 @@ ldap/servers/slapd/libslapd_la-slapi2nspr.lo: \
ldap/servers/slapd/libslapd_la-snmp_collator.lo: \
ldap/servers/slapd/$(am__dirstamp) \
ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
+ldap/servers/slapd/libslapd_la-sort.lo: \
+ ldap/servers/slapd/$(am__dirstamp) \
+ ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
ldap/servers/slapd/libslapd_la-ssl.lo: \
ldap/servers/slapd/$(am__dirstamp) \
ldap/servers/slapd/$(DEPDIR)/$(am__dirstamp)
@@ -4634,6 +4644,8 @@ mostlyclean-compile:
-rm -f ldap/servers/slapd/libslapd_la-operation.lo
-rm -f ldap/servers/slapd/libslapd_la-opshared.$(OBJEXT)
-rm -f ldap/servers/slapd/libslapd_la-opshared.lo
+ -rm -f ldap/servers/slapd/libslapd_la-pagedresults.$(OBJEXT)
+ -rm -f ldap/servers/slapd/libslapd_la-pagedresults.lo
-rm -f ldap/servers/slapd/libslapd_la-pblock.$(OBJEXT)
-rm -f ldap/servers/slapd/libslapd_la-pblock.lo
-rm -f ldap/servers/slapd/libslapd_la-plugin.$(OBJEXT)
@@ -4682,6 +4694,8 @@ mostlyclean-compile:
-rm -f ldap/servers/slapd/libslapd_la-slapi_counter_sunos_sparcv9.lo
-rm -f ldap/servers/slapd/libslapd_la-snmp_collator.$(OBJEXT)
-rm -f ldap/servers/slapd/libslapd_la-snmp_collator.lo
+ -rm -f ldap/servers/slapd/libslapd_la-sort.$(OBJEXT)
+ -rm -f ldap/servers/slapd/libslapd_la-sort.lo
-rm -f ldap/servers/slapd/libslapd_la-ssl.$(OBJEXT)
-rm -f ldap/servers/slapd/libslapd_la-ssl.lo
-rm -f ldap/servers/slapd/libslapd_la-str2filter.$(OBJEXT)
@@ -5111,6 +5125,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-objset.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-operation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-opshared.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-pagedresults.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-pblock.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-plugin.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-plugin_acl.Plo@am__quote@
@@ -5135,6 +5150,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-slapi_counter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-slapi_counter_sunos_sparcv9.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-snmp_collator.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-sort.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-ssl.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-str2filter.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@ldap/servers/slapd/$(DEPDIR)/libslapd_la-subentry.Plo@am__quote@
@@ -7390,6 +7406,13 @@ ldap/servers/slapd/libslapd_la-opshared.lo: ldap/servers/slapd/opshared.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/slapd/libslapd_la-opshared.lo `test -f 'ldap/servers/slapd/opshared.c' || echo '$(srcdir)/'`ldap/servers/slapd/opshared.c
+ldap/servers/slapd/libslapd_la-pagedresults.lo: ldap/servers/slapd/pagedresults.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/libslapd_la-pagedresults.lo -MD -MP -MF ldap/servers/slapd/$(DEPDIR)/libslapd_la-pagedresults.Tpo -c -o ldap/servers/slapd/libslapd_la-pagedresults.lo `test -f 'ldap/servers/slapd/pagedresults.c' || echo '$(srcdir)/'`ldap/servers/slapd/pagedresults.c
+@am__fastdepCC_TRUE@ mv -f ldap/servers/slapd/$(DEPDIR)/libslapd_la-pagedresults.Tpo ldap/servers/slapd/$(DEPDIR)/libslapd_la-pagedresults.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/slapd/pagedresults.c' object='ldap/servers/slapd/libslapd_la-pagedresults.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/slapd/libslapd_la-pagedresults.lo `test -f 'ldap/servers/slapd/pagedresults.c' || echo '$(srcdir)/'`ldap/servers/slapd/pagedresults.c
+
ldap/servers/slapd/libslapd_la-pblock.lo: ldap/servers/slapd/pblock.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/libslapd_la-pblock.lo -MD -MP -MF ldap/servers/slapd/$(DEPDIR)/libslapd_la-pblock.Tpo -c -o ldap/servers/slapd/libslapd_la-pblock.lo `test -f 'ldap/servers/slapd/pblock.c' || echo '$(srcdir)/'`ldap/servers/slapd/pblock.c
@am__fastdepCC_TRUE@ mv -f ldap/servers/slapd/$(DEPDIR)/libslapd_la-pblock.Tpo ldap/servers/slapd/$(DEPDIR)/libslapd_la-pblock.Plo
@@ -7551,6 +7574,13 @@ ldap/servers/slapd/libslapd_la-snmp_collator.lo: ldap/servers/slapd/snmp_collato
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/slapd/libslapd_la-snmp_collator.lo `test -f 'ldap/servers/slapd/snmp_collator.c' || echo '$(srcdir)/'`ldap/servers/slapd/snmp_collator.c
+ldap/servers/slapd/libslapd_la-sort.lo: ldap/servers/slapd/sort.c
+@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/libslapd_la-sort.lo -MD -MP -MF ldap/servers/slapd/$(DEPDIR)/libslapd_la-sort.Tpo -c -o ldap/servers/slapd/libslapd_la-sort.lo `test -f 'ldap/servers/slapd/sort.c' || echo '$(srcdir)/'`ldap/servers/slapd/sort.c
+@am__fastdepCC_TRUE@ mv -f ldap/servers/slapd/$(DEPDIR)/libslapd_la-sort.Tpo ldap/servers/slapd/$(DEPDIR)/libslapd_la-sort.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='ldap/servers/slapd/sort.c' object='ldap/servers/slapd/libslapd_la-sort.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o ldap/servers/slapd/libslapd_la-sort.lo `test -f 'ldap/servers/slapd/sort.c' || echo '$(srcdir)/'`ldap/servers/slapd/sort.c
+
ldap/servers/slapd/libslapd_la-ssl.lo: ldap/servers/slapd/ssl.c
@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(libslapd_la_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT ldap/servers/slapd/libslapd_la-ssl.lo -MD -MP -MF ldap/servers/slapd/$(DEPDIR)/libslapd_la-ssl.Tpo -c -o ldap/servers/slapd/libslapd_la-ssl.lo `test -f 'ldap/servers/slapd/ssl.c' || echo '$(srcdir)/'`ldap/servers/slapd/ssl.c
@am__fastdepCC_TRUE@ mv -f ldap/servers/slapd/$(DEPDIR)/libslapd_la-ssl.Tpo ldap/servers/slapd/$(DEPDIR)/libslapd_la-ssl.Plo
diff --git a/configure b/configure
index b30f80125..599eea8aa 100755
--- a/configure
+++ b/configure
@@ -24061,9 +24061,9 @@ fi
# the default prefix - override with --prefix or --with-fhs
-brand=fedora
-capbrand=Fedora
-vendor="Fedora Project"
+brand=389
+capbrand=389
+vendor="389 Project"
# BEGIN COPYRIGHT BLOCK
# Copyright (C) 2006 Red Hat, Inc.
diff --git a/configure.ac b/configure.ac
index 57dd54d20..d083484a2 100644
--- a/configure.ac
+++ b/configure.ac
@@ -185,9 +185,9 @@ AM_CONDITIONAL(enable_presence,test "$enable_presence" = "yes")
# the default prefix - override with --prefix or --with-fhs
AC_PREFIX_DEFAULT([/opt/$PACKAGE_NAME])
-brand=fedora
-capbrand=Fedora
-vendor="Fedora Project"
+brand=389
+capbrand=389
+vendor="389 Project"
m4_include(m4/fhs.m4)
diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c
index 413d01f40..d41829f86 100644
--- a/ldap/servers/slapd/back-ldbm/filterindex.c
+++ b/ldap/servers/slapd/back-ldbm/filterindex.c
@@ -782,16 +782,31 @@ list_candidates(
(idl_length(idl) <= FILTER_TEST_THRESHOLD))
break;
} else {
+ Slapi_Operation *operation;
+ struct ldbminfo *li;
+ slapi_pblock_get( pb, SLAPI_OPERATION, &operation );
+ slapi_pblock_get( pb, SLAPI_PLUGIN_PRIVATE, &li );
+
idl = idl_union( be, idl, tmp );
idl_free( tmp );
idl_free( tmp2 );
/* stop if we're already committed to an exhaustive
* search. :(
*/
+ /* PAGED RESULTS: if not Directory Manager, we strictly limit
+ * the idlist size by the lookthrough limit.
+ */
+ if (operation->o_flags & OP_FLAG_PAGED_RESULTS) {
+ int nids = IDL_NIDS(idl);
+ int lookthroughlimits = compute_lookthrough_limit( pb, li );
+ if ( lookthroughlimits > 0 && nids > lookthroughlimits ) {
+ idl_free( idl );
+ idl = idl_allids( be );
+ }
+ }
if (idl_is_allids(idl))
break;
}
-
}
LDAPDebug( LDAP_DEBUG_TRACE, "<= list_candidates %lu\n",
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
index f6c00aa83..c32f9825a 100644
--- a/ldap/servers/slapd/back-ldbm/init.c
+++ b/ldap/servers/slapd/back-ldbm/init.c
@@ -182,6 +182,8 @@ ldbm_back_init( Slapi_PBlock *pb )
(void *) ldbm_back_next_search_entry_ext );
rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_DB_ENTRY_RELEASE_FN,
(void *) ldbm_back_entry_release );
+ rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_DB_SEARCH_RESULTS_RELEASE_FN,
+ (void *) ldbm_back_search_results_release );
rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_DB_COMPARE_FN,
(void *) ldbm_back_compare );
rc |= slapi_pblock_set( pb, SLAPI_PLUGIN_DB_MODIFY_FN,
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 600a9c476..7d91d47ab 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -66,7 +66,7 @@ static int can_skip_filter_test( Slapi_PBlock *pb, struct slapi_filter *f,
#define ISLEGACY(be) (be?(be->be_instance_info?(((ldbm_instance *)be->be_instance_info)->inst_li?(((ldbm_instance *)be->be_instance_info)->inst_li->li_legacy_errcode):0):0):0)
-static int
+int
compute_lookthrough_limit( Slapi_PBlock *pb, struct ldbminfo *li )
{
Slapi_Connection *conn = NULL;
@@ -122,6 +122,8 @@ int ldbm_back_search_cleanup(Slapi_PBlock *pb, struct ldbminfo *li, sort_spec_th
back_search_result_set *sr = NULL;
slapi_pblock_get( pb, SLAPI_SEARCH_RESULT_SET, &sr );
if ( (NULL != sr) && (function_result != 0) ) {
+ /* in case paged results, clean up the conn */
+ pagedresults_set_search_result(pb->pb_conn, NULL);
delete_search_result_set(&sr);
}
}
@@ -214,6 +216,8 @@ ldbm_back_search( Slapi_PBlock *pb )
/* Badly formed SORT control */
return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_PROTOCOL_ERROR, "Sort Control", SLAPI_FAIL_GENERAL, &basesdn, NULL);
}
+ /* set this operation includes the server side sorting */
+ operation->o_flags |= OP_FLAG_SERVER_SIDE_SORTING;
}
is_sorting_critical = is_sorting_critical_orig;
@@ -303,7 +307,7 @@ ldbm_back_search( Slapi_PBlock *pb )
vlv_make_response_control(pb, &vlv_response);
if (sort)
{
- make_sort_response_control(pb, LDAP_UNWILLING_TO_PERFORM, NULL);
+ sort_make_sort_response_control(pb, LDAP_UNWILLING_TO_PERFORM, NULL);
}
if (ISLEGACY(be))
{
@@ -327,7 +331,7 @@ ldbm_back_search( Slapi_PBlock *pb )
vlv_response.result = LDAP_UNWILLING_TO_PERFORM;
vlv_make_response_control(pb, &vlv_response);
}
- make_sort_response_control(pb, LDAP_UNWILLING_TO_PERFORM, NULL);
+ sort_make_sort_response_control(pb, LDAP_UNWILLING_TO_PERFORM, NULL);
return ldbm_back_search_cleanup(pb, li, sort_control,
LDAP_UNAVAILABLE_CRITICAL_EXTENSION, ctrlstr,
SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control);
@@ -341,7 +345,7 @@ ldbm_back_search( Slapi_PBlock *pb )
}
if (sort)
{
- make_sort_response_control(pb, LDAP_UNWILLING_TO_PERFORM, NULL);
+ sort_make_sort_response_control(pb, LDAP_UNWILLING_TO_PERFORM, NULL);
}
sort = 0;
virtual_list_view = 0;
@@ -385,39 +389,58 @@ ldbm_back_search( Slapi_PBlock *pb )
int vlv_rc;
/*
* Build a list of IDs for this entry and scope
- */
- if ((NULL != controls) && (sort)) {
- switch (vlv_search_build_candidate_list(pb, &basesdn, &vlv_rc, sort_control, (vlv ? &vlv_request_control : NULL), &candidates, &vlv_response_control)) {
+ */
+ if ((NULL != controls) && (sort) && (vlv)) {
+ /* This candidate list is for vlv, no need for sort only. */
+ switch (vlv_search_build_candidate_list(pb, &basesdn, &vlv_rc,
+ sort_control,
+ (vlv ? &vlv_request_control : NULL),
+ &candidates, &vlv_response_control)) {
case VLV_ACCESS_DENIED:
- return ldbm_back_search_cleanup(pb, li, sort_control, vlv_rc, "VLV Control", SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control);
-
+ return ldbm_back_search_cleanup(pb, li, sort_control,
+ vlv_rc, "VLV Control",
+ SLAPI_FAIL_GENERAL, &basesdn,
+ &vlv_request_control);
case VLV_BLD_LIST_FAILED:
- return ldbm_back_search_cleanup(pb, li, sort_control, vlv_response_control.result, NULL, SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control,
+ vlv_response_control.result,
+ NULL, SLAPI_FAIL_GENERAL,
+ &basesdn, &vlv_request_control);
case LDAP_SUCCESS:
/* Log to the access log the particulars of this sort request */
- /* Log message looks like this: SORT <key list useful for input to ldapsearch> <#candidates> | <unsortable> */
+ /* Log message looks like this: SORT <key list useful for input
+ * to ldapsearch> <#candidates> | <unsortable> */
sort_log_access(pb,sort_control,NULL);
/* Since a pre-computed index was found for the VLV Search then
- * the candidate list now contains exactly what should be returned.
- * There's no need to sort or trim the candidate list.
- *
- * However, the client will be expecting a Sort Response control
- */
- if (LDAP_SUCCESS != make_sort_response_control( pb, 0, NULL ) )
+ * the candidate list now contains exactly what should be
+ * returned.
+ * There's no need to sort or trim the candidate list.
+ *
+ * However, the client will be expecting a Sort Response control
+ */
+ if (LDAP_SUCCESS !=
+ sort_make_sort_response_control( pb, 0, NULL ) )
{
- return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_OPERATIONS_ERROR, "Sort Response Control", SLAPI_FAIL_GENERAL, &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control,
+ LDAP_OPERATIONS_ERROR,
+ "Sort Response Control",
+ SLAPI_FAIL_GENERAL,
+ &basesdn,
+ &vlv_request_control);
}
}
}
- if(candidates==NULL)
+ if (candidates == NULL)
{
int rc = build_candidate_list(pb, be, e, base, scope,
- &lookup_returned_allids, &candidates);
+ &lookup_returned_allids, &candidates);
if (rc)
{
/* Error result sent by build_candidate_list */
- return ldbm_back_search_cleanup(pb, li, sort_control, -1, NULL, rc, &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control, -1,
+ NULL, rc, &basesdn,
+ &vlv_request_control);
}
/*
* If we're sorting then we must check what administrative
@@ -434,7 +457,7 @@ ldbm_back_search( Slapi_PBlock *pb )
/*
* (tlimit==-1) means no time limit
*/
- time_up = ( tlimit==-1 ? -1 : optime + tlimit);
+ time_up = (tlimit==-1 ? -1 : optime + tlimit);
lookthrough_limit = compute_lookthrough_limit( pb, li );
}
@@ -445,19 +468,23 @@ ldbm_back_search( Slapi_PBlock *pb )
*/
if (virtual_list_view && (NULL != candidates))
{
- int r= 0;
- IDList *idl= NULL;
- Slapi_Filter *filter= NULL;
+ int r = 0;
+ IDList *idl = NULL;
+ Slapi_Filter *filter = NULL;
slapi_pblock_get( pb, SLAPI_SEARCH_FILTER, &filter );
- r= vlv_filter_candidates(be, pb, candidates, &basesdn, scope, filter, &idl, lookthrough_limit, time_up);
- if(r==0)
+ r = vlv_filter_candidates(be, pb, candidates, &basesdn,
+ scope, filter, &idl,
+ lookthrough_limit, time_up);
+ if(r == 0)
{
idl_free(candidates);
candidates= idl;
}
else
{
- return ldbm_back_search_cleanup(pb, li, sort_control, r, NULL, -1, &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control,
+ r, NULL, -1, &basesdn,
+ &vlv_request_control);
}
}
/*
@@ -480,39 +507,50 @@ ldbm_back_search( Slapi_PBlock *pb )
/* Don't log internal operations */
if (!operation_is_flag_set(operation, OP_FLAG_INTERNAL)) {
- /* Log to the access log the particulars of this sort request */
- /* Log message looks like this: SORT <key list useful for input to ldapsearch> <#candidates> | <unsortable> */
+ /* Log to the access log the particulars of this
+ * sort request */
+ /* Log message looks like this: SORT <key list useful for
+ * input to ldapsearch> <#candidates> | <unsortable> */
sort_log_access(pb,sort_control,candidates);
}
- sort_return_value = sort_candidates( be, lookthrough_limit, time_up, pb, candidates, sort_control, &sort_error_type );
+ sort_return_value = sort_candidates( be, lookthrough_limit,
+ time_up, pb, candidates,
+ sort_control,
+ &sort_error_type );
/* Fix for bugid # 394184, SD, 20 Jul 00 */
- /* replace the hard coded return value by the appropriate LDAP error code */
+ /* replace the hard coded return value by the appropriate
+ * LDAP error code */
switch (sort_return_value) {
case LDAP_SUCCESS: /* Everything OK */
vlv_response_control.result= LDAP_SUCCESS;
break;
case LDAP_PROTOCOL_ERROR: /* A protocol error */
- return ldbm_back_search_cleanup(pb, li, sort_control, LDAP_PROTOCOL_ERROR, "Sort Control", -1, &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control,
+ LDAP_PROTOCOL_ERROR,
+ "Sort Control", -1,
+ &basesdn,
+ &vlv_request_control);
case LDAP_UNWILLING_TO_PERFORM: /* Too hard */
case LDAP_OPERATIONS_ERROR: /* Operation error */
case LDAP_TIMELIMIT_EXCEEDED: /* Timeout */
vlv_response_control.result= LDAP_TIMELIMIT_EXCEEDED;
break;
case LDAP_ADMINLIMIT_EXCEEDED: /* Admin limit exceeded */
- vlv_response_control.result= LDAP_ADMINLIMIT_EXCEEDED;
+ vlv_response_control.result = LDAP_ADMINLIMIT_EXCEEDED;
break;
case LDAP_OTHER: /* Abandoned */
- abandoned= 1; /* So that we don't return a result code */
- is_sorting_critical= 1; /* In order to have the results discarded */
+ abandoned = 1; /* So that we don't return a result code */
+ is_sorting_critical = 1; /* In order to have the results
+ discarded */
break;
default: /* Should never get here */
break;
}
/* End fix for bug # 394184 */
/*
- * If the sort control was marked as critical, and there was an error in sorting,
- * don't return any entries, and return unavailableCriticalExtension in the
- * searchResultDone message.
+ * If the sort control was marked as critical, and there was
+ * an error in sorting, don't return any entries, and return
+ * unavailableCriticalExtension in the searchResultDone message.
*/
/* Fix for bugid #394184, SD, 05 Jul 00 */
/* we were not actually returning unavailableCriticalExtension;
@@ -525,10 +563,15 @@ ldbm_back_search( Slapi_PBlock *pb )
tmp_desc = "Sort Response Control";
}
/* end Fix for bugid #394184 */
- /* Generate the control returned to the client to indicate sort result */
- if (LDAP_SUCCESS != make_sort_response_control( pb, sort_return_value, sort_error_type ) )
+ /* Generate the control returned to the client to indicate
+ * sort result */
+ if (LDAP_SUCCESS != sort_make_sort_response_control( pb,
+ sort_return_value, sort_error_type ) )
{
- return ldbm_back_search_cleanup(pb, li, sort_control, (abandoned?-1:LDAP_PROTOCOL_ERROR), "Sort Response Control", -1, &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control,
+ (abandoned?-1:LDAP_PROTOCOL_ERROR),
+ "Sort Response Control", -1,
+ &basesdn, &vlv_request_control);
}
}
/*
@@ -540,30 +583,39 @@ ldbm_back_search( Slapi_PBlock *pb )
if (NULL != candidates && candidates->b_nids>0)
{
IDList *idl= NULL;
- vlv_response_control.result= vlv_trim_candidates(be, candidates, sort_control, &vlv_request_control, &idl, &vlv_response_control);
+ vlv_response_control.result =
+ vlv_trim_candidates(be, candidates, sort_control,
+ &vlv_request_control, &idl, &vlv_response_control);
if(vlv_response_control.result==0)
{
idl_free(candidates);
- candidates= idl;
+ candidates = idl;
}
else
{
- return ldbm_back_search_cleanup(pb, li, sort_control, vlv_response_control.result, NULL, -1, &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control,
+ vlv_response_control.result,
+ NULL, -1, &basesdn,
+ &vlv_request_control);
}
}
else
{
- vlv_response_control.targetPosition= 0;
- vlv_response_control.contentCount= 0;
- vlv_response_control.result= LDAP_SUCCESS;
+ vlv_response_control.targetPosition = 0;
+ vlv_response_control.contentCount = 0;
+ vlv_response_control.result = LDAP_SUCCESS;
}
}
}
if (virtual_list_view)
{
- if(LDAP_SUCCESS != vlv_make_response_control( pb, &vlv_response_control ))
+ if(LDAP_SUCCESS !=
+ vlv_make_response_control( pb, &vlv_response_control ))
{
- return ldbm_back_search_cleanup(pb, li, sort_control, (abandoned?-1:LDAP_PROTOCOL_ERROR), "VLV Response Control", -1, &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control,
+ (abandoned?-1:LDAP_PROTOCOL_ERROR),
+ "VLV Response Control", -1,
+ &basesdn, &vlv_request_control);
}
/* Log the VLV operation */
vlv_print_access_log(pb,&vlv_request_control,&vlv_response_control);
@@ -608,7 +660,7 @@ ldbm_back_search( Slapi_PBlock *pb )
/* check to see if we can skip the filter test */
if ( li->li_filter_bypass && NULL != candidates && !virtual_list_view
&& !lookup_returned_allids ) {
- Slapi_Filter *filter= NULL;
+ Slapi_Filter *filter = NULL;
slapi_pblock_get( pb, SLAPI_SEARCH_FILTER, &filter );
if ( can_skip_filter_test( pb, filter, scope, candidates)) {
@@ -618,7 +670,9 @@ ldbm_back_search( Slapi_PBlock *pb )
/* Fix for bugid #394184, SD, 05 Jul 00 */
/* tmp_err == -1: no error */
- return ldbm_back_search_cleanup(pb, li, sort_control, tmp_err, tmp_desc, (tmp_err == -1 ? 0 : -1), &basesdn, &vlv_request_control);
+ return ldbm_back_search_cleanup(pb, li, sort_control, tmp_err, tmp_desc,
+ (tmp_err == -1 ? 0 : -1), &basesdn,
+ &vlv_request_control);
/* end Fix for bugid #394184 */
}
@@ -1082,6 +1136,8 @@ ldbm_back_next_search_entry_ext( Slapi_PBlock *pb, int use_extension )
/* check for abandon */
if ( slapi_op_abandoned( pb ))
{
+ /* in case paged results, clean up the conn */
+ pagedresults_set_search_result(pb->pb_conn, NULL);
delete_search_result_set( &sr );
if ( use_extension ) {
slapi_pblock_set( pb, SLAPI_SEARCH_RESULT_ENTRY_EXT, NULL );
@@ -1096,6 +1152,8 @@ ldbm_back_next_search_entry_ext( Slapi_PBlock *pb, int use_extension )
if ( tlimit != -1 && curtime > stoptime )
{
slapi_send_ldap_result( pb, LDAP_TIMELIMIT_EXCEEDED, NULL, NULL, nentries, urls );
+ /* in case paged results, clean up the conn */
+ pagedresults_set_search_result(pb->pb_conn, NULL);
delete_search_result_set( &sr );
if ( use_extension ) {
slapi_pblock_set( pb, SLAPI_SEARCH_RESULT_ENTRY_EXT, NULL );
@@ -1109,6 +1167,8 @@ ldbm_back_next_search_entry_ext( Slapi_PBlock *pb, int use_extension )
if ( llimit != -1 && sr->sr_lookthroughcount >= llimit )
{
slapi_send_ldap_result( pb, LDAP_ADMINLIMIT_EXCEEDED, NULL, NULL, nentries, urls );
+ /* in case paged results, clean up the conn */
+ pagedresults_set_search_result(pb->pb_conn, NULL);
delete_search_result_set( &sr );
if ( use_extension ) {
slapi_pblock_set( pb, SLAPI_SEARCH_RESULT_ENTRY_EXT, NULL );
@@ -1123,6 +1183,9 @@ ldbm_back_next_search_entry_ext( Slapi_PBlock *pb, int use_extension )
if ( id == NOID )
{
/* No more entries */
+ /* destroy back_search_result_set */
+ /* in case paged results, clean up the conn */
+ pagedresults_set_search_result(pb->pb_conn, NULL);
delete_search_result_set( &sr );
if ( use_extension ) {
slapi_pblock_set( pb, SLAPI_SEARCH_RESULT_ENTRY_EXT, NULL );
@@ -1261,6 +1324,8 @@ ldbm_back_next_search_entry_ext( Slapi_PBlock *pb, int use_extension )
{
if ( --slimit < 0 ) {
cache_return( &inst->inst_cache, &e );
+ /* in case paged results, clean up the conn */
+ pagedresults_set_search_result(pb->pb_conn, NULL);
delete_search_result_set( &sr );
slapi_send_ldap_result( pb, LDAP_SIZELIMIT_EXCEEDED, NULL, NULL, nentries, urls );
rc = SLAPI_FAIL_GENERAL;
@@ -1348,6 +1413,11 @@ delete_search_result_set( back_search_result_set **sr )
slapi_ch_free( (void**)sr );
}
+void
+ldbm_back_search_results_release( void **sr )
+{
+ delete_search_result_set( (back_search_result_set **)sr );
+}
int
ldbm_back_entry_release( Slapi_PBlock *pb, void *backend_info_ptr ) {
diff --git a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
index 341e64cb7..30d607eef 100644
--- a/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
@@ -459,6 +459,7 @@ int ldbm_back_next_search_entry( Slapi_PBlock *pb );
int ldbm_back_next_search_entry_ext( Slapi_PBlock *pb, int use_extension );
int ldbm_back_db_test( Slapi_PBlock *pb );
int ldbm_back_entry_release( Slapi_PBlock *pb, void *backend_info_ptr );
+void ldbm_back_search_results_release( void **search_results );
int ldbm_back_init( Slapi_PBlock *pb );
/*
@@ -534,13 +535,15 @@ int bedse_add_index_entry(int argc, char **argv);
#endif
/*
- * search.c
+ * ldbm_search.c
*/
Slapi_Filter* create_onelevel_filter(Slapi_Filter* filter, const struct backentry *e, int managedsait, Slapi_Filter** fid2kids, Slapi_Filter** focref, Slapi_Filter** fand, Slapi_Filter** forr);
Slapi_Filter* create_subtree_filter(Slapi_Filter* filter, int managedsait, Slapi_Filter** focref, Slapi_Filter** forr);
IDList* subtree_candidates(Slapi_PBlock *pb, backend *be, const char *base, const struct backentry *e, Slapi_Filter *filter, int managedsait, int *allids_before_scopingp, int *err);
void search_set_tune(struct ldbminfo *li,int val);
int search_get_tune(struct ldbminfo *li);
+int compute_lookthrough_limit( Slapi_PBlock *pb, struct ldbminfo *li );
+
/*
* matchrule.c
diff --git a/ldap/servers/slapd/back-ldbm/sort.c b/ldap/servers/slapd/back-ldbm/sort.c
index 055a5c867..2694338e0 100644
--- a/ldap/servers/slapd/back-ldbm/sort.c
+++ b/ldap/servers/slapd/back-ldbm/sort.c
@@ -230,81 +230,6 @@ int sort_candidates(backend *be,int lookthrough_limit,time_t time_up, Slapi_PBlo
}
/* End fix for bug # 394184 */
-/* Fix for bug # 394184, SD, 20 Jul 00 */
-/* fix and cleanup (switch(code) {} removed) */
-/* arg 'code' has now the correct sortResult value */
-int
-make_sort_response_control ( Slapi_PBlock *pb, int code, char *error_type) {
-
- LDAPControl new_ctrl = {0};
- BerElement *ber= NULL;
- struct berval *bvp = NULL;
- int rc = -1;
- ber_int_t control_code = code;
-
- /*
- SortResult ::= SEQUENCE {
- sortResult ENUMERATED {
- success (0), -- results are sorted
- operationsError (1), -- server internal failure
- timeLimitExceeded (3), -- timelimit reached before
- -- sorting was completed
- strongAuthRequired (8), -- refused to return sorted
- -- results via insecure
- -- protocol
- adminLimitExceeded (11), -- too many matching entries
- -- for the server to sort
- noSuchAttribute (16), -- unrecognized attribute
- -- type in sort key
- inappropriateMatching (18), -- unrecognized or inappro-
- -- priate matching rule in
- -- sort key
- insufficientAccessRights (50), -- refused to return sorted
- -- results to this client
- busy (51), -- too busy to process
- unwillingToPerform (53), -- unable to sort
- other (80)
- },
- attributeType [0] AttributeType OPTIONAL }
-
- */
-
- if ( ( ber = ber_alloc()) == NULL ) {
- return -1;
- }
-
- if (( rc = ber_printf( ber, "{e", control_code )) != -1 ) {
- if ( rc != -1 && NULL != error_type ) {
- rc = ber_printf( ber, "s", error_type );
- }
- if ( rc != -1 ) {
- rc = ber_printf( ber, "}" );
- }
- }
- if ( rc != -1 ) {
- rc = ber_flatten( ber, &bvp );
- }
-
- ber_free( ber, 1 );
-
- if ( rc == -1 ) {
- return rc;
- }
-
- new_ctrl.ldctl_oid = LDAP_CONTROL_SORTRESPONSE;
- new_ctrl.ldctl_value = *bvp;
- new_ctrl.ldctl_iscritical = 1;
-
- if ( slapi_pblock_set( pb, SLAPI_ADD_RESCONTROL, &new_ctrl ) != 0 ) {
- ber_bvfree(bvp);
- return( -1 );
- }
-
- ber_bvfree(bvp);
- return( LDAP_SUCCESS );
-}
-/* End fix for bug #394184 */
-
static int term_tag(ber_tag_t tag)
{
return ( (LBER_END_OF_SEQORSET == tag) || (LBER_ERROR == tag) );
@@ -804,7 +729,7 @@ static int compare_entries_sv(ID *id_a, ID *id_b, sort_spec *s,baggage_carrier *
* -5: Admin limit exceeded now is: LDAP_ADMINLIMIT_EXCEEDED
* -6: Abandoned now is: LDAP_OTHER
*/
-static int sort_nazi(baggage_carrier *bc)
+static int sort_check(baggage_carrier *bc)
{
time_t curtime = 0;
/* check for abandon */
@@ -956,7 +881,7 @@ recurse:
swap(loguy, higuy);
/* Check admin and time limits here on the sort */
- if ( LDAP_SUCCESS != (return_value = sort_nazi(bc)) )
+ if ( LDAP_SUCCESS != (return_value = sort_check(bc)) )
{
return return_value;
}
diff --git a/ldap/servers/slapd/backend.c b/ldap/servers/slapd/backend.c
index e6083a66b..1bded05e2 100644
--- a/ldap/servers/slapd/backend.c
+++ b/ldap/servers/slapd/backend.c
@@ -393,6 +393,9 @@ slapi_be_getentrypoint(Slapi_Backend *be, int entrypoint, void **ret_fnptr, Slap
case SLAPI_PLUGIN_DB_ENTRY_RELEASE_FN:
*ret_fnptr = (void*)be->be_entry_release;
break;
+ case SLAPI_PLUGIN_DB_SEARCH_RESULTS_RELEASE_FN:
+ *ret_fnptr = (void*)be->be_search_results_release;
+ break;
case SLAPI_PLUGIN_DB_SIZE_FN:
*ret_fnptr = (void*)be->be_dbsize;
break;
@@ -498,6 +501,9 @@ slapi_be_setentrypoint(Slapi_Backend *be, int entrypoint, void *ret_fnptr, Slapi
case SLAPI_PLUGIN_DB_ENTRY_RELEASE_FN:
be->be_entry_release=(IFP) ret_fnptr;
break;
+ case SLAPI_PLUGIN_DB_SEARCH_RESULTS_RELEASE_FN:
+ be->be_search_results_release=(IFP) ret_fnptr;
+ break;
case SLAPI_PLUGIN_DB_SIZE_FN:
be->be_dbsize=(IFP) ret_fnptr;
break;
diff --git a/ldap/servers/slapd/connection.c b/ldap/servers/slapd/connection.c
index 045e527c0..874766b80 100644
--- a/ldap/servers/slapd/connection.c
+++ b/ldap/servers/slapd/connection.c
@@ -130,8 +130,8 @@ void
connection_cleanup(Connection *conn)
{
bind_credentials_clear( conn, PR_FALSE /* do not lock conn */,
- PR_TRUE /* clear external creds. */ );
- slapi_ch_free((void**)&conn->c_authtype);
+ PR_TRUE /* clear external creds. */ );
+ slapi_ch_free((void**)&conn->c_authtype);
/* Call the plugin extension destructors */
factory_destroy_extension(connection_type,conn,NULL/*Parent*/,&(conn->c_extension));
@@ -197,6 +197,15 @@ connection_cleanup(Connection *conn)
/* remove any SASL I/O from the connection */
sasl_io_cleanup(conn);
sasl_dispose((sasl_conn_t**)&conn->c_sasl_conn);
+ /* PAGED_RESULTS */
+ if (conn->c_search_result_set) {
+ conn->c_current_be->be_search_results_release(&(conn->c_search_result_set));
+ conn->c_search_result_set = NULL;
+ }
+ conn->c_current_be = NULL;
+ conn->c_search_result_count = 0;
+ conn->c_timelimit = 0;
+ /* PAGED_RESULTS ENDS */
/* free the connection socket buffer */
connection_free_private_buffer(conn);
@@ -1946,11 +1955,17 @@ void connection_enter_leave_turbo(Connection *conn, int *new_turbo_flag)
PR_Lock(conn->c_mutex);
/* We can already be in turbo mode, or not */
current_mode = conn->c_private->turbo_flag;
- if(conn->c_private->operation_rate == 0) {
- /* The connection is ranked by the passed activities. If some other connection have more activity,
- increase rank by one. The highest rank is least activity, good candidates to move out of turbo mode.
- However, if no activity on all the connections, then every connection gets 0 rank, so none move out.
- No bother to do so much calcuation, short-cut to non-turbo mode if no activities in passed interval */
+ if (conn->c_search_result_set) {
+ /* PAGED_RESULTS does not need turbo mode */
+ new_mode = 0;
+ } else if (conn->c_private->operation_rate == 0) {
+ /* The connection is ranked by the passed activities. If some other
+ * connection have more activity, increase rank by one. The highest
+ * rank is least activity, good candidates to move out of turbo mode.
+ * However, if no activity on all the connections, then every
+ * connection gets 0 rank, so none move out.
+ * No bother to do so much calcuation, short-cut to non-turbo mode
+ * if no activities in passed interval */
new_mode = 0;
} else {
double activet = 0.0;
@@ -1964,7 +1979,7 @@ void connection_enter_leave_turbo(Connection *conn, int *new_turbo_flag)
one measure to reduce thread startvation.
*/
if (connection_count > threshold_rank) {
- threshold_rank -= (connection_count - threshold_rank) / 5;
+ threshold_rank -= (connection_count - threshold_rank) / 5;
}
if (current_mode) {
@@ -2065,7 +2080,7 @@ connection_threadmain()
PR_Unlock(conn->c_mutex);
if (! config_check_referral_mode()) {
slapi_counter_increment(ops_initiated);
- slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsInOps);
+ slapi_counter_increment(g_get_global_snmp_vars()->ops_tbl.dsInOps);
}
}
/* Once we're here we have a pb */
@@ -2097,7 +2112,7 @@ connection_threadmain()
switch (ret) {
case CONN_DONE:
/* This means that the connection was closed, so clear turbo mode */
- /*FALLTHROUGH*/
+ /*FALLTHROUGH*/
case CONN_TIMEDOUT:
thread_turbo_flag = 0;
is_timedout = 1;
diff --git a/ldap/servers/slapd/control.c b/ldap/servers/slapd/control.c
index 268a48cde..aadf201bb 100644
--- a/ldap/servers/slapd/control.c
+++ b/ldap/servers/slapd/control.c
@@ -112,6 +112,10 @@ init_controls( void )
*/
slapi_register_supported_control( LDAP_CONTROL_GET_EFFECTIVE_RIGHTS,
SLAPI_OPERATION_SEARCH );
+
+ /* LDAP_CONTROL_PAGEDRESULTS is shared by request and response */
+ slapi_register_supported_control( LDAP_CONTROL_PAGEDRESULTS,
+ SLAPI_OPERATION_SEARCH );
}
@@ -124,8 +128,7 @@ slapi_register_supported_control( char *controloid, unsigned long controlops )
if ( controloid != NULL ) {
PR_RWLock_Wlock(supported_controls_lock);
++supported_controls_count;
- charray_add( &supported_controls,
- slapi_ch_strdup( controloid ));
+ charray_add( &supported_controls, slapi_ch_strdup( controloid ));
supported_controls_ops = (unsigned long *)slapi_ch_realloc(
(char *)supported_controls_ops,
supported_controls_count * sizeof( unsigned long ));
@@ -418,10 +421,8 @@ slapi_control_present( LDAPControl **controls, char *oid, struct berval **val, i
for ( i = 0; controls[i] != NULL; i++ ) {
if ( strcmp( controls[i]->ldctl_oid, oid ) == 0 ) {
- if ( val != NULL ) {
- if (NULL != val) {
- *val = &controls[i]->ldctl_value;
- }
+ if (NULL != val) {
+ *val = &controls[i]->ldctl_value;
if (NULL != iscritical) {
*iscritical = (int) controls[i]->ldctl_iscritical;
}
@@ -502,7 +503,7 @@ add_control_ext( LDAPControl ***ctrlsp, LDAPControl *newctrl, int copy )
{
int count;
- if ( *ctrlsp == NULL ) {
+ if ( *ctrlsp == NULL ) {
count = 0;
} else {
for ( count = 0; (*ctrlsp)[count] != NULL; ++count ) {
@@ -510,14 +511,14 @@ add_control_ext( LDAPControl ***ctrlsp, LDAPControl *newctrl, int copy )
}
}
- *ctrlsp = (LDAPControl **)slapi_ch_realloc( (char *)*ctrlsp,
- ( count + 2 ) * sizeof(LDAPControl *));
+ *ctrlsp = (LDAPControl **)slapi_ch_realloc( (char *)*ctrlsp,
+ ( count + 2 ) * sizeof(LDAPControl *));
- if (copy) {
- (*ctrlsp)[ count ] = slapi_dup_control( newctrl );
- } else {
- (*ctrlsp)[ count ] = newctrl;
- }
+ if (copy) {
+ (*ctrlsp)[ count ] = slapi_dup_control( newctrl );
+ } else {
+ (*ctrlsp)[ count ] = newctrl;
+ }
(*ctrlsp)[ ++count ] = NULL;
}
diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 3e91c124a..3b070e208 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1219,6 +1219,16 @@ setup_pr_read_pds(Connection_Table *ct, PRFileDesc **n_tcps, PRFileDesc **s_tcps
c->c_fdi = SLAPD_INVALID_SOCKET_INDEX;
}
}
+ if (c->c_timelimit > 0) /* check timeout for PAGED RESULTS */
+ {
+ time_t ctime = current_time();
+ if (ctime > c->c_timelimit)
+ {
+ /* Exceeded the timelimit; disconnect the client */
+ disconnect_server_nomutex(c, c->c_connid, -1,
+ SLAPD_DISCONNECT_IO_TIMEOUT, 0);
+ }
+ }
PR_Unlock( c->c_mutex );
}
c = next;
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 8fc24126d..06ff32f6e 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -45,74 +45,78 @@
#include "slap.h"
#include "index_subsys.h"
+#define PAGEDRESULTS_PAGE_END 1
+#define PAGEDRESULTS_SEARCH_END 2
+
/* helper functions */
static void compute_limits (Slapi_PBlock *pb);
/* attributes that no clients are allowed to add or modify */
-static char *protected_attrs_all [] = { PSEUDO_ATTR_UNHASHEDUSERPASSWORD,
- NULL
- };
+static char *protected_attrs_all [] = { PSEUDO_ATTR_UNHASHEDUSERPASSWORD,
+ NULL
+ };
static char *pwpolicy_lock_attrs_all [] = { "passwordRetryCount",
- "retryCountResetTime",
- "accountUnlockTime",
- NULL};
+ "retryCountResetTime",
+ "accountUnlockTime",
+ NULL};
/* Forward declarations */
static void compute_limits (Slapi_PBlock *pb);
static int send_results (Slapi_PBlock *pb, int send_result, int *nentries);
+static int send_results_ext (Slapi_PBlock *pb, int send_result, int *nentries, int pagesize, unsigned int *pr_stat);
static int process_entry(Slapi_PBlock *pb, Slapi_Entry *e, int send_result);
-
+
int op_shared_is_allowed_attr (const char *attr_name, int replicated_op)
{
- int i;
- slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
-
- /* check list of attributes that no client is allowed to specify */
- for (i = 0; protected_attrs_all[i]; i ++)
- {
- if (strcasecmp (attr_name, protected_attrs_all[i]) == 0)
- {
- /* this attribute is not allowed */
- return 0;
- }
- }
-
- /* ONREPL - should allow backends to plugin here to specify
- attributes that are not allowed */
-
- if (!replicated_op)
- {
- /*
- * check to see if attribute is marked as one clients can't modify
- */
- struct asyntaxinfo *asi;
- int no_user_mod = 0;
-
- asi = attr_syntax_get_by_name( attr_name );
- if ( NULL != asi &&
- 0 != ( asi->asi_flags & SLAPI_ATTR_FLAG_NOUSERMOD ))
- {
- /* this attribute is not allowed */
- no_user_mod = 1;
- }
- attr_syntax_return( asi );
-
- if ( no_user_mod ) {
- return( 0 );
- }
- } else if (!slapdFrontendConfig->pw_is_global_policy) {
- /* check list of password policy attributes for locking accounts */
- for (i = 0; pwpolicy_lock_attrs_all[i]; i ++)
- {
- if (strcasecmp (attr_name, pwpolicy_lock_attrs_all[i]) == 0)
- {
- /* this attribute is not allowed */
- return 0;
- }
- }
- }
-
- /* this attribute is ok */
- return 1;
+ int i;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ /* check list of attributes that no client is allowed to specify */
+ for (i = 0; protected_attrs_all[i]; i ++)
+ {
+ if (strcasecmp (attr_name, protected_attrs_all[i]) == 0)
+ {
+ /* this attribute is not allowed */
+ return 0;
+ }
+ }
+
+ /* ONREPL - should allow backends to plugin here to specify
+ attributes that are not allowed */
+
+ if (!replicated_op)
+ {
+ /*
+ * check to see if attribute is marked as one clients can't modify
+ */
+ struct asyntaxinfo *asi;
+ int no_user_mod = 0;
+
+ asi = attr_syntax_get_by_name( attr_name );
+ if ( NULL != asi &&
+ 0 != ( asi->asi_flags & SLAPI_ATTR_FLAG_NOUSERMOD ))
+ {
+ /* this attribute is not allowed */
+ no_user_mod = 1;
+ }
+ attr_syntax_return( asi );
+
+ if ( no_user_mod ) {
+ return( 0 );
+ }
+ } else if (!slapdFrontendConfig->pw_is_global_policy) {
+ /* check list of password policy attributes for locking accounts */
+ for (i = 0; pwpolicy_lock_attrs_all[i]; i ++)
+ {
+ if (strcasecmp (attr_name, pwpolicy_lock_attrs_all[i]) == 0)
+ {
+ /* this attribute is not allowed */
+ return 0;
+ }
+ }
+ }
+
+ /* this attribute is ok */
+ return 1;
}
@@ -122,95 +126,102 @@ void
do_ps_service(Slapi_Entry *e, Slapi_Entry *eprev, ber_int_t chgtype, ber_int_t chgnum)
{
if (NULL == ps_service_fn) {
- if (get_entry_point(ENTRY_POINT_PS_SERVICE, (caddr_t *)(&ps_service_fn)) < 0) {
- return;
- }
+ if (get_entry_point(ENTRY_POINT_PS_SERVICE, (caddr_t *)(&ps_service_fn)) < 0) {
+ return;
+ }
}
(ps_service_fn)(e, eprev, chgtype, chgnum);
}
void modify_update_last_modified_attr(Slapi_PBlock *pb, Slapi_Mods *smods)
{
- char buf[20];
- struct berval bv;
- struct berval *bvals[2];
- time_t curtime;
- struct tm utm;
- Operation *op;
-
- LDAPDebug(LDAP_DEBUG_TRACE, "modify_update_last_modified_attr\n", 0, 0, 0);
-
- slapi_pblock_get(pb, SLAPI_OPERATION, &op);
-
- bvals[0] = &bv;
- bvals[1] = NULL;
-
- /* fill in modifiersname */
- if (slapi_sdn_isempty(&op->o_sdn)) {
- bv.bv_val = "";
- bv.bv_len = strlen(bv.bv_val);
- } else {
- bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
- bv.bv_len = strlen(bv.bv_val);
- }
-
- slapi_mods_add_modbvps(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
- "modifiersname", bvals);
-
- /* fill in modifytimestamp */
- curtime = current_time();
+ char buf[20];
+ struct berval bv;
+ struct berval *bvals[2];
+ time_t curtime;
+ struct tm utm;
+ Operation *op;
+
+ LDAPDebug(LDAP_DEBUG_TRACE, "modify_update_last_modified_attr\n", 0, 0, 0);
+
+ slapi_pblock_get(pb, SLAPI_OPERATION, &op);
+
+ bvals[0] = &bv;
+ bvals[1] = NULL;
+
+ /* fill in modifiersname */
+ if (slapi_sdn_isempty(&op->o_sdn)) {
+ bv.bv_val = "";
+ bv.bv_len = strlen(bv.bv_val);
+ } else {
+ bv.bv_val = (char*)slapi_sdn_get_dn(&op->o_sdn);
+ bv.bv_len = strlen(bv.bv_val);
+ }
+
+ slapi_mods_add_modbvps(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
+ "modifiersname", bvals);
+
+ /* fill in modifytimestamp */
+ curtime = current_time();
#ifdef _WIN32
{
- struct tm *pt;
- pt = gmtime(&curtime);
- memcpy(&utm, pt, sizeof(struct tm));
+ struct tm *pt;
+ pt = gmtime(&curtime);
+ memcpy(&utm, pt, sizeof(struct tm));
}
#else
- gmtime_r(&curtime, &utm);
+ gmtime_r(&curtime, &utm);
#endif
- strftime(buf, sizeof(buf), "%Y%m%d%H%M%SZ", &utm);
- bv.bv_val = buf;
- bv.bv_len = strlen(bv.bv_val);
- slapi_mods_add_modbvps(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
- "modifytimestamp", bvals);
+ strftime(buf, sizeof(buf), "%Y%m%d%H%M%SZ", &utm);
+ bv.bv_val = buf;
+ bv.bv_len = strlen(bv.bv_val);
+ slapi_mods_add_modbvps(smods, LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
+ "modifytimestamp", bvals);
}
/*
- * Returns: 0 - if the operation is successful
- * < 0 - if operation fails.
+ * Returns: 0 - if the operation is successful
+ * < 0 - if operation fails.
* Note that an operation is considered "failed" if a result is sent
* directly to the client when send_result is 0.
*/
void
op_shared_search (Slapi_PBlock *pb, int send_result)
{
- char *base, *fstr;
- int scope;
- Slapi_Backend *be = NULL;
- Slapi_Backend *be_single = NULL;
- Slapi_Backend *be_list[BE_LIST_SIZE];
- Slapi_Entry *referral_list[BE_LIST_SIZE];
- char ebuf[ BUFSIZ ];
- char attrlistbuf[ 1024 ], *attrliststr, **attrs = NULL;
- int rc = 0;
- int internal_op;
+ char *base, *fstr;
+ int scope;
+ Slapi_Backend *be = NULL;
+ Slapi_Backend *be_single = NULL;
+ Slapi_Backend *be_list[BE_LIST_SIZE];
+ Slapi_Entry *referral_list[BE_LIST_SIZE];
+ char ebuf[ BUFSIZ ];
+ char attrlistbuf[ 1024 ], *attrliststr, **attrs = NULL;
+ int rc = 0;
+ int internal_op;
Slapi_DN sdn;
Slapi_Operation *operation;
- Slapi_Entry *referral = NULL;
+ Slapi_Entry *referral = NULL;
- char errorbuf[BUFSIZ];
- int nentries,pnentries;
- int flag_search_base_found = 0;
- int flag_no_such_object = 0;
- int flag_referral = 0;
- int flag_psearch = 0;
- int err_code = LDAP_SUCCESS;
- LDAPControl **ctrlp;
- struct berval *ctl_value = NULL;
- int iscritical = 0;
- char * be_name = NULL;
- int index = 0;
- int sent_result = 0;
+ char errorbuf[BUFSIZ];
+ int nentries,pnentries;
+ int flag_search_base_found = 0;
+ int flag_no_such_object = 0;
+ int flag_referral = 0;
+ int flag_psearch = 0;
+ int err_code = LDAP_SUCCESS;
+ LDAPControl **ctrlp;
+ struct berval *ctl_value = NULL;
+ int iscritical = 0;
+ char *be_name = NULL;
+ int index = 0;
+ int sent_result = 0;
+ unsigned int pr_stat = 0;
+
+ ber_int_t pagesize = -1;
+ int curr_search_count = 0;
+ Slapi_Backend *pr_be = NULL;
+ void *pr_search_result = NULL;
+ int pr_search_result_count = 0;
be_list[0] = NULL;
referral_list[0] = NULL;
@@ -234,7 +245,7 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
#define SLAPD_SEARCH_FMTSTR_BASE_INT "conn=%s op=%d SRCH base=\"%s\" scope=%d "
#define SLAPD_SEARCH_FMTSTR_REMAINDER " attrs=%s%s\n"
- PR_ASSERT(fstr);
+ PR_ASSERT(fstr);
if ( strlen(fstr) > 1024 )
{
/*
@@ -349,11 +360,30 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
}
}
- if ( slapi_control_present (ctrlp, LDAP_CONTROL_GET_EFFECTIVE_RIGHTS,
- &ctl_value, &iscritical) )
- {
- operation->o_flags |= OP_FLAG_GET_EFFECTIVE_RIGHTS;
- }
+ if ( slapi_control_present (ctrlp, LDAP_CONTROL_GET_EFFECTIVE_RIGHTS,
+ &ctl_value, &iscritical) )
+ {
+ operation->o_flags |= OP_FLAG_GET_EFFECTIVE_RIGHTS;
+ }
+
+ if ( slapi_control_present (ctrlp, LDAP_CONTROL_PAGEDRESULTS,
+ &ctl_value, &iscritical) )
+ {
+ rc = pagedresults_parse_control_value(ctl_value,
+ &pagesize, &curr_search_count);
+ if (LDAP_SUCCESS == rc) {
+ operation->o_flags |= OP_FLAG_PAGED_RESULTS;
+ pr_be = pagedresults_get_current_be(pb->pb_conn);
+ pr_search_result = pagedresults_get_search_result(pb->pb_conn);
+ pr_search_result_count =
+ pagedresults_get_search_result_count(pb->pb_conn);
+ } else {
+ /* parse paged-results-control failed */
+ if (iscritical) { /* return an error since it's critical */
+ goto free_and_return;
+ }
+ }
+ }
}
if (be_name == NULL)
@@ -372,18 +402,29 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
if (be_list[0] != NULL)
{
index = 0;
- while (be_list[index] && be_list[index+1])
- index++;
+ if (pr_be) { /* PAGED RESULT: be is found from the previous paging. */
+ /* move the index in the be_list which matches pr_be */
+ while (be_list[index] && be_list[index+1] && pr_be != be_list[index])
+ index++;
+ } else {
+ while (be_list[index] && be_list[index+1])
+ index++;
+ }
+ /* "be" is either pr_be or the last backend */
be = be_list[index];
}
else
- be = NULL;
+ be = pr_be?pr_be:NULL;
}
else
{
/* specific backend be_name was requested, use slapi_be_select_by_instance_name
*/
- be_single = be = slapi_be_select_by_instance_name(be_name);
+ if (pr_be) {
+ be_single = be = pr_be;
+ } else {
+ be_single = be = slapi_be_select_by_instance_name(be_name);
+ }
if (be_single)
slapi_be_Rlock(be_single);
be_list[0] = NULL;
@@ -454,7 +495,17 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
slapi_pblock_get(pb, SLAPI_PLUGIN_OPRETURN, &rc);
goto free_and_return;
}
- }
+ }
+
+ /* set the timelimit to clean up the too-long-lived-paged results requests */
+ if (operation->o_flags & OP_FLAG_PAGED_RESULTS) {
+ time_t optime, time_up;
+ int tlimit;
+ slapi_pblock_get( pb, SLAPI_SEARCH_TIMELIMIT, &tlimit );
+ slapi_pblock_get( pb, SLAPI_OPINITIATED_TIME, &optime );
+ time_up = (tlimit==-1 ? -1 : optime + tlimit); /* -1: no time limit */
+ pagedresults_set_timelimit(pb->pb_conn, time_up);
+ }
/* PAR: now filters have been rewritten, we can assign plugins to work on them */
index_subsys_assign_filter_decoders(pb);
@@ -465,6 +516,7 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
{
const Slapi_DN * be_suffix;
int err = 0;
+ Slapi_Backend *next_be = NULL;
if (be->be_search == NULL)
{
@@ -489,136 +541,196 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
/* that's mean we only support one suffix per backend */
be_suffix = slapi_be_getsuffix(be, 0);
-
- /* be_suffix null means that we are searching the default backend
- * -> don't change the search parameters in pblock
- */
- if (be_suffix != NULL)
+
+ if (be_list[0] == NULL)
+ {
+ next_be = NULL;
+ }
+ else
{
- if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL))
+ index--;
+ if (index>=0)
+ next_be = be_list[index];
+ else
+ next_be = NULL;
+ }
+
+ if ((operation->o_flags & OP_FLAG_PAGED_RESULTS) && pr_search_result) {
+ /* PAGED RESULTS and already have the search results from the prev op */
+ slapi_pblock_set( pb, SLAPI_SEARCH_RESULT_SET, pr_search_result );
+ rc = send_results_ext (pb, 1, &pnentries, pagesize, &pr_stat);
+
+ if (PAGEDRESULTS_SEARCH_END == pr_stat) {
+ /* no more entries to send in the backend */
+ if (NULL == next_be) {
+ /* no more entries && no more backends */
+ curr_search_count = -1;
+ } else {
+ curr_search_count = pnentries;
+ }
+ } else {
+ curr_search_count = pnentries;
+ }
+ pagedresults_set_response_control(pb, 0, pagesize, curr_search_count);
+ if (pagedresults_get_with_sort(pb->pb_conn)) {
+ sort_make_sort_response_control(pb, CONN_GET_SORT_RESULT_CODE, NULL);
+ }
+ next_be = NULL; /* to break the loop */
+ } else {
+ /* be_suffix null means that we are searching the default backend
+ * -> don't change the search parameters in pblock
+ */
+ if (be_suffix != NULL)
{
- /* one level searches
- * - depending on the suffix of the backend we might have to
- * do a one level search or a base search
- * - we might also have to change the search target
- */
- if (slapi_sdn_isparent(&sdn, be_suffix)
- || (slapi_sdn_get_ndn_len(&sdn) == 0))
+ if ((be_name == NULL) && (scope == LDAP_SCOPE_ONELEVEL))
{
- int tmp_scope = LDAP_SCOPE_BASE;
- slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
- slapi_pblock_set(pb, SLAPI_SEARCH_TARGET,
- (void *)slapi_sdn_get_ndn(be_suffix));
+ /* one level searches
+ * - depending on the suffix of the backend we might have to
+ * do a one level search or a base search
+ * - we might also have to change the search target
+ */
+ if (slapi_sdn_isparent(&sdn, be_suffix)
+ || (slapi_sdn_get_ndn_len(&sdn) == 0))
+ {
+ int tmp_scope = LDAP_SCOPE_BASE;
+ slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
+ slapi_pblock_set(pb, SLAPI_SEARCH_TARGET,
+ (void *)slapi_sdn_get_ndn(be_suffix));
+ }
+ else if (slapi_sdn_issuffix(&sdn, be_suffix))
+ {
+ int tmp_scope = LDAP_SCOPE_ONELEVEL;
+ slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
+ slapi_pblock_set(pb, SLAPI_SEARCH_TARGET,
+ (void *)slapi_sdn_get_ndn (&sdn));
+ }
+ else
+ goto next_be;
}
- else if (slapi_sdn_issuffix(&sdn, be_suffix))
+
+ /* subtree searches :
+ * if the search was started above the backend suffix
+ * - temporarily set the SLAPI_SEARCH_TARGET to the
+ * base of the node so that we don't get a NO SUCH OBJECT error
+ * - do not change the scope
+ */
+ if (scope == LDAP_SCOPE_SUBTREE)
{
- int tmp_scope = LDAP_SCOPE_ONELEVEL;
- slapi_pblock_set(pb, SLAPI_SEARCH_SCOPE, &tmp_scope);
- slapi_pblock_set(pb, SLAPI_SEARCH_TARGET,
- (void *)slapi_sdn_get_ndn (&sdn));
+ if (slapi_sdn_issuffix(be_suffix, &sdn))
+ {
+ slapi_pblock_set(pb, SLAPI_SEARCH_TARGET,
+ (void *)slapi_sdn_get_ndn(be_suffix));
+ }
+ else
+ slapi_pblock_set(pb, SLAPI_SEARCH_TARGET, (void *)slapi_sdn_get_ndn(&sdn));
}
- else
- goto next_be;
}
-
- /* subtree searches :
- * if the search was started above the backend suffix
- * - temporarily set the SLAPI_SEARCH_TARGET to the
- * base of the node so that we don't get a NO SUCH OBJECT error
- * - do not change the scope
- */
- if (scope == LDAP_SCOPE_SUBTREE)
+
+ slapi_pblock_set(pb, SLAPI_BACKEND, be);
+ slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database);
+ slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL);
+
+ /* ONREPL - we need to be able to tell the backend not to send results directly */
+ rc = (*be->be_search)(pb);
+ switch (rc)
{
- if (slapi_sdn_issuffix(be_suffix, &sdn))
+ case 1:
+ /* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
+ * it will not have sent back a result - otherwise, it will have
+ * sent a result */
+ rc = SLAPI_FAIL_GENERAL;
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
+ if (err == LDAP_NO_SUCH_OBJECT)
{
- slapi_pblock_set(pb, SLAPI_SEARCH_TARGET,
- (void *)slapi_sdn_get_ndn(be_suffix));
+ /* may be the object exist somewhere else
+ * wait the end of the loop to send back this error
+ */
+ flag_no_such_object = 1;
+ break;
+ }
+ /* err something other than LDAP_NO_SUCH_OBJECT, so the backend will
+ * have sent the result -
+ * Set a flag here so we don't return another result. */
+ sent_result = 1;
+ /* fall through */
+
+ case -1: /* an error occurred */
+ slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
+ if (err == LDAP_NO_SUCH_OBJECT)
+ {
+ /* may be the object exist somewhere else
+ * wait the end of the loop to send back this error
+ */
+ flag_no_such_object = 1;
+ break;
}
else
- slapi_pblock_set(pb, SLAPI_SEARCH_TARGET, (void *)slapi_sdn_get_ndn(&sdn));
- }
- }
-
- slapi_pblock_set(pb, SLAPI_BACKEND, be);
- slapi_pblock_set(pb, SLAPI_PLUGIN, be->be_database);
- slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL);
-
- /* ONREPL - we need to be able to tell the backend not to send results directly */
- rc = (*be->be_search)(pb);
- switch (rc)
- {
- case 1: /* if the backend returned LDAP_NO_SUCH_OBJECT for a SEARCH request,
- it will not have sent back a result - otherwise, it will have
- sent a result */
- rc = SLAPI_FAIL_GENERAL;
- slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
- if (err == LDAP_NO_SUCH_OBJECT)
- {
- /* may be the object exist somewhere else
- * wait the end of the loop to send back this error
- */
- flag_no_such_object = 1;
- break;
- }
- /* err something other than LDAP_NO_SUCH_OBJECT, so the backend will have sent the result -
- Set a flag here so we don't return another result. */
- sent_result = 1;
- /* fall through */
-
- case -1: /* an error occurred */
- slapi_pblock_get(pb, SLAPI_RESULT_CODE, &err);
- if (err == LDAP_NO_SUCH_OBJECT)
- {
- /* may be the object exist somewhere else
- * wait the end of the loop to send back this error
- */
- flag_no_such_object = 1;
- break;
- }
- else
- {
- /* for error other than LDAP_NO_SUCH_OBJECT
- * the error has already been sent
- * stop the search here
- */
+ {
+ /* for error other than LDAP_NO_SUCH_OBJECT
+ * the error has already been sent
+ * stop the search here
+ */
+ goto free_and_return;
+ }
+
+ /* when rc == SLAPI_FAIL_DISKFULL this case is executed */
+
+ case SLAPI_FAIL_DISKFULL:
+ operation_out_of_disk_space();
+ goto free_and_return;
+
+ case 0: /* search was successful and we need to send the result */
+ flag_search_base_found++;
+ rc = send_results_ext (pb, 1, &pnentries, pagesize, &pr_stat);
+
+ /* PAGED RESULTS */
+ if (operation->o_flags & OP_FLAG_PAGED_RESULTS) {
+ void *sr = NULL;
+ int with_sort = operation->o_flags & OP_FLAG_SERVER_SIDE_SORTING;
+
+ curr_search_count = pnentries;
+ if (PAGEDRESULTS_SEARCH_END == pr_stat) {
+ if (NULL == next_be) {
+ /* no more entries && no more backends */
+ curr_search_count = -1;
+ } else {
+ /* no more entries, but at least another backend */
+ if (pagedresults_set_current_be(pb->pb_conn, next_be) < 0) {
+ goto free_and_return;
+ }
+ }
+ } else {
+ curr_search_count = pnentries;
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
+ if (pagedresults_set_current_be(pb->pb_conn, be) < 0 ||
+ pagedresults_set_search_result(pb->pb_conn, sr) < 0 ||
+ pagedresults_set_search_result_count(pb->pb_conn,
+ curr_search_count) < 0 ||
+ pagedresults_set_with_sort(pb->pb_conn, with_sort) < 0) {
+ goto free_and_return;
+ }
+ }
+ pagedresults_set_response_control(pb, 0,
+ pagesize, curr_search_count);
+ slapi_pblock_set( pb, SLAPI_SEARCH_RESULT_SET, NULL );
+ next_be = NULL; /* to break the loop */
+ }
+
+ /* if rc != 0 an error occurred while sending back the entries
+ * to the LDAP client
+ * LDAP error should already have been sent to the client
+ * stop the search, free and return
+ */
+ if (rc != 0)
goto free_and_return;
+ break;
}
-
- /* when rc == SLAPI_FAIL_DISKFULL this case is executed */
-
- case SLAPI_FAIL_DISKFULL:
- operation_out_of_disk_space();
- goto free_and_return;
-
- case 0: /* search was successful and we need to send the result */
- flag_search_base_found++;
- rc = send_results (pb, 1, &pnentries);
-
- /* if rc != 0 an error occurred while sending back the entries
- * to the LDAP client
- * LDAP error should already have been sent to the client
- * stop the search, free and return
- */
- if (rc != 0)
- goto free_and_return;
- break;
}
nentries += pnentries;
- next_be:
- if (be_list[0] == NULL)
- {
- be = NULL;
- }
- else
- {
- index--;
- if (index>=0)
- be = be_list[index];
- else
- be = NULL;
- }
+next_be:
+ be = next_be; /* this be won't be used for PAGED_RESULTS */
}
/* if referrals were sent back by the mapping tree
@@ -731,14 +843,14 @@ process_entry(Slapi_PBlock *pb, Slapi_Entry *e, int send_result)
{
int managedsait;
Slapi_Attr *a=NULL;
- int numValues=0, i;
+ int numValues=0, i;
if (!send_result)
- {
- /* server requested that we don't send results to the client,
- for instance, in case of a persistent search
- */
- return 1;
+ {
+ /* server requested that we don't send results to the client,
+ for instance, in case of a persistent search
+ */
+ return 1;
}
/* ONREPL - check if the entry should be referred (because of the copyingFrom) */
@@ -749,48 +861,48 @@ process_entry(Slapi_PBlock *pb, Slapi_Entry *e, int send_result)
* the referrals are just squirreled away and sent with the
* final result. For v3, the referrals are sent in separate LDAP messages.
*/
- slapi_pblock_get(pb, SLAPI_MANAGEDSAIT, &managedsait);
+ slapi_pblock_get(pb, SLAPI_MANAGEDSAIT, &managedsait);
if (!managedsait && slapi_entry_attr_find(e, "ref", &a)== 0)
- {
- /* to fix 522189: when rootDSE, don't interpret attribute ref as a referral entry */
-
- if ( slapi_is_rootdse(slapi_entry_get_dn_const(e)) )
- return 0; /* more to do for this entry, e.g., send it back to the client */
-
- /* end fix */
- slapi_attr_get_numvalues(a, &numValues );
- if (numValues == 0)
- {
- char ebuf[ BUFSIZ ];
- LDAPDebug(LDAP_DEBUG_ANY, "null ref in (%s)\n",
- escape_string(slapi_entry_get_dn_const(e), ebuf), 0, 0);
- }
- else
- {
- Slapi_Value *val=NULL;
- struct berval **refscopy=NULL;
- struct berval **urls, **tmpUrls=NULL;
- tmpUrls=(struct berval **) slapi_ch_malloc((numValues + 1) * sizeof(struct berval*));
- for ( i = slapi_attr_first_value(a, &val); i != -1;
- i = slapi_attr_next_value(a, i, &val)) {
- tmpUrls[i]=(struct berval*)slapi_value_get_berval(val);
- }
- tmpUrls[numValues]=NULL;
- refscopy = ref_adjust(pb, tmpUrls, slapi_entry_get_sdn_const(e), 1);
- slapi_pblock_get(pb, SLAPI_SEARCH_REFERRALS, &urls);
- send_ldap_referral(pb, e, refscopy, &urls);
- slapi_pblock_set(pb, SLAPI_SEARCH_REFERRALS, urls);
- if (NULL != refscopy)
- {
- ber_bvecfree(refscopy);
- refscopy = NULL;
- }
- if( NULL != tmpUrls) {
- slapi_ch_free( (void **)&tmpUrls );
- }
- }
-
- return 1; /* done with this entry */
+ {
+ /* to fix 522189: when rootDSE, don't interpret attribute ref as a referral entry */
+
+ if ( slapi_is_rootdse(slapi_entry_get_dn_const(e)) )
+ return 0; /* more to do for this entry, e.g., send it back to the client */
+
+ /* end fix */
+ slapi_attr_get_numvalues(a, &numValues );
+ if (numValues == 0)
+ {
+ char ebuf[ BUFSIZ ];
+ LDAPDebug(LDAP_DEBUG_ANY, "null ref in (%s)\n",
+ escape_string(slapi_entry_get_dn_const(e), ebuf), 0, 0);
+ }
+ else
+ {
+ Slapi_Value *val=NULL;
+ struct berval **refscopy=NULL;
+ struct berval **urls, **tmpUrls=NULL;
+ tmpUrls=(struct berval **) slapi_ch_malloc((numValues + 1) * sizeof(struct berval*));
+ for ( i = slapi_attr_first_value(a, &val); i != -1;
+ i = slapi_attr_next_value(a, i, &val)) {
+ tmpUrls[i]=(struct berval*)slapi_value_get_berval(val);
+ }
+ tmpUrls[numValues]=NULL;
+ refscopy = ref_adjust(pb, tmpUrls, slapi_entry_get_sdn_const(e), 1);
+ slapi_pblock_get(pb, SLAPI_SEARCH_REFERRALS, &urls);
+ send_ldap_referral(pb, e, refscopy, &urls);
+ slapi_pblock_set(pb, SLAPI_SEARCH_REFERRALS, urls);
+ if (NULL != refscopy)
+ {
+ ber_bvecfree(refscopy);
+ refscopy = NULL;
+ }
+ if( NULL != tmpUrls) {
+ slapi_ch_free( (void **)&tmpUrls );
+ }
+ }
+
+ return 1; /* done with this entry */
}
return 0;
@@ -812,7 +924,7 @@ iterate_with_lookahead(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int
Slapi_Entry *next_e;
void *next_backend_info_ptr;
char **attrs = NULL;
- int send_result_status = 0;
+ int send_result_status = 0;
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRSONLY, &attrsonly);
@@ -820,24 +932,24 @@ iterate_with_lookahead(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int
/* setup for the loop */
rc = be->be_next_search_entry_ext(pb, 1);
if (rc < 0)
- {
- /*
- * Some exceptional condition occurred. Results
- * have been sent, so we're finished.
- */
- if (rc == SLAPI_FAIL_DISKFULL)
- {
- operation_out_of_disk_space();
- }
- return -1;
+ {
+ /*
+ * Some exceptional condition occurred. Results
+ * have been sent, so we're finished.
+ */
+ if (rc == SLAPI_FAIL_DISKFULL)
+ {
+ operation_out_of_disk_space();
+ }
+ return -1;
}
-
+
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &next_e);
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY_EXT, &next_backend_info_ptr);
if (NULL == next_e)
- {
- /* no entries */
- done = 1;
+ {
+ /* no entries */
+ done = 1;
}
backend_info_ptr = NULL;
@@ -845,98 +957,98 @@ iterate_with_lookahead(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int
/* Done setting up the loop, now here it comes */
while (!done)
- {
- /* Allow the backend to free the entry we just finished using */
- /* It is ok to call this when backend_info_ptr is NULL */
- be->be_entry_release(pb, backend_info_ptr);
- e = next_e;
- backend_info_ptr = next_backend_info_ptr;
-
- rc = be->be_next_search_entry_ext(pb, 1);
- if (rc < 0)
- {
- /*
- * Some exceptional condition occurred. Results
- * have been sent, so we're finished.
- */
- if (rc == SLAPI_FAIL_DISKFULL)
- {
- operation_out_of_disk_space();
- }
- return -1;
- }
- else
- {
- slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &next_e);
- slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY_EXT, &next_backend_info_ptr);
- if (next_e == NULL)
- {
- /* no more entries */
- done = 1;
- }
- }
-
- if (process_entry(pb, e, send_result))
- {
- /* shouldn't send this entry */
- continue;
- }
-
- /*
- * It's a regular entry, or it's a referral and
- * managedsait control is on. In either case, send the entry.
- */
- if (done)
- {
- struct berval **urls = NULL;
- /* Send the entry and the result at the same time */
- slapi_pblock_get(pb, SLAPI_SEARCH_REFERRALS, &urls);
- rc = send_ldap_search_entry_ext(pb, e, NULL, attrs, attrsonly, 1,
- (*pnentries)+1, urls);
- if (rc == 1)
- {
- /* this means we didn't have access to the entry. Since the
- * entry was not sent, we need to send the done packet.
- */
- send_result_status = 1;
- }
- }
- else
- {
- /* Send the entry */
- rc = send_ldap_search_entry(pb, e, NULL, attrs,
- attrsonly);
- }
- switch (rc)
- {
- case 0: /* entry sent ok */
- (*pnentries)++;
- slapi_pblock_set(pb, SLAPI_NENTRIES, pnentries);
- break;
- case 1: /* entry not sent */
- break;
- case -1: /* connection closed */
- /*
- * mark the operation as abandoned so the backend
- * next entry function gets called again and has
- * a chance to clean things up.
- */
- pb->pb_op->o_status = SLAPI_OP_STATUS_ABANDONED;
- break;
- }
+ {
+ /* Allow the backend to free the entry we just finished using */
+ /* It is ok to call this when backend_info_ptr is NULL */
+ be->be_entry_release(pb, backend_info_ptr);
+ e = next_e;
+ backend_info_ptr = next_backend_info_ptr;
+
+ rc = be->be_next_search_entry_ext(pb, 1);
+ if (rc < 0)
+ {
+ /*
+ * Some exceptional condition occurred. Results
+ * have been sent, so we're finished.
+ */
+ if (rc == SLAPI_FAIL_DISKFULL)
+ {
+ operation_out_of_disk_space();
+ }
+ return -1;
+ }
+ else
+ {
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &next_e);
+ slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY_EXT, &next_backend_info_ptr);
+ if (next_e == NULL)
+ {
+ /* no more entries */
+ done = 1;
+ }
+ }
+
+ if (process_entry(pb, e, send_result))
+ {
+ /* shouldn't send this entry */
+ continue;
+ }
+
+ /*
+ * It's a regular entry, or it's a referral and
+ * managedsait control is on. In either case, send the entry.
+ */
+ if (done)
+ {
+ struct berval **urls = NULL;
+ /* Send the entry and the result at the same time */
+ slapi_pblock_get(pb, SLAPI_SEARCH_REFERRALS, &urls);
+ rc = send_ldap_search_entry_ext(pb, e, NULL, attrs, attrsonly, 1,
+ (*pnentries)+1, urls);
+ if (rc == 1)
+ {
+ /* this means we didn't have access to the entry. Since the
+ * entry was not sent, we need to send the done packet.
+ */
+ send_result_status = 1;
+ }
+ }
+ else
+ {
+ /* Send the entry */
+ rc = send_ldap_search_entry(pb, e, NULL, attrs,
+ attrsonly);
+ }
+ switch (rc)
+ {
+ case 0: /* entry sent ok */
+ (*pnentries)++;
+ slapi_pblock_set(pb, SLAPI_NENTRIES, pnentries);
+ break;
+ case 1: /* entry not sent */
+ break;
+ case -1: /* connection closed */
+ /*
+ * mark the operation as abandoned so the backend
+ * next entry function gets called again and has
+ * a chance to clean things up.
+ */
+ pb->pb_op->o_status = SLAPI_OP_STATUS_ABANDONED;
+ break;
+ }
}
be->be_entry_release(pb, backend_info_ptr);
if (*pnentries == 0 || send_result_status)
- {
- /* We didn't send the result done message so the caller
- * must send it */
- return 1;
+ {
+ /* We didn't send the result done message so the caller
+ * must send it */
+ return 1;
}
- else
- {
- /* The result message has been sent */
- return 0;
+ else
+ {
+ /* The result message has been sent */
+ return 0;
}
}
#endif
@@ -948,7 +1060,8 @@ iterate_with_lookahead(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int
* iterate_with_lookahead trys to do.
*/
static int
-iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int *pnentries)
+iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result,
+ int *pnentries, int pagesize, unsigned int *pr_stat)
{
int rc;
int attrsonly;
@@ -960,6 +1073,9 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int *pnentries)
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRSONLY, &attrsonly);
*pnentries = 0;
+ if (pr_stat) {
+ *pr_stat = 0;
+ }
while (!done)
{
@@ -970,12 +1086,15 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int *pnentries)
if (rc < 0)
{
/*
- * Some exceptional condition occurred. Results have been sent, so we're finished.
+ * Some exceptional condition occurred. Results have been sent,
+ * so we're finished.
*/
if (rc == SLAPI_FAIL_DISKFULL)
{
operation_out_of_disk_space();
}
+ *pr_stat = PAGEDRESULTS_SEARCH_END;
+ pagedresults_set_timelimit(pb->pb_conn, 0);
return -1;
}
@@ -1025,6 +1144,7 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int *pnentries)
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &e);
if (NULL == e) {
/* everything is ok - don't send the result */
+ *pr_stat = PAGEDRESULTS_SEARCH_END;
return 1;
}
gerentry = e;
@@ -1044,6 +1164,7 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int *pnentries)
slapi_entry_free(gerentry);
gerentry = e = NULL;
}
+ *pr_stat = PAGEDRESULTS_SEARCH_END;
return( -1 );
}
slapi_ch_free ( (void**)&errbuf );
@@ -1097,6 +1218,9 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int *pnentries)
{
/* no more entries */
done = 1;
+ if (pr_stat) {
+ *pr_stat = PAGEDRESULTS_SEARCH_END;
+ }
}
}
else if (e)
@@ -1129,11 +1253,22 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int *pnentries)
pb->pb_op->o_status = SLAPI_OP_STATUS_ABANDONED;
break;
}
+ if (pagesize == *pnentries)
+ {
+ /* PAGED RESULTS: reached the pagesize */
+ done = 1;
+ if (pr_stat) {
+ *pr_stat = PAGEDRESULTS_PAGE_END;
+ }
+ }
}
else
{
/* no more entries */
done = 1;
+ if (pr_stat) {
+ *pr_stat = PAGEDRESULTS_SEARCH_END;
+ }
}
}
@@ -1141,8 +1276,8 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result, int *pnentries)
}
-static int timelimit_reslimit_handle = -1;
-static int sizelimit_reslimit_handle = -1;
+static int timelimit_reslimit_handle = -1;
+static int sizelimit_reslimit_handle = -1;
/*
* Register size and time limit with the binder-based resource limits
@@ -1151,18 +1286,18 @@ static int sizelimit_reslimit_handle = -1;
int
search_register_reslimits( void )
{
- int rc1, rc2;
+ int rc1, rc2;
rc1 = slapi_reslimit_register( SLAPI_RESLIMIT_TYPE_INT,
"nsSizeLimit" , &sizelimit_reslimit_handle );
rc2 = slapi_reslimit_register( SLAPI_RESLIMIT_TYPE_INT,
"nsTimeLimit", &timelimit_reslimit_handle );
- if ( rc1 != SLAPI_RESLIMIT_STATUS_SUCCESS ) {
- return( rc1 );
- } else {
- return( rc2 );
- }
+ if ( rc1 != SLAPI_RESLIMIT_STATUS_SUCCESS ) {
+ return( rc1 );
+ } else {
+ return( rc2 );
+ }
}
@@ -1179,162 +1314,171 @@ search_register_reslimits( void )
static void
compute_limits (Slapi_PBlock *pb)
{
- int timelimit, sizelimit;
- int requested_timelimit, max_timelimit, requested_sizelimit, max_sizelimit;
- int isroot;
- int isCertAuth;
- Slapi_ComponentId *component_id = NULL;
- Slapi_Backend *be;
-
- slapi_pblock_get (pb, SLAPI_SEARCH_TIMELIMIT, &requested_timelimit);
- slapi_pblock_get (pb, SLAPI_SEARCH_SIZELIMIT, &requested_sizelimit);
- slapi_pblock_get (pb, SLAPI_REQUESTOR_ISROOT, &isroot);
- slapi_pblock_get (pb, SLAPI_BACKEND, &be);
-
-
- /* If the search belongs to the client authentication process, take the value at
- * nsslapd-timelimit as the actual time limit.
- */
-
- slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &component_id);
- if (component_id) {
- isCertAuth = (! strcasecmp(component_id->sci_component_name, COMPONENT_CERT_AUTH) ) ? 1 : 0;
- if (isCertAuth) {
- timelimit = config_get_timelimit();
- goto set_timelimit;
- }
- }
-
- /*
- * Compute the time limit.
- */
- if ( slapi_reslimit_get_integer_limit( pb->pb_conn,
- timelimit_reslimit_handle, &max_timelimit )
- != SLAPI_RESLIMIT_STATUS_SUCCESS ) {
- /*
- * no limit associated with binder/connection or some other error
- * occurred. use the default maximum.
- */
- if ( isroot ) {
- max_timelimit = -1; /* no limit */
- } else {
- max_timelimit = be->be_timelimit;
- }
- }
-
- if ( requested_timelimit == 0 ) {
- timelimit = ( max_timelimit == -1 ) ? -1 : max_timelimit;
- } else if ( max_timelimit == -1 || requested_timelimit < max_timelimit ) {
- timelimit = requested_timelimit;
- } else {
- timelimit = max_timelimit;
- }
+ int timelimit, sizelimit;
+ int requested_timelimit, max_timelimit, requested_sizelimit, max_sizelimit;
+ int isroot;
+ int isCertAuth;
+ Slapi_ComponentId *component_id = NULL;
+ Slapi_Backend *be;
+
+ slapi_pblock_get (pb, SLAPI_SEARCH_TIMELIMIT, &requested_timelimit);
+ slapi_pblock_get (pb, SLAPI_SEARCH_SIZELIMIT, &requested_sizelimit);
+ slapi_pblock_get (pb, SLAPI_REQUESTOR_ISROOT, &isroot);
+ slapi_pblock_get (pb, SLAPI_BACKEND, &be);
+
+
+ /* If the search belongs to the client authentication process, take the value at
+ * nsslapd-timelimit as the actual time limit.
+ */
+
+ slapi_pblock_get (pb, SLAPI_PLUGIN_IDENTITY, &component_id);
+ if (component_id) {
+ isCertAuth = (! strcasecmp(component_id->sci_component_name, COMPONENT_CERT_AUTH) ) ? 1 : 0;
+ if (isCertAuth) {
+ timelimit = config_get_timelimit();
+ goto set_timelimit;
+ }
+ }
+
+ /*
+ * Compute the time limit.
+ */
+ if ( slapi_reslimit_get_integer_limit( pb->pb_conn,
+ timelimit_reslimit_handle, &max_timelimit )
+ != SLAPI_RESLIMIT_STATUS_SUCCESS ) {
+ /*
+ * no limit associated with binder/connection or some other error
+ * occurred. use the default maximum.
+ */
+ if ( isroot ) {
+ max_timelimit = -1; /* no limit */
+ } else {
+ max_timelimit = be->be_timelimit;
+ }
+ }
+
+ if ( requested_timelimit == 0 ) {
+ timelimit = ( max_timelimit == -1 ) ? -1 : max_timelimit;
+ } else if ( max_timelimit == -1 || requested_timelimit < max_timelimit ) {
+ timelimit = requested_timelimit;
+ } else {
+ timelimit = max_timelimit;
+ }
set_timelimit:
- slapi_pblock_set(pb, SLAPI_SEARCH_TIMELIMIT, &timelimit);
-
-
- /*
- * Compute the size limit.
- */
- if ( slapi_reslimit_get_integer_limit( pb->pb_conn,
- sizelimit_reslimit_handle, &max_sizelimit )
- != SLAPI_RESLIMIT_STATUS_SUCCESS ) {
- /*
- * no limit associated with binder/connection or some other error
- * occurred. use the default maximum.
- */
- if ( isroot ) {
- max_sizelimit = -1; /* no limit */
- } else {
- max_sizelimit = be->be_sizelimit;
- }
- }
-
- if ( requested_sizelimit == 0 ) {
- sizelimit = ( max_sizelimit == -1 ) ? -1 : max_sizelimit;
- } else if ( max_sizelimit == -1 || requested_sizelimit < max_sizelimit ) {
- sizelimit = requested_sizelimit;
- } else {
- sizelimit = max_sizelimit;
- }
- slapi_pblock_set(pb, SLAPI_SEARCH_SIZELIMIT, &sizelimit);
-
- LDAPDebug( LDAP_DEBUG_TRACE,
- "=> compute_limits: sizelimit=%d, timelimit=%d\n",
- sizelimit, timelimit, 0 );
+ slapi_pblock_set(pb, SLAPI_SEARCH_TIMELIMIT, &timelimit);
+
+
+ /*
+ * Compute the size limit.
+ */
+ if ( slapi_reslimit_get_integer_limit( pb->pb_conn,
+ sizelimit_reslimit_handle, &max_sizelimit )
+ != SLAPI_RESLIMIT_STATUS_SUCCESS ) {
+ /*
+ * no limit associated with binder/connection or some other error
+ * occurred. use the default maximum.
+ */
+ if ( isroot ) {
+ max_sizelimit = -1; /* no limit */
+ } else {
+ max_sizelimit = be->be_sizelimit;
+ }
+ }
+
+ if ( requested_sizelimit == 0 ) {
+ sizelimit = ( max_sizelimit == -1 ) ? -1 : max_sizelimit;
+ } else if ( max_sizelimit == -1 || requested_sizelimit < max_sizelimit ) {
+ sizelimit = requested_sizelimit;
+ } else {
+ sizelimit = max_sizelimit;
+ }
+ slapi_pblock_set(pb, SLAPI_SEARCH_SIZELIMIT, &sizelimit);
+
+ LDAPDebug( LDAP_DEBUG_TRACE,
+ "=> compute_limits: sizelimit=%d, timelimit=%d\n",
+ sizelimit, timelimit, 0 );
}
+/* Iterates through results and send them to the client.
+ * Returns 0 if successful and -1 otherwise
+ */
+static int
+send_results_ext(Slapi_PBlock *pb, int send_result, int *nentries, int pagesize, unsigned int *pr_stat)
+{
+ Slapi_Backend *be;
+ int rc;
+
+ slapi_pblock_get (pb, SLAPI_BACKEND, &be);
+
+ if (be->be_next_search_entry == NULL)
+ {
+ /* we need to send the result, but the function to iterate through
+ the result set is not implemented */
+ /* ONREPL - log error */
+ send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Search not supported", 0, NULL);
+ return -1;
+ }
+
+ /* Iterate through the returned result set */
+ if (be->be_next_search_entry_ext != NULL)
+ {
+ /* The iterate look ahead is causing a whole mess with the ACL.
+ ** the entries are now visiting the ACL land in a random way
+ ** and not the ordered way it was before. Until we figure out
+ ** let's not change the behavior.
+ **
+ ** Don't use iterate_with_lookahead because it sends the result
+ * in the same times as the entry and this can cause failure
+ * of the mapping tree scanning algorithme
+ * if (getFrontendConfig()->result_tweak)
+ * {
+ * rc = iterate_with_lookahead(pb, be, send_result, nentries);
+ * }
+ * else
+ */
+ rc = iterate(pb, be, send_result, nentries, pagesize, pr_stat);
+ }
+ else
+ {
+ rc = iterate(pb, be, send_result, nentries, pagesize, pr_stat);
+ }
+
+ switch(rc)
+ {
+ case -1: /* an error occured */
+
+ case 0 : /* everything is ok - result is sent */
+ /* If this happens we are dead but hopefully iterate
+ * never sends the result itself
+ */
+ break;
+
+ case 1: /* everything is ok - don't send the result */
+ rc = 0;
+ }
+
+ return rc;
+}
/* Iterates through results and send them to the client.
* Returns 0 if successful and -1 otherwise
*/
-static int send_results (Slapi_PBlock *pb, int send_result, int * nentries)
+static int
+send_results(Slapi_PBlock *pb, int send_result, int *nentries)
{
- Slapi_Backend *be;
- int rc;
-
- slapi_pblock_get (pb, SLAPI_BACKEND, &be);
-
- if (be->be_next_search_entry == NULL)
- {
- /* we need to send the result, but the function to iterate through
- the result set is not implemented */
- /* ONREPL - log error */
- send_ldap_result(pb, LDAP_UNWILLING_TO_PERFORM, NULL, "Search not supported", 0, NULL);
- return -1;
- }
-
- /* Iterate through the returned result set */
- if (be->be_next_search_entry_ext != NULL)
- {
- /* The iterate look ahead is causing a whole mess with the ACL.
- ** the entries are now visiting the ACL land in a random way
- ** and not the ordered way it was before. Until we figure out
- ** let's not change the behavior.
- **
- ** Don't use iterate_with_lookahead because it sends the result
- * in the same times as the entry and this can cause failure
- * of the mapping tree scanning algorithme
- * if (getFrontendConfig()->result_tweak)
- * {
- * rc = iterate_with_lookahead(pb, be, send_result, nentries);
- * }
- * else
- */
- rc = iterate(pb, be, send_result, nentries);
- }
- else
- {
- rc = iterate(pb, be, send_result, nentries);
- }
-
- switch(rc)
- {
- case -1: /* an error occured */
-
- case 0 : /* everything is ok - result is sent */
- /* If this happens we are dead but hopefully iterate
- * never sends the result itself
- */
- break;
-
- case 1: /* everything is ok - don't send the result */
- rc = 0;
- }
-
- return rc;
+ return send_results_ext(pb, send_result, nentries, -1, NULL);
}
void op_shared_log_error_access (Slapi_PBlock *pb, const char *type, const char *dn, const char *msg)
{
- char ebuf[BUFSIZ];
- slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d %s dn=\"%s\", %s\n",
- ( pb->pb_conn ? pb->pb_conn->c_connid : 0),
- ( pb->pb_op ? pb->pb_op->o_opid : 0),
- type,
- escape_string( dn, ebuf ),
- msg ? msg : "" );
+ char ebuf[BUFSIZ];
+ slapi_log_access( LDAP_DEBUG_STATS, "conn=%" NSPRIu64 " op=%d %s dn=\"%s\", %s\n",
+ ( pb->pb_conn ? pb->pb_conn->c_connid : 0),
+ ( pb->pb_op ? pb->pb_op->o_opid : 0),
+ type,
+ escape_string( dn, ebuf ),
+ msg ? msg : "" );
}
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
new file mode 100644
index 000000000..f140933e6
--- /dev/null
+++ b/ldap/servers/slapd/pagedresults.c
@@ -0,0 +1,315 @@
+/** BEGIN COPYRIGHT BLOCK
+ * This Program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; version 2 of the License.
+ *
+ * This Program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * In addition, as a special exception, Red Hat, Inc. gives You the additional
+ * right to link the code of this Program with code not covered under the GNU
+ * General Public License ("Non-GPL Code") and to distribute linked combinations
+ * including the two, subject to the limitations in this paragraph. Non-GPL Code
+ * permitted under this exception must only link to the code of this Program
+ * through those well defined interfaces identified in the file named EXCEPTION
+ * found in the source code files (the "Approved Interfaces"). The files of
+ * Non-GPL Code may instantiate templates or use macros or inline functions from
+ * the Approved Interfaces without causing the resulting work to be covered by
+ * the GNU General Public License. Only Red Hat, Inc. may make changes or
+ * additions to the list of Approved Interfaces. You must obey the GNU General
+ * Public License in all respects for all of the Program code and other code used
+ * in conjunction with the Program except the Non-GPL Code covered by this
+ * exception. If you modify this file, you may extend this exception to your
+ * version of the file, but you are not obligated to do so. If you do not wish to
+ * provide this exception without modification, you must delete this exception
+ * statement from your version and license this file solely under the GPL without
+ * exception.
+ *
+ *
+ * Copyright (C) 2009 Red Hat, Inc.
+ * All rights reserved.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "slap.h"
+
+/*
+ * Parse the value from an LDAPv3 "Simple Paged Results" control. They look
+ * like this:
+ *
+ * realSearchControlValue ::= SEQUENCE {
+ * size INTEGER (0..maxInt),
+ * -- requested page size from client
+ * -- result set size estimate from server
+ * cookie OCTET STRING
+ * }
+ *
+ * Return an LDAP error code (LDAP_SUCCESS if all goes well).
+ */
+int
+pagedresults_parse_control_value( struct berval *psbvp,
+ ber_int_t *pagesize, int *curr_search_count )
+{
+ int rc = LDAP_SUCCESS;
+ struct berval cookie = {0};
+
+ if ( NULL == pagesize || NULL == curr_search_count ) {
+ return LDAP_OPERATIONS_ERROR;
+ }
+
+ if ( psbvp->bv_len == 0 || psbvp->bv_val == NULL )
+ {
+ rc = LDAP_PROTOCOL_ERROR;
+ }
+ else
+ {
+ BerElement *ber = ber_init( psbvp );
+ if ( ber == NULL )
+ {
+ rc = LDAP_OPERATIONS_ERROR;
+ }
+ else
+ {
+ if ( ber_scanf( ber, "{io}", pagesize, &cookie ) == LBER_ERROR )
+ {
+ rc = LDAP_PROTOCOL_ERROR;
+ }
+ /* the ber encoding is no longer needed */
+ ber_free(ber, 1);
+ if ( cookie.bv_len <= 0 ) {
+ *curr_search_count = 0;
+ } else {
+ /* not an error */
+ char *ptr = slapi_ch_malloc(cookie.bv_len + 1);
+ memcpy(ptr, cookie.bv_val, cookie.bv_len);
+ *(ptr+cookie.bv_len) = '\0';
+ *curr_search_count = strtol(ptr, NULL, 10);
+ slapi_ch_free_string(&ptr);
+ }
+ slapi_ch_free((void **)&cookie.bv_val);
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * controlType = LDAP_CONTROL_PAGEDRESULTS;
+ * criticality = n/a;
+ * controlValue:
+ * realSearchControlValue ::= SEQUENCE {
+ * size INTEGER (0..maxInt),
+ * -- requested page size from client
+ * -- result set size estimate from server
+ * cookie OCTET STRING
+ * }
+ */
+void
+pagedresults_set_response_control( Slapi_PBlock *pb, int iscritical,
+ ber_int_t pagesize, int curr_search_count )
+{
+ LDAPControl **resultctrls = NULL;
+ LDAPControl pr_respctrl;
+ BerElement *ber = NULL;
+ struct berval *berval = NULL;
+ char *cookie_str = NULL;
+ int found = 0;
+ int i;
+
+ if ( (ber = der_alloc()) == NULL )
+ {
+ goto bailout;
+ }
+
+ /* begin sequence, payload, end sequence */
+ if (curr_search_count < 0) {
+ cookie_str = slapi_ch_smprintf("");
+ } else {
+ cookie_str = slapi_ch_smprintf("%d", curr_search_count);
+ }
+ ber_printf ( ber, "{io}", pagesize, cookie_str, strlen(cookie_str) );
+ if ( ber_flatten ( ber, &berval ) != LDAP_SUCCESS )
+ {
+ goto bailout;
+ }
+ pr_respctrl.ldctl_oid = LDAP_CONTROL_PAGEDRESULTS;
+ pr_respctrl.ldctl_iscritical = iscritical;
+ pr_respctrl.ldctl_value.bv_val = berval->bv_val;
+ pr_respctrl.ldctl_value.bv_len = berval->bv_len;
+
+ slapi_pblock_get ( pb, SLAPI_RESCONTROLS, &resultctrls );
+ for (i = 0; resultctrls && resultctrls[i]; i++)
+ {
+ if (strcmp(resultctrls[i]->ldctl_oid, LDAP_CONTROL_PAGEDRESULTS) == 0)
+ {
+ /*
+ * We get here if search returns more than one entry
+ * and this is not the first entry.
+ */
+ ldap_control_free ( resultctrls[i] );
+ resultctrls[i] = slapi_dup_control (&pr_respctrl);
+ found = 1;
+ break;
+ }
+ }
+
+ if ( !found )
+ {
+ /* slapi_pblock_set() will dup the control */
+ slapi_pblock_set ( pb, SLAPI_ADD_RESCONTROL, &pr_respctrl );
+ }
+
+bailout:
+ slapi_ch_free_string(&cookie_str);
+ ber_free ( ber, 1 ); /* ber_free() checks for NULL param */
+ ber_bvfree ( berval ); /* ber_bvfree() checks for NULL param */
+}
+
+/* setters and getters for the connection */
+Slapi_Backend *
+pagedresults_get_current_be(Connection *conn)
+{
+ Slapi_Backend *be = NULL;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ be = conn->c_current_be;
+ PR_Unlock(conn->c_mutex);
+ }
+ return be;
+}
+
+int
+pagedresults_set_current_be(Connection *conn, Slapi_Backend *be)
+{
+ int rc = -1;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ conn->c_current_be = be;
+ PR_Unlock(conn->c_mutex);
+ rc = 0;
+ }
+ return rc;
+}
+
+void *
+pagedresults_get_search_result(Connection *conn)
+{
+ void *sr = NULL;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ sr = conn->c_search_result_set;
+ PR_Unlock(conn->c_mutex);
+ }
+ return sr;
+}
+
+int
+pagedresults_set_search_result(Connection *conn, void *sr)
+{
+ int rc = -1;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ conn->c_search_result_set = sr;
+ PR_Unlock(conn->c_mutex);
+ rc = 0;
+ }
+ return rc;
+}
+
+int
+pagedresults_get_search_result_count(Connection *conn)
+{
+ int count = 0;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ count = conn->c_search_result_count;
+ PR_Unlock(conn->c_mutex);
+ }
+ return count;
+}
+
+int
+pagedresults_set_search_result_count(Connection *conn, int count)
+{
+ int rc = -1;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ conn->c_search_result_count = count;
+ PR_Unlock(conn->c_mutex);
+ rc = 0;
+ }
+ return rc;
+}
+
+int
+pagedresults_get_with_sort(Connection *conn)
+{
+ int flags = 0;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ flags = conn->c_flags&CONN_FLAG_PAGEDRESULTS_WITH_SORT;
+ PR_Unlock(conn->c_mutex);
+ }
+ return flags;
+}
+
+int
+pagedresults_set_with_sort(Connection *conn, int flags)
+{
+ int rc = -1;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ if (flags & OP_FLAG_SERVER_SIDE_SORTING)
+ conn->c_flags |= CONN_FLAG_PAGEDRESULTS_WITH_SORT;
+ PR_Unlock(conn->c_mutex);
+ rc = 0;
+ }
+ return rc;
+}
+
+int
+pagedresults_get_sort_result_code(Connection *conn)
+{
+ int code = 0;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ code = conn->c_sort_result_code;
+ PR_Unlock(conn->c_mutex);
+ }
+ return code;
+}
+
+int
+pagedresults_set_sort_result_code(Connection *conn, int code)
+{
+ int rc = -1;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ conn->c_sort_result_code = code;
+ PR_Unlock(conn->c_mutex);
+ rc = 0;
+ }
+ return rc;
+}
+
+int
+pagedresults_set_timelimit(Connection *conn, time_t timelimit)
+{
+ int rc = -1;
+ if (conn) {
+ PR_Lock(conn->c_mutex);
+ conn->c_timelimit = timelimit;
+ PR_Unlock(conn->c_mutex);
+ rc = 0;
+ }
+ return rc;
+}
+
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index 062a87f82..dd8239db7 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -512,6 +512,12 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
}
(*(IFP *)value) = pblock->pb_plugin->plg_entry_release;
break;
+ case SLAPI_PLUGIN_DB_SEARCH_RESULTS_RELEASE_FN:
+ if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE ) {
+ return( -1 );
+ }
+ (*(IFP *)value) = pblock->pb_plugin->plg_search_results_release;
+ break;
case SLAPI_PLUGIN_DB_COMPARE_FN:
if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE ) {
return( -1 );
@@ -1804,6 +1810,12 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
}
pblock->pb_plugin->plg_entry_release = (IFP) value;
break;
+ case SLAPI_PLUGIN_DB_SEARCH_RESULTS_RELEASE_FN:
+ if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE ) {
+ return( -1 );
+ }
+ pblock->pb_plugin->plg_search_results_release = (IFP) value;
+ break;
case SLAPI_PLUGIN_DB_COMPARE_FN:
if ( pblock->pb_plugin->plg_type != SLAPI_PLUGIN_DATABASE ) {
return( -1 );
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 2041a9978..1ba7ddaef 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1326,4 +1326,23 @@ void signal2sigaction( int s, void *a );
int slapd_do_all_nss_ssl_init(int slapd_exemode, int importexport_encrypt,
int s_port, daemon_ports_t *ports_info);
+/*
+ * pagedresults.c
+ */
+int pagedresults_parse_control_value(struct berval *psbvp, ber_int_t *pagesize, int *curr_search_count);
+void pagedresults_set_response_control(Slapi_PBlock *pb, int iscritical, ber_int_t pagesize, int curr_search_count);
+Slapi_Backend *pagedresults_get_current_be(Connection *conn);
+int pagedresults_set_current_be(Connection *conn, Slapi_Backend *be);
+void *pagedresults_get_search_result(Connection *conn);
+int pagedresults_set_search_result(Connection *conn, void *sr);
+int pagedresults_get_search_result_count(Connection *conn);
+int pagedresults_set_search_result_count(Connection *conn, int cnt);
+int pagedresults_get_with_sort(Connection *conn);
+int pagedresults_set_with_sort(Connection *conn, int flags);
+
+/*
+ * sort.c
+ */
+int sort_make_sort_response_control(Slapi_PBlock *pb, int code, char *error_type);
+
#endif /* _PROTO_SLAP */
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 749361f1c..cd02f01f6 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -67,10 +67,10 @@ static long current_conn_count;
static PRLock *current_conn_count_mutex;
static int flush_ber( Slapi_PBlock *pb, Connection *conn,
- Operation *op, BerElement *ber, int type );
+ Operation *op, BerElement *ber, int type );
static char *notes2str( unsigned int notes, char *buf, size_t buflen );
static void log_result( Slapi_PBlock *pb, Operation *op, int err,
- ber_tag_t tag, int nentries );
+ ber_tag_t tag, int nentries );
static void log_entry( Operation *op, Slapi_Entry *e );
static void log_referral( Operation *op );
@@ -1346,7 +1346,7 @@ send_ldap_search_entry_ext(
if ( conn->c_ldapversion >= LDAP_VERSION3 ) {
if ( ectrls != NULL ) {
- rc = write_controls( ber, ectrls );
+ rc = write_controls( ber, ectrls );
}
/*
* The get-effective-rights control is called within
@@ -1360,7 +1360,20 @@ send_ldap_search_entry_ext(
if (strcmp(ctrlp[i]->ldctl_oid, LDAP_CONTROL_GET_EFFECTIVE_RIGHTS ) == 0 ) {
gerctrl[0] = ctrlp[i];
gerctrl[1] = NULL;
- rc = write_controls( ber, gerctrl );
+ rc = write_controls( ber, gerctrl );
+ break;
+ }
+ }
+ }
+ if ( operation->o_flags & OP_FLAG_PAGED_RESULTS ) {
+ LDAPControl *pagedctrl[2];
+ slapi_pblock_get (pb, SLAPI_RESCONTROLS, &ctrlp);
+ for ( i = 0; ctrlp != NULL && ctrlp[i] != NULL; i++ ) {
+ if (strcmp(ctrlp[i]->ldctl_oid, LDAP_CONTROL_PAGEDRESULTS )
+ == 0 ) {
+ pagedctrl[0] = ctrlp[i];
+ pagedctrl[1] = NULL;
+ rc = write_controls( ber, pagedctrl );
break;
}
}
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 724bef93b..3bcadde80 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -202,6 +202,11 @@ typedef struct symbol_t {
#define LDAP_CONTROL_GET_EFFECTIVE_RIGHTS "1.3.6.1.4.1.42.2.27.9.5.2"
#endif
+/* PAGED RESULTS control (shared by request and response) */
+#ifndef LDAP_CONTROL_PAGEDRESULTS
+#define LDAP_CONTROL_PAGEDRESULTS "1.2.840.113556.1.4.319"
+#endif
+
#define SLAPD_VENDOR_NAME "Fedora Project"
#define SLAPD_VERSION_STR "Fedora-Directory/" PRODUCTTEXT
#define SLAPD_SHORT_VERSION_STR PRODUCTTEXT
@@ -760,6 +765,7 @@ struct slapdplugin {
IFP plg_un_db_search; /* search */
IFP plg_un_db_next_search_entry; /* iterate */
IFP plg_un_db_next_search_entry_ext;
+ IFP plg_un_db_search_results_release; /* PAGED RESULTS */
IFP plg_un_db_entry_release;
IFP plg_un_db_compare; /* compare */
IFP plg_un_db_modify; /* modify */
@@ -797,6 +803,7 @@ struct slapdplugin {
#define plg_search plg_un.plg_un_db.plg_un_db_search
#define plg_next_search_entry plg_un.plg_un_db.plg_un_db_next_search_entry
#define plg_next_search_entry_ext plg_un.plg_un_db.plg_un_db_next_search_entry_ext
+#define plg_search_results_release plg_un.plg_un_db.plg_un_db_search_results_release
#define plg_entry_release plg_un.plg_un_db.plg_un_db_entry_release
#define plg_compare plg_un.plg_un_db.plg_un_db_compare
#define plg_modify plg_un.plg_un_db.plg_un_db_modify
@@ -1050,6 +1057,7 @@ typedef struct backend {
#define be_next_search_entry be_database->plg_next_search_entry
#define be_next_search_entry_ext be_database->plg_next_search_entry_ext
#define be_entry_release be_database->plg_entry_release
+#define be_search_results_release be_database->plg_search_results_release
#define be_compare be_database->plg_compare
#define be_modify be_database->plg_modify
#define be_modrdn be_database->plg_modrdn
@@ -1268,6 +1276,13 @@ typedef struct conn {
int c_local_valid; /* flag true if the uid/gid are valid */
uid_t c_local_uid; /* uid of connecting process */
gid_t c_local_gid; /* gid of connecting process */
+ /* PAGED_RESULTS */
+ Slapi_Backend *c_current_be; /* backend being used */
+ void *c_search_result_set; /* search result set for paging */
+ int c_search_result_count; /* search result count */
+ int c_sort_result_code; /* sort result put in response */
+ time_t c_timelimit; /* time limit for this connection */
+ /* PAGED_RESULTS ENDS */
} Connection;
#define CONN_FLAG_SSL 1 /* Is this connection an SSL connection or not ?
* Used to direct I/O code when SSL is handled differently
@@ -1291,6 +1306,10 @@ typedef struct conn {
* successfully completed.
*/
+#define CONN_FLAG_PAGEDRESULTS_WITH_SORT 64 /* paged results control is
+ * sent with server side sorting
+ */
+#define CONN_GET_SORT_RESULT_CODE (-1)
#define START_TLS_OID "1.3.6.1.4.1.1466.20037"
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index 115dcc2ae..23baf296a 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -417,6 +417,8 @@ slapi_filter_to_string_internal( const struct slapi_filter *f, char *buf, size_t
* is used to skip VLV op.
* (see #329951)
*/
+#define OP_FLAG_PAGED_RESULTS 0x40000 /* simple paged results */
+#define OP_FLAG_SERVER_SIDE_SORTING 0x80000 /* server side sorting */
CSN *operation_get_csn(Slapi_Operation *op);
@@ -867,6 +869,7 @@ int valuearray_find(const Slapi_Attr *a, Slapi_Value **va, const Slapi_Value *v)
#define SLAPI_PLUGIN_DB_UPGRADEDB_FN 235
#define SLAPI_PLUGIN_DB_DBVERIFY_FN 236
#define SLAPI_PLUGIN_DB_ADD_SCHEMA_FN 237
+#define SLAPI_PLUGIN_DB_SEARCH_RESULTS_RELEASE_FN 238
/* database plugin-specific parameters */
#define SLAPI_PLUGIN_DB_NO_ACL 250
#define SLAPI_PLUGIN_DB_RMDB_FN 280
diff --git a/ldap/servers/slapd/sort.c b/ldap/servers/slapd/sort.c
new file mode 100644
index 000000000..b6814b269
--- /dev/null
+++ b/ldap/servers/slapd/sort.c
@@ -0,0 +1,130 @@
+/** BEGIN COPYRIGHT BLOCK
+ * This Program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; version 2 of the License.
+ *
+ * This Program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * In addition, as a special exception, Red Hat, Inc. gives You the additional
+ * right to link the code of this Program with code not covered under the GNU
+ * General Public License ("Non-GPL Code") and to distribute linked combinations
+ * including the two, subject to the limitations in this paragraph. Non-GPL Code
+ * permitted under this exception must only link to the code of this Program
+ * through those well defined interfaces identified in the file named EXCEPTION
+ * found in the source code files (the "Approved Interfaces"). The files of
+ * Non-GPL Code may instantiate templates or use macros or inline functions from
+ * the Approved Interfaces without causing the resulting work to be covered by
+ * the GNU General Public License. Only Red Hat, Inc. may make changes or
+ * additions to the list of Approved Interfaces. You must obey the GNU General
+ * Public License in all respects for all of the Program code and other code used
+ * in conjunction with the Program except the Non-GPL Code covered by this
+ * exception. If you modify this file, you may extend this exception to your
+ * version of the file, but you are not obligated to do so. If you do not wish to
+ * provide this exception without modification, you must delete this exception
+ * statement from your version and license this file solely under the GPL without
+ * exception.
+ *
+ *
+ * Copyright (C) 2009 Red Hat, Inc.
+ * All rights reserved.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "slap.h"
+
+/* Fix for bug # 394184, SD, 20 Jul 00 */
+/* fix and cleanup (switch(code) {} removed) */
+/* arg 'code' has now the correct sortResult value */
+int
+sort_make_sort_response_control ( Slapi_PBlock *pb, int code, char *error_type)
+{
+
+ LDAPControl new_ctrl = {0};
+ BerElement *ber= NULL;
+ struct berval *bvp = NULL;
+ int rc = -1;
+ ber_int_t control_code;
+
+ if (code == CONN_GET_SORT_RESULT_CODE) {
+ code = pagedresults_get_sort_result_code(pb->pb_conn);
+ } else {
+ Slapi_Operation *operation;
+ slapi_pblock_get (pb, SLAPI_OPERATION, &operation);
+ if (operation->o_flags & OP_FLAG_PAGED_RESULTS) {
+ pagedresults_set_sort_result_code(pb->pb_conn, code);
+ }
+ }
+
+ control_code = code;
+
+ /*
+ SortResult ::= SEQUENCE {
+ sortResult ENUMERATED {
+ success (0), -- results are sorted
+ operationsError (1), -- server internal failure
+ timeLimitExceeded (3), -- timelimit reached before
+ -- sorting was completed
+ strongAuthRequired (8), -- refused to return sorted
+ -- results via insecure
+ -- protocol
+ adminLimitExceeded (11), -- too many matching entries
+ -- for the server to sort
+ noSuchAttribute (16), -- unrecognized attribute
+ -- type in sort key
+ inappropriateMatching (18), -- unrecognized or inappro-
+ -- priate matching rule in
+ -- sort key
+ insufficientAccessRights (50), -- refused to return sorted
+ -- results to this client
+ busy (51), -- too busy to process
+ unwillingToPerform (53), -- unable to sort
+ other (80)
+ },
+ attributeType [0] AttributeType OPTIONAL
+ }
+ */
+
+ if ( ( ber = ber_alloc()) == NULL ) {
+ return -1;
+ }
+
+ if (( rc = ber_printf( ber, "{e", control_code )) != -1 ) {
+ if ( rc != -1 && NULL != error_type ) {
+ rc = ber_printf( ber, "s", error_type );
+ }
+ if ( rc != -1 ) {
+ rc = ber_printf( ber, "}" );
+ }
+ }
+ if ( rc != -1 ) {
+ rc = ber_flatten( ber, &bvp );
+ }
+
+ ber_free( ber, 1 );
+
+ if ( rc == -1 ) {
+ return rc;
+ }
+
+ new_ctrl.ldctl_oid = LDAP_CONTROL_SORTRESPONSE;
+ new_ctrl.ldctl_value = *bvp;
+ new_ctrl.ldctl_iscritical = 1;
+
+ if ( slapi_pblock_set( pb, SLAPI_ADD_RESCONTROL, &new_ctrl ) != 0 ) {
+ ber_bvfree(bvp);
+ return( -1 );
+ }
+
+ ber_bvfree(bvp);
+ return( LDAP_SUCCESS );
+}
+/* End fix for bug #394184 */
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.